Compare commits
124 Commits
vr0.4.245
...
unclecode-
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e12d2e29e5 | ||
|
|
fc425023f5 | ||
|
|
cbef406f9b | ||
|
|
8a76563018 | ||
|
|
415c1c5bee | ||
|
|
f334daa979 | ||
|
|
d024749633 | ||
|
|
c612f9a852 | ||
|
|
95175cb394 | ||
|
|
cba4a466e5 | ||
|
|
a9e24307cc | ||
|
|
3a87b4e43b | ||
|
|
4bcd4cbda1 | ||
|
|
71ce01c9e1 | ||
|
|
c6d48080a4 | ||
|
|
46d2f12851 | ||
|
|
367cd71db9 | ||
|
|
2af958e12c | ||
|
|
3cb28875c3 | ||
|
|
dad592c801 | ||
|
|
c171891999 | ||
|
|
3b1025abbb | ||
|
|
f00dcc276f | ||
|
|
392c923980 | ||
|
|
2864015469 | ||
|
|
8bb799068e | ||
|
|
063df572b0 | ||
|
|
966fb47e64 | ||
|
|
43e09da694 | ||
|
|
69705df0b3 | ||
|
|
91a5fea11f | ||
|
|
467be9ac76 | ||
|
|
19df96ed56 | ||
|
|
b957ff2ecd | ||
|
|
91073c1244 | ||
|
|
926beee832 | ||
|
|
a9415aaaf6 | ||
|
|
c308a794e8 | ||
|
|
bc7559586f | ||
|
|
04bc643cec | ||
|
|
33a21d6a7a | ||
|
|
7b1ef07c41 | ||
|
|
2f15976b34 | ||
|
|
20920fa17b | ||
|
|
53ac3ec0b4 | ||
|
|
ce4f04dad2 | ||
|
|
f81712eb91 | ||
|
|
31938fb922 | ||
|
|
f8fd9d9eff | ||
|
|
dde14eba7d | ||
|
|
54c84079c4 | ||
|
|
d0586f09a9 | ||
|
|
09ac7ed008 | ||
|
|
97796f39d2 | ||
|
|
4d7f91b378 | ||
|
|
69a77222ef | ||
|
|
0afc3e9e5e | ||
|
|
65d33bcc0f | ||
|
|
6a01008a2b | ||
|
|
6dc01eae3a | ||
|
|
7b7fe84e0d | ||
|
|
5c36f4308f | ||
|
|
45809d1c91 | ||
|
|
357414c345 | ||
|
|
260b9120c3 | ||
|
|
976ea52167 | ||
|
|
2d69bf2366 | ||
|
|
dee5fe9851 | ||
|
|
88697c4630 | ||
|
|
16b8d4945b | ||
|
|
d09c611d15 | ||
|
|
9247877037 | ||
|
|
2cec527a22 | ||
|
|
4b1309cbf2 | ||
|
|
8b6fe6a98f | ||
|
|
91463e34f1 | ||
|
|
1221be30a3 | ||
|
|
6dfa9cb703 | ||
|
|
e363234172 | ||
|
|
3d09b6a221 | ||
|
|
2d6b19e1a2 | ||
|
|
ece9202b61 | ||
|
|
9d694da939 | ||
|
|
20c027b79c | ||
|
|
8878b3d032 | ||
|
|
1ab9d115cf | ||
|
|
8ec12d7d68 | ||
|
|
c3370ec5da | ||
|
|
f3ae5a657c | ||
|
|
825c78a048 | ||
|
|
3865342c93 | ||
|
|
ac5f461d40 | ||
|
|
f9c601eb7e | ||
|
|
e8b4ac6046 | ||
|
|
051a6cf974 | ||
|
|
1c9464b988 | ||
|
|
6838901788 | ||
|
|
ad5e5d21ca | ||
|
|
26d821c0de | ||
|
|
010677cbee | ||
|
|
c110d459fb | ||
|
|
4d1975e0a7 | ||
|
|
82734a750c | ||
|
|
56fa4e1e42 | ||
|
|
ca3e33122e | ||
|
|
fe52311bf4 | ||
|
|
01b73950ee | ||
|
|
12880f1ffa | ||
|
|
53be88b677 | ||
|
|
3427ead8b8 | ||
|
|
32652189b0 | ||
|
|
ae376f15fb | ||
|
|
72fbdac467 | ||
|
|
0857c7b448 | ||
|
|
07b4c1c0ed | ||
|
|
196dc79ec7 | ||
|
|
24b3da717a | ||
|
|
98acc4254d | ||
|
|
eac78c7993 | ||
|
|
da1bc0f7bf | ||
|
|
aa4f92f458 | ||
|
|
a96e05d4ae | ||
|
|
5c95fd92b4 | ||
|
|
8e2403a7da |
220
.codeiumignore
220
.codeiumignore
@@ -1,220 +0,0 @@
|
|||||||
# Byte-compiled / optimized / DLL files
|
|
||||||
__pycache__/
|
|
||||||
*.py[cod]
|
|
||||||
*$py.class
|
|
||||||
|
|
||||||
# C extensions
|
|
||||||
*.so
|
|
||||||
|
|
||||||
# Distribution / packaging
|
|
||||||
.Python
|
|
||||||
build/
|
|
||||||
develop-eggs/
|
|
||||||
dist/
|
|
||||||
downloads/
|
|
||||||
eggs/
|
|
||||||
.eggs/
|
|
||||||
lib/
|
|
||||||
lib64/
|
|
||||||
parts/
|
|
||||||
sdist/
|
|
||||||
var/
|
|
||||||
wheels/
|
|
||||||
share/python-wheels/
|
|
||||||
*.egg-info/
|
|
||||||
.installed.cfg
|
|
||||||
*.egg
|
|
||||||
MANIFEST
|
|
||||||
|
|
||||||
# PyInstaller
|
|
||||||
# Usually these files are written by a python script from a template
|
|
||||||
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
|
||||||
*.manifest
|
|
||||||
*.spec
|
|
||||||
|
|
||||||
# Installer logs
|
|
||||||
pip-log.txt
|
|
||||||
pip-delete-this-directory.txt
|
|
||||||
|
|
||||||
# Unit test / coverage reports
|
|
||||||
htmlcov/
|
|
||||||
.tox/
|
|
||||||
.nox/
|
|
||||||
.coverage
|
|
||||||
.coverage.*
|
|
||||||
.cache
|
|
||||||
nosetests.xml
|
|
||||||
coverage.xml
|
|
||||||
*.cover
|
|
||||||
*.py,cover
|
|
||||||
.hypothesis/
|
|
||||||
.pytest_cache/
|
|
||||||
cover/
|
|
||||||
|
|
||||||
# Translations
|
|
||||||
*.mo
|
|
||||||
*.pot
|
|
||||||
|
|
||||||
# Django stuff:
|
|
||||||
*.log
|
|
||||||
local_settings.py
|
|
||||||
db.sqlite3
|
|
||||||
db.sqlite3-journal
|
|
||||||
|
|
||||||
# Flask stuff:
|
|
||||||
instance/
|
|
||||||
.webassets-cache
|
|
||||||
|
|
||||||
# Scrapy stuff:
|
|
||||||
.scrapy
|
|
||||||
|
|
||||||
# Sphinx documentation
|
|
||||||
docs/_build/
|
|
||||||
|
|
||||||
# PyBuilder
|
|
||||||
.pybuilder/
|
|
||||||
target/
|
|
||||||
|
|
||||||
# Jupyter Notebook
|
|
||||||
.ipynb_checkpoints
|
|
||||||
|
|
||||||
# IPython
|
|
||||||
profile_default/
|
|
||||||
ipython_config.py
|
|
||||||
|
|
||||||
# pyenv
|
|
||||||
# For a library or package, you might want to ignore these files since the code is
|
|
||||||
# intended to run in multiple environments; otherwise, check them in:
|
|
||||||
# .python-version
|
|
||||||
|
|
||||||
# pipenv
|
|
||||||
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
|
||||||
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
|
||||||
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
|
||||||
# install all needed dependencies.
|
|
||||||
#Pipfile.lock
|
|
||||||
|
|
||||||
# poetry
|
|
||||||
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
|
||||||
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
|
||||||
# commonly ignored for libraries.
|
|
||||||
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
|
||||||
#poetry.lock
|
|
||||||
|
|
||||||
# pdm
|
|
||||||
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
|
||||||
#pdm.lock
|
|
||||||
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
|
|
||||||
# in version control.
|
|
||||||
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
|
|
||||||
.pdm.toml
|
|
||||||
.pdm-python
|
|
||||||
.pdm-build/
|
|
||||||
|
|
||||||
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
|
||||||
__pypackages__/
|
|
||||||
|
|
||||||
# Celery stuff
|
|
||||||
celerybeat-schedule
|
|
||||||
celerybeat.pid
|
|
||||||
|
|
||||||
# SageMath parsed files
|
|
||||||
*.sage.py
|
|
||||||
|
|
||||||
# Environments
|
|
||||||
.env
|
|
||||||
.venv
|
|
||||||
env/
|
|
||||||
venv/
|
|
||||||
ENV/
|
|
||||||
env.bak/
|
|
||||||
venv.bak/
|
|
||||||
|
|
||||||
# Spyder project settings
|
|
||||||
.spyderproject
|
|
||||||
.spyproject
|
|
||||||
|
|
||||||
# Rope project settings
|
|
||||||
.ropeproject
|
|
||||||
|
|
||||||
# mkdocs documentation
|
|
||||||
/site
|
|
||||||
|
|
||||||
# mypy
|
|
||||||
.mypy_cache/
|
|
||||||
.dmypy.json
|
|
||||||
dmypy.json
|
|
||||||
|
|
||||||
# Pyre type checker
|
|
||||||
.pyre/
|
|
||||||
|
|
||||||
# pytype static type analyzer
|
|
||||||
.pytype/
|
|
||||||
|
|
||||||
# Cython debug symbols
|
|
||||||
cython_debug/
|
|
||||||
|
|
||||||
# PyCharm
|
|
||||||
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
|
||||||
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
|
||||||
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
|
||||||
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
|
||||||
#.idea/
|
|
||||||
|
|
||||||
Crawl4AI.egg-info/
|
|
||||||
Crawl4AI.egg-info/*
|
|
||||||
crawler_data.db
|
|
||||||
.vscode/
|
|
||||||
.tests/
|
|
||||||
.test_pads/
|
|
||||||
test_pad.py
|
|
||||||
test_pad*.py
|
|
||||||
.data/
|
|
||||||
Crawl4AI.egg-info/
|
|
||||||
|
|
||||||
requirements0.txt
|
|
||||||
a.txt
|
|
||||||
|
|
||||||
*.sh
|
|
||||||
.idea
|
|
||||||
docs/examples/.chainlit/
|
|
||||||
docs/examples/.chainlit/*
|
|
||||||
.chainlit/config.toml
|
|
||||||
.chainlit/translations/en-US.json
|
|
||||||
|
|
||||||
local/
|
|
||||||
.files/
|
|
||||||
|
|
||||||
a.txt
|
|
||||||
.lambda_function.py
|
|
||||||
ec2*
|
|
||||||
|
|
||||||
update_changelog.sh
|
|
||||||
|
|
||||||
.DS_Store
|
|
||||||
docs/.DS_Store
|
|
||||||
tmp/
|
|
||||||
test_env/
|
|
||||||
**/.DS_Store
|
|
||||||
**/.DS_Store
|
|
||||||
|
|
||||||
todo.md
|
|
||||||
todo_executor.md
|
|
||||||
git_changes.py
|
|
||||||
git_changes.md
|
|
||||||
pypi_build.sh
|
|
||||||
git_issues.py
|
|
||||||
git_issues.md
|
|
||||||
|
|
||||||
.next/
|
|
||||||
.tests/
|
|
||||||
.docs/
|
|
||||||
.gitboss/
|
|
||||||
todo_executor.md
|
|
||||||
protect-all-except-feature.sh
|
|
||||||
manage-collab.sh
|
|
||||||
publish.sh
|
|
||||||
combine.sh
|
|
||||||
combined_output.txt
|
|
||||||
tree.md
|
|
||||||
|
|
||||||
12
.gitattributes
vendored
Normal file
12
.gitattributes
vendored
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
# Documentation
|
||||||
|
*.html linguist-documentation
|
||||||
|
docs/* linguist-documentation
|
||||||
|
docs/examples/* linguist-documentation
|
||||||
|
docs/md_v2/* linguist-documentation
|
||||||
|
|
||||||
|
# Explicitly mark Python as the main language
|
||||||
|
*.py linguist-detectable=true
|
||||||
|
*.py linguist-language=Python
|
||||||
|
|
||||||
|
# Exclude HTML from language statistics
|
||||||
|
*.html linguist-detectable=false
|
||||||
59
.github/DISCUSSION_TEMPLATE/feature-requests.yml
vendored
Normal file
59
.github/DISCUSSION_TEMPLATE/feature-requests.yml
vendored
Normal file
@@ -0,0 +1,59 @@
|
|||||||
|
title: "[Feature Request]: "
|
||||||
|
labels: ["⚙️ New"]
|
||||||
|
body:
|
||||||
|
- type: markdown
|
||||||
|
attributes:
|
||||||
|
value: |
|
||||||
|
Thank you for your interest in suggesting a new feature! Before you submit, please take a moment to check if it already exists in
|
||||||
|
this discussions category to avoid duplicates. 😊
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: needs_to_be_done
|
||||||
|
attributes:
|
||||||
|
label: What needs to be done?
|
||||||
|
description: Please describe the feature or functionality you'd like to see.
|
||||||
|
placeholder: "e.g., Return alt text along with images scraped from webpages in Result"
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: problem_to_solve
|
||||||
|
attributes:
|
||||||
|
label: What problem does this solve?
|
||||||
|
description: Explain the pain point or issue this feature will help address.
|
||||||
|
placeholder: "e.g., Bypass Captchas added by cloudflare"
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: target_users
|
||||||
|
attributes:
|
||||||
|
label: Target users/beneficiaries
|
||||||
|
description: Who would benefit from this feature? (e.g., specific teams, developers, users, etc.)
|
||||||
|
placeholder: "e.g., Marketing teams, developers"
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: current_workarounds
|
||||||
|
attributes:
|
||||||
|
label: Current alternatives/workarounds
|
||||||
|
description: Are there any existing solutions or workarounds? How does this feature improve upon them?
|
||||||
|
placeholder: "e.g., Users manually select the css classes mapped to data fields to extract them"
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
|
|
||||||
|
- type: markdown
|
||||||
|
attributes:
|
||||||
|
value: |
|
||||||
|
### 💡 Implementation Ideas
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: proposed_approach
|
||||||
|
attributes:
|
||||||
|
label: Proposed approach
|
||||||
|
description: Share any ideas you have for how this feature could be implemented. Point out any challenges you foresee
|
||||||
|
and the success metrics for this feature
|
||||||
|
placeholder: "e.g., Implement a breadth first traversal algorithm for scraper"
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
127
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
Normal file
127
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
Normal file
@@ -0,0 +1,127 @@
|
|||||||
|
name: Bug Report
|
||||||
|
description: Report a bug with Crawl4AI.
|
||||||
|
title: "[Bug]: "
|
||||||
|
labels: ["🐞 Bug","🩺 Needs Triage"]
|
||||||
|
body:
|
||||||
|
- type: input
|
||||||
|
id: crawl4ai_version
|
||||||
|
attributes:
|
||||||
|
label: crawl4ai version
|
||||||
|
description: Specify the version of crawl4ai you are using.
|
||||||
|
placeholder: "e.g., 2.0.0"
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: expected_behavior
|
||||||
|
attributes:
|
||||||
|
label: Expected Behavior
|
||||||
|
description: Describe what you expected to happen.
|
||||||
|
placeholder: "Provide a detailed explanation of the expected outcome."
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: current_behavior
|
||||||
|
attributes:
|
||||||
|
label: Current Behavior
|
||||||
|
description: Describe what is happening instead of the expected behavior.
|
||||||
|
placeholder: "Describe the actual result or issue you encountered."
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: dropdown
|
||||||
|
id: reproducible
|
||||||
|
attributes:
|
||||||
|
label: Is this reproducible?
|
||||||
|
description: Indicate whether this bug can be reproduced consistently.
|
||||||
|
options:
|
||||||
|
- "Yes"
|
||||||
|
- "No"
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: inputs
|
||||||
|
attributes:
|
||||||
|
label: Inputs Causing the Bug
|
||||||
|
description: Provide details about the inputs causing the issue.
|
||||||
|
placeholder: |
|
||||||
|
- URL(s):
|
||||||
|
- Settings used:
|
||||||
|
- Input data (if applicable):
|
||||||
|
render: bash
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: steps_to_reproduce
|
||||||
|
attributes:
|
||||||
|
label: Steps to Reproduce
|
||||||
|
description: Provide step-by-step instructions to reproduce the issue.
|
||||||
|
placeholder: |
|
||||||
|
1. Go to...
|
||||||
|
2. Click on...
|
||||||
|
3. Observe the issue...
|
||||||
|
render: bash
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: code_snippets
|
||||||
|
attributes:
|
||||||
|
label: Code snippets
|
||||||
|
description: Provide code snippets(if any). Add comments as necessary
|
||||||
|
placeholder: print("Hello world")
|
||||||
|
render: python
|
||||||
|
|
||||||
|
# Header Section with Title
|
||||||
|
- type: markdown
|
||||||
|
attributes:
|
||||||
|
value: |
|
||||||
|
## Supporting Information
|
||||||
|
Please provide the following details to help us understand and resolve your issue. This will assist us in reproducing and diagnosing the problem.
|
||||||
|
|
||||||
|
- type: input
|
||||||
|
id: os
|
||||||
|
attributes:
|
||||||
|
label: OS
|
||||||
|
description: Please provide the operating system & distro where the issue occurs.
|
||||||
|
placeholder: "e.g., Windows, macOS, Linux"
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: input
|
||||||
|
id: python_version
|
||||||
|
attributes:
|
||||||
|
label: Python version
|
||||||
|
description: Specify the Python version being used.
|
||||||
|
placeholder: "e.g., 3.8.5"
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
# Browser Field
|
||||||
|
- type: input
|
||||||
|
id: browser
|
||||||
|
attributes:
|
||||||
|
label: Browser
|
||||||
|
description: Provide the name of the browser you are using.
|
||||||
|
placeholder: "e.g., Chrome, Firefox, Safari"
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
|
|
||||||
|
# Browser Version Field
|
||||||
|
- type: input
|
||||||
|
id: browser_version
|
||||||
|
attributes:
|
||||||
|
label: Browser version
|
||||||
|
description: Provide the version of the browser you are using.
|
||||||
|
placeholder: "e.g., 91.0.4472.124"
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
|
|
||||||
|
# Error Logs Field (Text Area)
|
||||||
|
- type: textarea
|
||||||
|
id: error_logs
|
||||||
|
attributes:
|
||||||
|
label: Error logs & Screenshots (if applicable)
|
||||||
|
description: If you encountered any errors, please provide the error logs. Attach any relevant screenshots to help us understand the issue.
|
||||||
|
placeholder: "Paste error logs here and attach your screenshots"
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
8
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
8
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
blank_issues_enabled: false
|
||||||
|
contact_links:
|
||||||
|
- name: Feature Requests
|
||||||
|
url: https://github.com/unclecode/crawl4ai/discussions/categories/feature-requests
|
||||||
|
about: "Suggest new features or enhancements for Crawl4AI"
|
||||||
|
- name: Forums - Q&A
|
||||||
|
url: https://github.com/unclecode/crawl4ai/discussions/categories/forums-q-a
|
||||||
|
about: "Ask questions or engage in general discussions about Crawl4AI"
|
||||||
19
.github/pull_request_template.md
vendored
Normal file
19
.github/pull_request_template.md
vendored
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
## Summary
|
||||||
|
Please include a summary of the change and/or which issues are fixed.
|
||||||
|
|
||||||
|
eg: `Fixes #123` (Tag GitHub issue numbers in this format, so it automatically links the issues with your PR)
|
||||||
|
|
||||||
|
## List of files changed and why
|
||||||
|
eg: quickstart.py - To update the example as per new changes
|
||||||
|
|
||||||
|
## How Has This Been Tested?
|
||||||
|
Please describe the tests that you ran to verify your changes.
|
||||||
|
|
||||||
|
## Checklist:
|
||||||
|
|
||||||
|
- [ ] My code follows the style guidelines of this project
|
||||||
|
- [ ] I have performed a self-review of my own code
|
||||||
|
- [ ] I have commented my code, particularly in hard-to-understand areas
|
||||||
|
- [ ] I have made corresponding changes to the documentation
|
||||||
|
- [ ] I have added/updated unit tests that prove my fix is effective or that my feature works
|
||||||
|
- [ ] New and existing unit tests pass locally with my changes
|
||||||
30
.gitignore
vendored
30
.gitignore
vendored
@@ -225,3 +225,33 @@ tree.md
|
|||||||
.scripts
|
.scripts
|
||||||
.local
|
.local
|
||||||
.do
|
.do
|
||||||
|
/plans
|
||||||
|
plans/
|
||||||
|
|
||||||
|
# Codeium
|
||||||
|
.codeiumignore
|
||||||
|
todo/
|
||||||
|
|
||||||
|
# Continue development files
|
||||||
|
.continue/
|
||||||
|
.continuerc.json
|
||||||
|
continue.lock
|
||||||
|
continue_core.log
|
||||||
|
contextProviders/
|
||||||
|
continue_workspace/
|
||||||
|
.continue-cache/
|
||||||
|
continue_config.json
|
||||||
|
|
||||||
|
# Continue temporary files
|
||||||
|
.continue-temp/
|
||||||
|
.continue-logs/
|
||||||
|
.continue-downloads/
|
||||||
|
|
||||||
|
# Continue VS Code specific
|
||||||
|
.vscode-continue/
|
||||||
|
.vscode-continue-cache/
|
||||||
|
|
||||||
|
.prompts/
|
||||||
|
|
||||||
|
.llm.env
|
||||||
|
.private/
|
||||||
|
|||||||
270
CHANGELOG.md
270
CHANGELOG.md
@@ -5,6 +5,270 @@ All notable changes to Crawl4AI will be documented in this file.
|
|||||||
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
||||||
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
||||||
|
|
||||||
|
## Version 0.5.0 (2025-03-02)
|
||||||
|
|
||||||
|
### Added
|
||||||
|
|
||||||
|
- *(profiles)* Add BrowserProfiler class for dedicated browser profile management
|
||||||
|
- *(cli)* Add interactive profile management to CLI with rich UI
|
||||||
|
- *(profiles)* Add ability to crawl directly from profile management interface
|
||||||
|
- *(browser)* Support identity-based browsing with persistent profiles
|
||||||
|
- *(deep-crawling)* Add max_pages parameter to limit the number of pages crawled in all deep crawling strategies
|
||||||
|
- *(deep-crawling)* Add score_threshold parameter to BFS and DFS strategies to filter URLs by score
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
|
||||||
|
- *(browser)* Refactor profile management from ManagedBrowser to BrowserProfiler class
|
||||||
|
- *(cli)* Enhance CLI with profile selection and status display for crawling
|
||||||
|
- *(examples)* Update identity-based browsing example to use BrowserProfiler class
|
||||||
|
- *(docs)* Update identity-based crawling documentation
|
||||||
|
- *(docs)* Update deep crawling documentation with max_pages and score_threshold parameters
|
||||||
|
- *(examples)* Add example demonstrating the use of max_pages and score_threshold parameters
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
|
||||||
|
- *(browser)* Fix profile detection and management on different platforms
|
||||||
|
- *(cli)* Fix CLI command structure for better user experience
|
||||||
|
- *(deep-crawling)* Improve BFS and DFS strategies to handle page count limits more efficiently
|
||||||
|
|
||||||
|
|
||||||
|
## Version 0.5.0 (2025-02-21)
|
||||||
|
|
||||||
|
### Added
|
||||||
|
|
||||||
|
- *(crawler)* [**breaking**] Add memory-adaptive dispatcher with rate limiting
|
||||||
|
- *(scraping)* [**breaking**] Add LXML-based scraping mode for improved performance
|
||||||
|
- *(content-filter)* Add LLMContentFilter for intelligent markdown generation
|
||||||
|
- *(dispatcher)* [**breaking**] Add streaming support for URL processing
|
||||||
|
- *(browser)* [**breaking**] Improve browser context management and add shared data support
|
||||||
|
- *(config)* [**breaking**] Add streaming support and config cloning
|
||||||
|
- *(crawler)* Add URL redirection tracking
|
||||||
|
- *(extraction)* Add LLM-powered schema generation utility
|
||||||
|
- *(proxy)* Add proxy configuration support to CrawlerRunConfig
|
||||||
|
- *(robots)* Add robots.txt compliance support
|
||||||
|
- *(release)* [**breaking**] Prepare v0.4.3 beta release
|
||||||
|
- *(proxy)* Add proxy rotation support and documentation
|
||||||
|
- *(browser)* Add CDP URL configuration support
|
||||||
|
- *(demo)* Uncomment feature demos and add fake-useragent dependency
|
||||||
|
- *(pdf)* Add PDF processing capabilities
|
||||||
|
- *(crawler)* [**breaking**] Enhance JavaScript execution and PDF processing
|
||||||
|
- *(docker)* Add Docker deployment configuration and API server
|
||||||
|
- *(docker)* Add Docker service integration and config serialization
|
||||||
|
- *(docker)* [**breaking**] Enhance Docker deployment setup and configuration
|
||||||
|
- *(api)* Improve cache handling and add API tests
|
||||||
|
- *(crawler)* [**breaking**] Add deep crawling capabilities with BFS strategy
|
||||||
|
- *(proxy)* [**breaking**] Add proxy rotation strategy
|
||||||
|
- *(deep-crawling)* Add DFS strategy and update exports; refactor CLI entry point
|
||||||
|
- *(cli)* Add command line interface with comprehensive features
|
||||||
|
- *(config)* Enhance serialization and add deep crawling exports
|
||||||
|
- *(crawler)* Add HTTP crawler strategy for lightweight web scraping
|
||||||
|
- *(docker)* [**breaking**] Implement supervisor and secure API endpoints
|
||||||
|
- *(docker)* [**breaking**] Add JWT authentication and improve server architecture
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
|
||||||
|
- *(browser)* Update browser channel default to 'chromium' in BrowserConfig.from_args method
|
||||||
|
- *(crawler)* Optimize response handling and default settings
|
||||||
|
- *(crawler)* - Update hello_world example with proper content filtering
|
||||||
|
- - Update hello_world.py example
|
||||||
|
- *(docs)* [**breaking**] Reorganize documentation structure and update styles
|
||||||
|
- *(dispatcher)* [**breaking**] Migrate to modular dispatcher system with enhanced monitoring
|
||||||
|
- *(scraping)* [**breaking**] Replace ScrapingMode enum with strategy pattern
|
||||||
|
- *(browser)* Improve browser path management
|
||||||
|
- *(models)* Rename final_url to redirected_url for consistency
|
||||||
|
- *(core)* [**breaking**] Improve type hints and remove unused file
|
||||||
|
- *(docs)* Improve code formatting in features demo
|
||||||
|
- *(user-agent)* Improve user agent generation system
|
||||||
|
- *(core)* [**breaking**] Reorganize project structure and remove legacy code
|
||||||
|
- *(docker)* Clean up import statements in server.py
|
||||||
|
- *(docker)* Remove unused models and utilities for cleaner codebase
|
||||||
|
- *(docker)* [**breaking**] Improve server architecture and configuration
|
||||||
|
- *(deep-crawl)* [**breaking**] Reorganize deep crawling functionality into dedicated module
|
||||||
|
- *(deep-crawling)* [**breaking**] Reorganize deep crawling strategies and add new implementations
|
||||||
|
- *(crawling)* [**breaking**] Improve type hints and code cleanup
|
||||||
|
- *(crawler)* [**breaking**] Improve HTML handling and cleanup codebase
|
||||||
|
- *(crawler)* [**breaking**] Remove content filter functionality
|
||||||
|
- *(examples)* Update API usage in features demo
|
||||||
|
- *(config)* [**breaking**] Enhance serialization and config handling
|
||||||
|
|
||||||
|
### Docs
|
||||||
|
|
||||||
|
- Add Code of Conduct for the project (#410)
|
||||||
|
|
||||||
|
### Documentation
|
||||||
|
|
||||||
|
- *(extraction)* Add clarifying comments for CSS selector behavior
|
||||||
|
- *(readme)* Update personal story and project vision
|
||||||
|
- *(urls)* [**breaking**] Update documentation URLs to new domain
|
||||||
|
- *(api)* Add streaming mode documentation and examples
|
||||||
|
- *(readme)* Update version and feature announcements for v0.4.3b1
|
||||||
|
- *(examples)* Update demo scripts and fix output formats
|
||||||
|
- *(examples)* Update v0.4.3 features demo to v0.4.3b2
|
||||||
|
- *(readme)* Update version references and fix links
|
||||||
|
- *(multi-url)* [**breaking**] Improve documentation clarity and update examples
|
||||||
|
- *(examples)* Update proxy rotation demo and disable other demos
|
||||||
|
- *(api)* Improve formatting and readability of API documentation
|
||||||
|
- *(examples)* Add SERP API project example
|
||||||
|
- *(urls)* Update documentation URLs to new domain
|
||||||
|
- *(readme)* Resolve merge conflict and update version info
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
|
||||||
|
- *(browser)* Update default browser channel to chromium and simplify channel selection logic
|
||||||
|
- *(browser)* [**breaking**] Default to Chromium channel for new headless mode (#387)
|
||||||
|
- *(browser)* Resolve merge conflicts in browser channel configuration
|
||||||
|
- Prevent memory leaks by ensuring proper closure of Playwright pages
|
||||||
|
- Not working long page screenshot (#403)
|
||||||
|
- *(extraction)* JsonCss selector and crawler improvements
|
||||||
|
- *(models)* [**breaking**] Make model fields optional with default values
|
||||||
|
- *(dispatcher)* Adjust memory threshold and fix dispatcher initialization
|
||||||
|
- *(install)* Ensure proper exit after running doctor command
|
||||||
|
|
||||||
|
### Miscellaneous Tasks
|
||||||
|
|
||||||
|
- *(cleanup)* Remove unused files and improve type hints
|
||||||
|
- Add .gitattributes file
|
||||||
|
|
||||||
|
## License Update
|
||||||
|
|
||||||
|
Crawl4AI v0.5.0 updates the license to Apache 2.0 *with a required attribution clause*. This means you are free to use, modify, and distribute Crawl4AI (even commercially), but you *must* clearly attribute the project in any public use or distribution. See the updated `LICENSE` file for the full legal text and specific requirements.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Version 0.4.3b2 (2025-01-21)
|
||||||
|
|
||||||
|
This release introduces several powerful new features, including robots.txt compliance, dynamic proxy support, LLM-powered schema generation, and improved documentation.
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
- **Robots.txt Compliance:**
|
||||||
|
- Added robots.txt compliance support with efficient SQLite-based caching.
|
||||||
|
- New `check_robots_txt` parameter in `CrawlerRunConfig` to enable robots.txt checking before crawling a URL.
|
||||||
|
- Automated robots.txt checking is now integrated into `AsyncWebCrawler` with 403 status codes for blocked URLs.
|
||||||
|
|
||||||
|
- **Proxy Configuration:**
|
||||||
|
- Added proxy configuration support to `CrawlerRunConfig`, allowing dynamic proxy settings per crawl request.
|
||||||
|
- Updated documentation with examples for using proxy configuration in crawl operations.
|
||||||
|
|
||||||
|
- **LLM-Powered Schema Generation:**
|
||||||
|
- Introduced a new utility for automatic CSS and XPath schema generation using OpenAI or Ollama models.
|
||||||
|
- Added comprehensive documentation and examples for schema generation.
|
||||||
|
- New prompt templates optimized for HTML schema analysis.
|
||||||
|
|
||||||
|
- **URL Redirection Tracking:**
|
||||||
|
- Added URL redirection tracking to capture the final URL after any redirects.
|
||||||
|
- The final URL is now available in the `redirected_url` field of the `AsyncCrawlResponse` object.
|
||||||
|
|
||||||
|
- **Enhanced Streamlined Documentation:**
|
||||||
|
- Refactored and improved the documentation structure for clarity and ease of use.
|
||||||
|
- Added detailed explanations of new features and updated examples.
|
||||||
|
|
||||||
|
- **Improved Browser Context Management:**
|
||||||
|
- Enhanced the management of browser contexts and added shared data support.
|
||||||
|
- Introduced the `shared_data` parameter in `CrawlerRunConfig` to pass data between hooks.
|
||||||
|
|
||||||
|
- **Memory Dispatcher System:**
|
||||||
|
- Migrated to a memory dispatcher system with enhanced monitoring capabilities.
|
||||||
|
- Introduced `MemoryAdaptiveDispatcher` and `SemaphoreDispatcher` for improved resource management.
|
||||||
|
- Added `RateLimiter` for rate limiting support.
|
||||||
|
- New `CrawlerMonitor` for real-time monitoring of crawler operations.
|
||||||
|
|
||||||
|
- **Streaming Support:**
|
||||||
|
- Added streaming support for processing crawled URLs as they are processed.
|
||||||
|
- Enabled streaming mode with the `stream` parameter in `CrawlerRunConfig`.
|
||||||
|
|
||||||
|
- **Content Scraping Strategy:**
|
||||||
|
- Introduced a new `LXMLWebScrapingStrategy` for faster content scraping.
|
||||||
|
- Added support for selecting the scraping strategy via the `scraping_strategy` parameter in `CrawlerRunConfig`.
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
- **Browser Path Management:**
|
||||||
|
- Improved browser path management for consistent behavior across different environments.
|
||||||
|
|
||||||
|
- **Memory Threshold:**
|
||||||
|
- Adjusted the default memory threshold to improve resource utilization.
|
||||||
|
|
||||||
|
- **Pydantic Model Fields:**
|
||||||
|
- Made several model fields optional with default values to improve flexibility.
|
||||||
|
|
||||||
|
### Refactor
|
||||||
|
|
||||||
|
- **Documentation Structure:**
|
||||||
|
- Reorganized documentation structure to improve navigation and readability.
|
||||||
|
- Updated styles and added new sections for advanced features.
|
||||||
|
|
||||||
|
- **Scraping Mode:**
|
||||||
|
- Replaced the `ScrapingMode` enum with a strategy pattern for more flexible content scraping.
|
||||||
|
|
||||||
|
- **Version Update:**
|
||||||
|
- Updated the version to `0.4.248`.
|
||||||
|
|
||||||
|
- **Code Cleanup:**
|
||||||
|
- Removed unused files and improved type hints.
|
||||||
|
- Applied Ruff corrections for code quality.
|
||||||
|
|
||||||
|
- **Updated dependencies:**
|
||||||
|
- Updated dependencies to their latest versions to ensure compatibility and security.
|
||||||
|
|
||||||
|
- **Ignored certain patterns and directories:**
|
||||||
|
- Updated `.gitignore` and `.codeiumignore` to ignore additional patterns and directories, streamlining the development environment.
|
||||||
|
|
||||||
|
- **Simplified Personal Story in README:**
|
||||||
|
- Streamlined the personal story and project vision in the `README.md` for clarity.
|
||||||
|
|
||||||
|
- **Removed Deprecated Files:**
|
||||||
|
- Deleted several deprecated files and examples that are no longer relevant.
|
||||||
|
|
||||||
|
---
|
||||||
|
**Previous Releases:**
|
||||||
|
|
||||||
|
### 0.4.24x (2024-12-31)
|
||||||
|
- **Enhanced SSL & Security**: New SSL certificate handling with custom paths and validation options for secure crawling.
|
||||||
|
- **Smart Content Filtering**: Advanced filtering system with regex support and efficient chunking strategies.
|
||||||
|
- **Improved JSON Extraction**: Support for complex JSONPath, JSON-CSS, and Microdata extraction.
|
||||||
|
- **New Field Types**: Added `computed`, `conditional`, `aggregate`, and `template` field types.
|
||||||
|
- **Performance Boost**: Optimized caching, parallel processing, and memory management.
|
||||||
|
- **Better Error Handling**: Enhanced debugging capabilities with detailed error tracking.
|
||||||
|
- **Security Features**: Improved input validation and safe expression evaluation.
|
||||||
|
|
||||||
|
### 0.4.247 (2025-01-06)
|
||||||
|
|
||||||
|
#### Added
|
||||||
|
- **Windows Event Loop Configuration**: Introduced a utility function `configure_windows_event_loop` to resolve `NotImplementedError` for asyncio subprocesses on Windows. ([#utils.py](crawl4ai/utils.py), [#tutorials/async-webcrawler-basics.md](docs/md_v3/tutorials/async-webcrawler-basics.md))
|
||||||
|
- **`page_need_scroll` Method**: Added a method to determine if a page requires scrolling before taking actions in `AsyncPlaywrightCrawlerStrategy`. ([#async_crawler_strategy.py](crawl4ai/async_crawler_strategy.py))
|
||||||
|
|
||||||
|
#### Changed
|
||||||
|
- **Version Bump**: Updated the version from `0.4.246` to `0.4.247`. ([#__version__.py](crawl4ai/__version__.py))
|
||||||
|
- **Improved Scrolling Logic**: Enhanced scrolling methods in `AsyncPlaywrightCrawlerStrategy` by adding a `scroll_delay` parameter for better control. ([#async_crawler_strategy.py](crawl4ai/async_crawler_strategy.py))
|
||||||
|
- **Markdown Generation Example**: Updated the `hello_world.py` example to reflect the latest API changes and better illustrate features. ([#examples/hello_world.py](docs/examples/hello_world.py))
|
||||||
|
- **Documentation Update**:
|
||||||
|
- Added Windows-specific instructions for handling asyncio event loops. ([#async-webcrawler-basics.md](docs/md_v3/tutorials/async-webcrawler-basics.md))
|
||||||
|
|
||||||
|
#### Removed
|
||||||
|
- **Legacy Markdown Generation Code**: Removed outdated and unused code for markdown generation in `content_scraping_strategy.py`. ([#content_scraping_strategy.py](crawl4ai/content_scraping_strategy.py))
|
||||||
|
|
||||||
|
#### Fixed
|
||||||
|
- **Page Closing to Prevent Memory Leaks**:
|
||||||
|
- **Description**: Added a `finally` block to ensure pages are closed when no `session_id` is provided.
|
||||||
|
- **Impact**: Prevents memory leaks caused by lingering pages after a crawl.
|
||||||
|
- **File**: [`async_crawler_strategy.py`](crawl4ai/async_crawler_strategy.py)
|
||||||
|
- **Code**:
|
||||||
|
```python
|
||||||
|
finally:
|
||||||
|
# If no session_id is given we should close the page
|
||||||
|
if not config.session_id:
|
||||||
|
await page.close()
|
||||||
|
```
|
||||||
|
- **Multiple Element Selection**: Modified `_get_elements` in `JsonCssExtractionStrategy` to return all matching elements instead of just the first one, ensuring comprehensive extraction. ([#extraction_strategy.py](crawl4ai/extraction_strategy.py))
|
||||||
|
- **Error Handling in Scrolling**: Added robust error handling to ensure scrolling proceeds safely even if a configuration is missing. ([#async_crawler_strategy.py](crawl4ai/async_crawler_strategy.py))
|
||||||
|
|
||||||
|
## [0.4.267] - 2025 - 01 - 06
|
||||||
|
|
||||||
|
### Added
|
||||||
|
- **Windows Event Loop Configuration**: Introduced a utility function `configure_windows_event_loop` to resolve `NotImplementedError` for asyncio subprocesses on Windows. ([#utils.py](crawl4ai/utils.py), [#tutorials/async-webcrawler-basics.md](docs/md_v3/tutorials/async-webcrawler-basics.md))
|
||||||
|
- **`page_need_scroll` Method**: Added a method to determine if a page requires scrolling before taking actions in `AsyncPlaywrightCrawlerStrategy`. ([#async_crawler_strategy.py](crawl4ai/async_crawler_strategy.py))
|
||||||
|
|
||||||
## [0.4.24] - 2024-12-31
|
## [0.4.24] - 2024-12-31
|
||||||
|
|
||||||
### Added
|
### Added
|
||||||
@@ -147,12 +411,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
|
|||||||
- Fixed potential viewport mismatches by ensuring consistent use of `self.viewport_width` and `self.viewport_height` throughout the code.
|
- Fixed potential viewport mismatches by ensuring consistent use of `self.viewport_width` and `self.viewport_height` throughout the code.
|
||||||
- Improved robustness of dynamic content loading to avoid timeouts and failed evaluations.
|
- Improved robustness of dynamic content loading to avoid timeouts and failed evaluations.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
## [0.3.75] December 1, 2024
|
## [0.3.75] December 1, 2024
|
||||||
|
|
||||||
### PruningContentFilter
|
### PruningContentFilter
|
||||||
|
|||||||
131
CODE_OF_CONDUCT.md
Normal file
131
CODE_OF_CONDUCT.md
Normal file
@@ -0,0 +1,131 @@
|
|||||||
|
# Crawl4AI Code of Conduct
|
||||||
|
|
||||||
|
## Our Pledge
|
||||||
|
|
||||||
|
We as members, contributors, and leaders pledge to make participation in our
|
||||||
|
community a harassment-free experience for everyone, regardless of age, body
|
||||||
|
size, visible or invisible disability, ethnicity, sex characteristics, gender
|
||||||
|
identity and expression, level of experience, education, socio-economic status,
|
||||||
|
nationality, personal appearance, race, caste, color, religion, or sexual
|
||||||
|
identity and orientation.
|
||||||
|
|
||||||
|
We pledge to act and interact in ways that contribute to an open, welcoming,
|
||||||
|
diverse, inclusive, and healthy community.
|
||||||
|
|
||||||
|
## Our Standards
|
||||||
|
|
||||||
|
Examples of behavior that contributes to a positive environment for our
|
||||||
|
community include:
|
||||||
|
|
||||||
|
* Demonstrating empathy and kindness toward other people
|
||||||
|
* Being respectful of differing opinions, viewpoints, and experiences
|
||||||
|
* Giving and gracefully accepting constructive feedback
|
||||||
|
* Accepting responsibility and apologizing to those affected by our mistakes,
|
||||||
|
and learning from the experience
|
||||||
|
* Focusing on what is best not just for us as individuals, but for the overall
|
||||||
|
community
|
||||||
|
|
||||||
|
Examples of unacceptable behavior include:
|
||||||
|
|
||||||
|
* The use of sexualized language or imagery, and sexual attention or advances of
|
||||||
|
any kind
|
||||||
|
* Trolling, insulting or derogatory comments, and personal or political attacks
|
||||||
|
* Public or private harassment
|
||||||
|
* Publishing others' private information, such as a physical or email address,
|
||||||
|
without their explicit permission
|
||||||
|
* Other conduct which could reasonably be considered inappropriate in a
|
||||||
|
professional setting
|
||||||
|
|
||||||
|
## Enforcement Responsibilities
|
||||||
|
|
||||||
|
Community leaders are responsible for clarifying and enforcing our standards of
|
||||||
|
acceptable behavior and will take appropriate and fair corrective action in
|
||||||
|
response to any behavior that they deem inappropriate, threatening, offensive,
|
||||||
|
or harmful.
|
||||||
|
|
||||||
|
Community leaders have the right and responsibility to remove, edit, or reject
|
||||||
|
comments, commits, code, wiki edits, issues, and other contributions that are
|
||||||
|
not aligned to this Code of Conduct, and will communicate reasons for moderation
|
||||||
|
decisions when appropriate.
|
||||||
|
|
||||||
|
## Scope
|
||||||
|
|
||||||
|
This Code of Conduct applies within all community spaces, and also applies when
|
||||||
|
an individual is officially representing the community in public spaces.
|
||||||
|
Examples of representing our community include using an official email address,
|
||||||
|
posting via an official social media account, or acting as an appointed
|
||||||
|
representative at an online or offline event.
|
||||||
|
|
||||||
|
## Enforcement
|
||||||
|
|
||||||
|
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||||
|
reported to the community leaders responsible for enforcement at
|
||||||
|
unclecode@crawl4ai.com. All complaints will be reviewed and investigated promptly and fairly.
|
||||||
|
|
||||||
|
All community leaders are obligated to respect the privacy and security of the
|
||||||
|
reporter of any incident.
|
||||||
|
|
||||||
|
## Enforcement Guidelines
|
||||||
|
|
||||||
|
Community leaders will follow these Community Impact Guidelines in determining
|
||||||
|
the consequences for any action they deem in violation of this Code of Conduct:
|
||||||
|
|
||||||
|
### 1. Correction
|
||||||
|
|
||||||
|
**Community Impact**: Use of inappropriate language or other behavior deemed
|
||||||
|
unprofessional or unwelcome in the community.
|
||||||
|
|
||||||
|
**Consequence**: A private, written warning from community leaders, providing
|
||||||
|
clarity around the nature of the violation and an explanation of why the
|
||||||
|
behavior was inappropriate. A public apology may be requested.
|
||||||
|
|
||||||
|
### 2. Warning
|
||||||
|
|
||||||
|
**Community Impact**: A violation through a single incident or series of
|
||||||
|
actions.
|
||||||
|
|
||||||
|
**Consequence**: A warning with consequences for continued behavior. No
|
||||||
|
interaction with the people involved, including unsolicited interaction with
|
||||||
|
those enforcing the Code of Conduct, for a specified period of time. This
|
||||||
|
includes avoiding interactions in community spaces as well as external channels
|
||||||
|
like social media. Violating these terms may lead to a temporary or permanent
|
||||||
|
ban.
|
||||||
|
|
||||||
|
### 3. Temporary Ban
|
||||||
|
|
||||||
|
**Community Impact**: A serious violation of community standards, including
|
||||||
|
sustained inappropriate behavior.
|
||||||
|
|
||||||
|
**Consequence**: A temporary ban from any sort of interaction or public
|
||||||
|
communication with the community for a specified period of time. No public or
|
||||||
|
private interaction with the people involved, including unsolicited interaction
|
||||||
|
with those enforcing the Code of Conduct, is allowed during this period.
|
||||||
|
Violating these terms may lead to a permanent ban.
|
||||||
|
|
||||||
|
### 4. Permanent Ban
|
||||||
|
|
||||||
|
**Community Impact**: Demonstrating a pattern of violation of community
|
||||||
|
standards, including sustained inappropriate behavior, harassment of an
|
||||||
|
individual, or aggression toward or disparagement of classes of individuals.
|
||||||
|
|
||||||
|
**Consequence**: A permanent ban from any sort of public interaction within the
|
||||||
|
community.
|
||||||
|
|
||||||
|
## Attribution
|
||||||
|
|
||||||
|
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
|
||||||
|
version 2.1, available at
|
||||||
|
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
|
||||||
|
|
||||||
|
Community Impact Guidelines were inspired by
|
||||||
|
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
|
||||||
|
|
||||||
|
For answers to common questions about this code of conduct, see the FAQ at
|
||||||
|
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
|
||||||
|
[https://www.contributor-covenant.org/translations][translations].
|
||||||
|
|
||||||
|
[homepage]: https://www.contributor-covenant.org
|
||||||
|
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
|
||||||
|
[Mozilla CoC]: https://github.com/mozilla/diversity
|
||||||
|
[FAQ]: https://www.contributor-covenant.org/faq
|
||||||
|
[translations]: https://www.contributor-covenant.org/translations
|
||||||
@@ -6,7 +6,7 @@ We would like to thank the following people for their contributions to Crawl4AI:
|
|||||||
|
|
||||||
- [Unclecode](https://github.com/unclecode) - Project Creator and Main Developer
|
- [Unclecode](https://github.com/unclecode) - Project Creator and Main Developer
|
||||||
- [Nasrin](https://github.com/ntohidi) - Project Manager and Developer
|
- [Nasrin](https://github.com/ntohidi) - Project Manager and Developer
|
||||||
- [Aravind Karnam](https://github.com/aravindkarnam) - Developer
|
- [Aravind Karnam](https://github.com/aravindkarnam) - Head of Community and Product
|
||||||
|
|
||||||
## Community Contributors
|
## Community Contributors
|
||||||
|
|
||||||
@@ -24,6 +24,14 @@ We would like to thank the following people for their contributions to Crawl4AI:
|
|||||||
- [NanmiCoder](https://github.com/NanmiCoder) - fix: crawler strategy exception handling and fixes [#271](https://github.com/unclecode/crawl4ai/pull/271)
|
- [NanmiCoder](https://github.com/NanmiCoder) - fix: crawler strategy exception handling and fixes [#271](https://github.com/unclecode/crawl4ai/pull/271)
|
||||||
- [paulokuong](https://github.com/paulokuong) - fix: RAWL4_AI_BASE_DIRECTORY should be Path object instead of string [#298](https://github.com/unclecode/crawl4ai/pull/298)
|
- [paulokuong](https://github.com/paulokuong) - fix: RAWL4_AI_BASE_DIRECTORY should be Path object instead of string [#298](https://github.com/unclecode/crawl4ai/pull/298)
|
||||||
|
|
||||||
|
#### Feb-Alpha-1
|
||||||
|
- [sufianuddin](https://github.com/sufianuddin) - fix: [Documentation for JsonCssExtractionStrategy](https://github.com/unclecode/crawl4ai/issues/651)
|
||||||
|
- [tautikAg](https://github.com/tautikAg) - fix: [Markdown output has incorect spacing](https://github.com/unclecode/crawl4ai/issues/599)
|
||||||
|
- [cardit1](https://github.com/cardit1) - fix: ['AsyncPlaywrightCrawlerStrategy' object has no attribute 'downloads_path'](https://github.com/unclecode/crawl4ai/issues/585)
|
||||||
|
- [dmurat](https://github.com/dmurat) - fix: [ Incorrect rendering of inline code inside of links ](https://github.com/unclecode/crawl4ai/issues/583)
|
||||||
|
- [Sparshsing](https://github.com/Sparshsing) - fix: [Relative Urls in the webpage not extracted properly ](https://github.com/unclecode/crawl4ai/issues/570)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
## Other Contributors
|
## Other Contributors
|
||||||
|
|
||||||
@@ -31,6 +39,11 @@ We would like to thank the following people for their contributions to Crawl4AI:
|
|||||||
- [Shiv Kumar](https://github.com/shivkumar0757)
|
- [Shiv Kumar](https://github.com/shivkumar0757)
|
||||||
- [QIN2DIM](https://github.com/QIN2DIM)
|
- [QIN2DIM](https://github.com/QIN2DIM)
|
||||||
|
|
||||||
|
#### Typo fixes
|
||||||
|
- [ssoydan](https://github.com/ssoydan)
|
||||||
|
- [Darshan](https://github.com/Darshan2104)
|
||||||
|
- [tuhinmallick](https://github.com/tuhinmallick)
|
||||||
|
|
||||||
## Acknowledgements
|
## Acknowledgements
|
||||||
|
|
||||||
We also want to thank all the users who have reported bugs, suggested features, or helped in any other way to make Crawl4AI better.
|
We also want to thank all the users who have reported bugs, suggested features, or helped in any other way to make Crawl4AI better.
|
||||||
|
|||||||
132
Dockerfile
132
Dockerfile
@@ -1,32 +1,31 @@
|
|||||||
# syntax=docker/dockerfile:1.4
|
FROM python:3.10-slim
|
||||||
|
|
||||||
ARG TARGETPLATFORM
|
# Set build arguments
|
||||||
ARG BUILDPLATFORM
|
ARG APP_HOME=/app
|
||||||
|
ARG GITHUB_REPO=https://github.com/unclecode/crawl4ai.git
|
||||||
|
ARG GITHUB_BRANCH=main
|
||||||
|
ARG USE_LOCAL=true
|
||||||
|
|
||||||
|
ENV PYTHONFAULTHANDLER=1 \
|
||||||
|
PYTHONHASHSEED=random \
|
||||||
|
PYTHONUNBUFFERED=1 \
|
||||||
|
PIP_NO_CACHE_DIR=1 \
|
||||||
|
PYTHONDONTWRITEBYTECODE=1 \
|
||||||
|
PIP_DISABLE_PIP_VERSION_CHECK=1 \
|
||||||
|
PIP_DEFAULT_TIMEOUT=100 \
|
||||||
|
DEBIAN_FRONTEND=noninteractive \
|
||||||
|
REDIS_HOST=localhost \
|
||||||
|
REDIS_PORT=6379
|
||||||
|
|
||||||
# Other build arguments
|
|
||||||
ARG PYTHON_VERSION=3.10
|
ARG PYTHON_VERSION=3.10
|
||||||
|
ARG INSTALL_TYPE=default
|
||||||
# Base stage with system dependencies
|
|
||||||
FROM python:${PYTHON_VERSION}-slim as base
|
|
||||||
|
|
||||||
# Declare ARG variables again within the build stage
|
|
||||||
ARG INSTALL_TYPE=all
|
|
||||||
ARG ENABLE_GPU=false
|
ARG ENABLE_GPU=false
|
||||||
|
ARG TARGETARCH
|
||||||
|
|
||||||
# Platform-specific labels
|
|
||||||
LABEL maintainer="unclecode"
|
LABEL maintainer="unclecode"
|
||||||
LABEL description="🔥🕷️ Crawl4AI: Open-source LLM Friendly Web Crawler & scraper"
|
LABEL description="🔥🕷️ Crawl4AI: Open-source LLM Friendly Web Crawler & scraper"
|
||||||
LABEL version="1.0"
|
LABEL version="1.0"
|
||||||
|
|
||||||
# Environment setup
|
|
||||||
ENV PYTHONUNBUFFERED=1 \
|
|
||||||
PYTHONDONTWRITEBYTECODE=1 \
|
|
||||||
PIP_NO_CACHE_DIR=1 \
|
|
||||||
PIP_DISABLE_PIP_VERSION_CHECK=1 \
|
|
||||||
PIP_DEFAULT_TIMEOUT=100 \
|
|
||||||
DEBIAN_FRONTEND=noninteractive
|
|
||||||
|
|
||||||
# Install system dependencies
|
|
||||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||||
build-essential \
|
build-essential \
|
||||||
curl \
|
curl \
|
||||||
@@ -37,10 +36,10 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
|
|||||||
pkg-config \
|
pkg-config \
|
||||||
python3-dev \
|
python3-dev \
|
||||||
libjpeg-dev \
|
libjpeg-dev \
|
||||||
libpng-dev \
|
redis-server \
|
||||||
|
supervisor \
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
# Playwright system dependencies for Linux
|
|
||||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||||
libglib2.0-0 \
|
libglib2.0-0 \
|
||||||
libnss3 \
|
libnss3 \
|
||||||
@@ -65,8 +64,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
|
|||||||
libatspi2.0-0 \
|
libatspi2.0-0 \
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
# GPU support if enabled and architecture is supported
|
RUN if [ "$ENABLE_GPU" = "true" ] && [ "$TARGETARCH" = "amd64" ] ; then \
|
||||||
RUN if [ "$ENABLE_GPU" = "true" ] && [ "$TARGETPLATFORM" = "linux/amd64" ] ; then \
|
|
||||||
apt-get update && apt-get install -y --no-install-recommends \
|
apt-get update && apt-get install -y --no-install-recommends \
|
||||||
nvidia-cuda-toolkit \
|
nvidia-cuda-toolkit \
|
||||||
&& rm -rf /var/lib/apt/lists/* ; \
|
&& rm -rf /var/lib/apt/lists/* ; \
|
||||||
@@ -74,19 +72,42 @@ else \
|
|||||||
echo "Skipping NVIDIA CUDA Toolkit installation (unsupported platform or GPU disabled)"; \
|
echo "Skipping NVIDIA CUDA Toolkit installation (unsupported platform or GPU disabled)"; \
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Create and set working directory
|
RUN if [ "$TARGETARCH" = "arm64" ]; then \
|
||||||
WORKDIR /app
|
echo "🦾 Installing ARM-specific optimizations"; \
|
||||||
|
apt-get update && apt-get install -y --no-install-recommends \
|
||||||
|
libopenblas-dev \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*; \
|
||||||
|
elif [ "$TARGETARCH" = "amd64" ]; then \
|
||||||
|
echo "🖥️ Installing AMD64-specific optimizations"; \
|
||||||
|
apt-get update && apt-get install -y --no-install-recommends \
|
||||||
|
libomp-dev \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*; \
|
||||||
|
else \
|
||||||
|
echo "Skipping platform-specific optimizations (unsupported platform)"; \
|
||||||
|
fi
|
||||||
|
|
||||||
# Copy the entire project
|
WORKDIR ${APP_HOME}
|
||||||
COPY . .
|
|
||||||
|
|
||||||
# Install base requirements
|
RUN echo '#!/bin/bash\n\
|
||||||
|
if [ "$USE_LOCAL" = "true" ]; then\n\
|
||||||
|
echo "📦 Installing from local source..."\n\
|
||||||
|
pip install --no-cache-dir /tmp/project/\n\
|
||||||
|
else\n\
|
||||||
|
echo "🌐 Installing from GitHub..."\n\
|
||||||
|
for i in {1..3}; do \n\
|
||||||
|
git clone --branch ${GITHUB_BRANCH} ${GITHUB_REPO} /tmp/crawl4ai && break || \n\
|
||||||
|
{ echo "Attempt $i/3 failed! Taking a short break... ☕"; sleep 5; }; \n\
|
||||||
|
done\n\
|
||||||
|
pip install --no-cache-dir /tmp/crawl4ai\n\
|
||||||
|
fi' > /tmp/install.sh && chmod +x /tmp/install.sh
|
||||||
|
|
||||||
|
COPY . /tmp/project/
|
||||||
|
|
||||||
|
COPY deploy/docker/supervisord.conf .
|
||||||
|
|
||||||
|
COPY deploy/docker/requirements.txt .
|
||||||
RUN pip install --no-cache-dir -r requirements.txt
|
RUN pip install --no-cache-dir -r requirements.txt
|
||||||
|
|
||||||
# Install required library for FastAPI
|
|
||||||
RUN pip install fastapi uvicorn psutil
|
|
||||||
|
|
||||||
# Install ML dependencies first for better layer caching
|
|
||||||
RUN if [ "$INSTALL_TYPE" = "all" ] ; then \
|
RUN if [ "$INSTALL_TYPE" = "all" ] ; then \
|
||||||
pip install --no-cache-dir \
|
pip install --no-cache-dir \
|
||||||
torch \
|
torch \
|
||||||
@@ -99,38 +120,37 @@ RUN if [ "$INSTALL_TYPE" = "all" ] ; then \
|
|||||||
python -m nltk.downloader punkt stopwords ; \
|
python -m nltk.downloader punkt stopwords ; \
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Install the package
|
|
||||||
RUN if [ "$INSTALL_TYPE" = "all" ] ; then \
|
RUN if [ "$INSTALL_TYPE" = "all" ] ; then \
|
||||||
pip install ".[all]" && \
|
pip install "/tmp/project/[all]" && \
|
||||||
python -m crawl4ai.model_loader ; \
|
python -m crawl4ai.model_loader ; \
|
||||||
elif [ "$INSTALL_TYPE" = "torch" ] ; then \
|
elif [ "$INSTALL_TYPE" = "torch" ] ; then \
|
||||||
pip install ".[torch]" ; \
|
pip install "/tmp/project/[torch]" ; \
|
||||||
elif [ "$INSTALL_TYPE" = "transformer" ] ; then \
|
elif [ "$INSTALL_TYPE" = "transformer" ] ; then \
|
||||||
pip install ".[transformer]" && \
|
pip install "/tmp/project/[transformer]" && \
|
||||||
python -m crawl4ai.model_loader ; \
|
python -m crawl4ai.model_loader ; \
|
||||||
else \
|
else \
|
||||||
pip install "." ; \
|
pip install "/tmp/project" ; \
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Install MkDocs and required plugins
|
RUN pip install --no-cache-dir --upgrade pip && \
|
||||||
RUN pip install --no-cache-dir \
|
/tmp/install.sh && \
|
||||||
mkdocs \
|
python -c "import crawl4ai; print('✅ crawl4ai is ready to rock!')" && \
|
||||||
mkdocs-material \
|
python -c "from playwright.sync_api import sync_playwright; print('✅ Playwright is feeling dramatic!')"
|
||||||
mkdocs-terminal \
|
|
||||||
pymdown-extensions
|
|
||||||
|
|
||||||
# Build MkDocs documentation
|
RUN playwright install --with-deps chromium
|
||||||
RUN mkdocs build
|
|
||||||
|
|
||||||
# Install Playwright and browsers
|
COPY deploy/docker/* ${APP_HOME}/
|
||||||
RUN if [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
|
|
||||||
playwright install chromium; \
|
|
||||||
elif [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
|
|
||||||
playwright install chromium; \
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Expose port
|
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
|
||||||
EXPOSE 8000 11235 9222 8080
|
CMD bash -c '\
|
||||||
|
MEM=$(free -m | awk "/^Mem:/{print \$2}"); \
|
||||||
|
if [ $MEM -lt 2048 ]; then \
|
||||||
|
echo "⚠️ Warning: Less than 2GB RAM available! Your container might need a memory boost! 🚀"; \
|
||||||
|
exit 1; \
|
||||||
|
fi && \
|
||||||
|
redis-cli ping > /dev/null && \
|
||||||
|
curl -f http://localhost:8000/health || exit 1'
|
||||||
|
|
||||||
|
EXPOSE 6379
|
||||||
|
CMD ["supervisord", "-c", "supervisord.conf"]
|
||||||
|
|
||||||
# Start the FastAPI server
|
|
||||||
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "11235"]
|
|
||||||
18
LICENSE
18
LICENSE
@@ -49,3 +49,21 @@ You may add Your own copyright statement to Your modifications and may provide a
|
|||||||
9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
|
9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
---
|
||||||
|
Attribution Requirement
|
||||||
|
|
||||||
|
All distributions, publications, or public uses of this software, or derivative works based on this software, must include the following attribution:
|
||||||
|
|
||||||
|
"This product includes software developed by UncleCode (https://x.com/unclecode) as part of the Crawl4AI project (https://github.com/unclecode/crawl4ai)."
|
||||||
|
|
||||||
|
This attribution must be displayed in a prominent and easily accessible location, such as:
|
||||||
|
|
||||||
|
- For software distributions: In a NOTICE file, README file, or equivalent documentation.
|
||||||
|
- For publications (research papers, articles, blog posts): In the acknowledgments section or a footnote.
|
||||||
|
- For websites/web applications: In an "About" or "Credits" section.
|
||||||
|
- For command-line tools: In the help/usage output.
|
||||||
|
|
||||||
|
This requirement ensures proper credit is given for the use of Crawl4AI and helps promote the project.
|
||||||
|
|
||||||
|
---
|
||||||
211
README.md
211
README.md
@@ -15,14 +15,27 @@
|
|||||||
[](https://github.com/unclecode/crawl4ai/blob/main/LICENSE)
|
[](https://github.com/unclecode/crawl4ai/blob/main/LICENSE)
|
||||||
[](https://github.com/psf/black)
|
[](https://github.com/psf/black)
|
||||||
[](https://github.com/PyCQA/bandit)
|
[](https://github.com/PyCQA/bandit)
|
||||||
|
[](code_of_conduct.md)
|
||||||
|
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
Crawl4AI is the #1 trending GitHub repository, actively maintained by a vibrant community. It delivers blazing-fast, AI-ready web crawling tailored for LLMs, AI agents, and data pipelines. Open source, flexible, and built for real-time performance, Crawl4AI empowers developers with unmatched speed, precision, and deployment ease.
|
Crawl4AI is the #1 trending GitHub repository, actively maintained by a vibrant community. It delivers blazing-fast, AI-ready web crawling tailored for LLMs, AI agents, and data pipelines. Open source, flexible, and built for real-time performance, Crawl4AI empowers developers with unmatched speed, precision, and deployment ease.
|
||||||
|
|
||||||
[✨ Check out latest update v0.4.24x](#-recent-updates)
|
[✨ Check out latest update v0.5.0](#-recent-updates)
|
||||||
|
|
||||||
🎉 **Version 0.4.24x is out!** Major improvements in extraction strategies with enhanced JSON handling, SSL security, and Amazon product extraction. Plus, a completely revamped content filtering system! [Read the release notes →](https://crawl4ai.com/mkdocs/blog)
|
🎉 **Version 0.5.0 is out!** This major release introduces Deep Crawling with BFS/DFS/BestFirst strategies, Memory-Adaptive Dispatcher, Multiple Crawling Strategies (Playwright and HTTP), Docker Deployment with FastAPI, Command-Line Interface (CLI), and more! [Read the release notes →](https://docs.crawl4ai.com/blog)
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary>🤓 <strong>My Personal Story</strong></summary>
|
||||||
|
|
||||||
|
My journey with computers started in childhood when my dad, a computer scientist, introduced me to an Amstrad computer. Those early days sparked a fascination with technology, leading me to pursue computer science and specialize in NLP during my postgraduate studies. It was during this time that I first delved into web crawling, building tools to help researchers organize papers and extract information from publications a challenging yet rewarding experience that honed my skills in data extraction.
|
||||||
|
|
||||||
|
Fast forward to 2023, I was working on a tool for a project and needed a crawler to convert a webpage into markdown. While exploring solutions, I found one that claimed to be open-source but required creating an account and generating an API token. Worse, it turned out to be a SaaS model charging $16, and its quality didn’t meet my standards. Frustrated, I realized this was a deeper problem. That frustration turned into turbo anger mode, and I decided to build my own solution. In just a few days, I created Crawl4AI. To my surprise, it went viral, earning thousands of GitHub stars and resonating with a global community.
|
||||||
|
|
||||||
|
I made Crawl4AI open-source for two reasons. First, it’s my way of giving back to the open-source community that has supported me throughout my career. Second, I believe data should be accessible to everyone, not locked behind paywalls or monopolized by a few. Open access to data lays the foundation for the democratization of AI, a vision where individuals can train their own models and take ownership of their information. This library is the first step in a larger journey to create the best open-source data extraction and generation tool the world has ever seen, built collaboratively by a passionate community.
|
||||||
|
|
||||||
|
Thank you to everyone who has supported this project, used it, and shared feedback. Your encouragement motivates me to dream even bigger. Join us, file issues, submit PRs, or spread the word. Together, we can build a tool that truly empowers people to access their own data and reshape the future of AI.
|
||||||
|
</details>
|
||||||
|
|
||||||
## 🧐 Why Crawl4AI?
|
## 🧐 Why Crawl4AI?
|
||||||
|
|
||||||
@@ -40,6 +53,9 @@ Crawl4AI is the #1 trending GitHub repository, actively maintained by a vibrant
|
|||||||
# Install the package
|
# Install the package
|
||||||
pip install -U crawl4ai
|
pip install -U crawl4ai
|
||||||
|
|
||||||
|
# For pre release versions
|
||||||
|
pip install crawl4ai --pre
|
||||||
|
|
||||||
# Run post-installation setup
|
# Run post-installation setup
|
||||||
crawl4ai-setup
|
crawl4ai-setup
|
||||||
|
|
||||||
@@ -52,7 +68,7 @@ If you encounter any browser-related issues, you can install them manually:
|
|||||||
python -m playwright install --with-deps chromium
|
python -m playwright install --with-deps chromium
|
||||||
```
|
```
|
||||||
|
|
||||||
2. Run a simple web crawl:
|
2. Run a simple web crawl with Python:
|
||||||
```python
|
```python
|
||||||
import asyncio
|
import asyncio
|
||||||
from crawl4ai import *
|
from crawl4ai import *
|
||||||
@@ -68,6 +84,18 @@ if __name__ == "__main__":
|
|||||||
asyncio.run(main())
|
asyncio.run(main())
|
||||||
```
|
```
|
||||||
|
|
||||||
|
3. Or use the new command-line interface:
|
||||||
|
```bash
|
||||||
|
# Basic crawl with markdown output
|
||||||
|
crwl https://www.nbcnews.com/business -o markdown
|
||||||
|
|
||||||
|
# Deep crawl with BFS strategy, max 10 pages
|
||||||
|
crwl https://docs.crawl4ai.com --deep-crawl bfs --max-pages 10
|
||||||
|
|
||||||
|
# Use LLM extraction with a specific question
|
||||||
|
crwl https://www.example.com/products -q "Extract all product prices"
|
||||||
|
```
|
||||||
|
|
||||||
## ✨ Features
|
## ✨ Features
|
||||||
|
|
||||||
<details>
|
<details>
|
||||||
@@ -96,6 +124,7 @@ if __name__ == "__main__":
|
|||||||
|
|
||||||
- 🖥️ **Managed Browser**: Use user-owned browsers with full control, avoiding bot detection.
|
- 🖥️ **Managed Browser**: Use user-owned browsers with full control, avoiding bot detection.
|
||||||
- 🔄 **Remote Browser Control**: Connect to Chrome Developer Tools Protocol for remote, large-scale data extraction.
|
- 🔄 **Remote Browser Control**: Connect to Chrome Developer Tools Protocol for remote, large-scale data extraction.
|
||||||
|
- 👤 **Browser Profiler**: Create and manage persistent profiles with saved authentication states, cookies, and settings.
|
||||||
- 🔒 **Session Management**: Preserve browser states and reuse them for multi-step crawling.
|
- 🔒 **Session Management**: Preserve browser states and reuse them for multi-step crawling.
|
||||||
- 🧩 **Proxy Support**: Seamlessly connect to proxies with authentication for secure access.
|
- 🧩 **Proxy Support**: Seamlessly connect to proxies with authentication for secure access.
|
||||||
- ⚙️ **Full Browser Control**: Modify headers, cookies, user agents, and more for tailored crawling setups.
|
- ⚙️ **Full Browser Control**: Modify headers, cookies, user agents, and more for tailored crawling setups.
|
||||||
@@ -124,10 +153,11 @@ if __name__ == "__main__":
|
|||||||
<details>
|
<details>
|
||||||
<summary>🚀 <strong>Deployment</strong></summary>
|
<summary>🚀 <strong>Deployment</strong></summary>
|
||||||
|
|
||||||
- 🐳 **Dockerized Setup**: Optimized Docker image with API server for easy deployment.
|
- 🐳 **Dockerized Setup**: Optimized Docker image with FastAPI server for easy deployment.
|
||||||
|
- 🔑 **Secure Authentication**: Built-in JWT token authentication for API security.
|
||||||
- 🔄 **API Gateway**: One-click deployment with secure token authentication for API-based workflows.
|
- 🔄 **API Gateway**: One-click deployment with secure token authentication for API-based workflows.
|
||||||
- 🌐 **Scalable Architecture**: Designed for mass-scale production and optimized server performance.
|
- 🌐 **Scalable Architecture**: Designed for mass-scale production and optimized server performance.
|
||||||
- ⚙️ **DigitalOcean Deployment**: Ready-to-deploy configurations for DigitalOcean and similar platforms.
|
- ☁️ **Cloud Deployment**: Ready-to-deploy configurations for major cloud platforms.
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
@@ -148,7 +178,7 @@ if __name__ == "__main__":
|
|||||||
|
|
||||||
✨ Play around with this [](https://colab.research.google.com/drive/1SgRPrByQLzjRfwoRNq1wSGE9nYY_EE8C?usp=sharing)
|
✨ Play around with this [](https://colab.research.google.com/drive/1SgRPrByQLzjRfwoRNq1wSGE9nYY_EE8C?usp=sharing)
|
||||||
|
|
||||||
✨ Visit our [Documentation Website](https://crawl4ai.com/mkdocs/)
|
✨ Visit our [Documentation Website](https://docs.crawl4ai.com/)
|
||||||
|
|
||||||
## Installation 🛠️
|
## Installation 🛠️
|
||||||
|
|
||||||
@@ -264,7 +294,7 @@ task_id = response.json()["task_id"]
|
|||||||
result = requests.get(f"http://localhost:11235/task/{task_id}")
|
result = requests.get(f"http://localhost:11235/task/{task_id}")
|
||||||
```
|
```
|
||||||
|
|
||||||
For more examples, see our [Docker Examples](https://github.com/unclecode/crawl4ai/blob/main/docs/examples/docker_example.py). For advanced configuration, environment variables, and usage examples, see our [Docker Deployment Guide](https://crawl4ai.com/mkdocs/basic/docker-deployment/).
|
For more examples, see our [Docker Examples](https://github.com/unclecode/crawl4ai/blob/main/docs/examples/docker_example.py). For advanced configuration, environment variables, and usage examples, see our [Docker Deployment Guide](https://docs.crawl4ai.com/basic/docker-deployment/).
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
@@ -302,9 +332,8 @@ async def main():
|
|||||||
url="https://docs.micronaut.io/4.7.6/guide/",
|
url="https://docs.micronaut.io/4.7.6/guide/",
|
||||||
config=run_config
|
config=run_config
|
||||||
)
|
)
|
||||||
print(len(result.markdown))
|
print(len(result.markdown.raw_markdown))
|
||||||
print(len(result.fit_markdown))
|
print(len(result.markdown.fit_markdown))
|
||||||
print(len(result.markdown_v2.fit_markdown))
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
asyncio.run(main())
|
asyncio.run(main())
|
||||||
@@ -391,7 +420,7 @@ if __name__ == "__main__":
|
|||||||
```python
|
```python
|
||||||
import os
|
import os
|
||||||
import asyncio
|
import asyncio
|
||||||
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
|
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode, LlmConfig
|
||||||
from crawl4ai.extraction_strategy import LLMExtractionStrategy
|
from crawl4ai.extraction_strategy import LLMExtractionStrategy
|
||||||
from pydantic import BaseModel, Field
|
from pydantic import BaseModel, Field
|
||||||
|
|
||||||
@@ -407,7 +436,7 @@ async def main():
|
|||||||
extraction_strategy=LLMExtractionStrategy(
|
extraction_strategy=LLMExtractionStrategy(
|
||||||
# Here you can use any provider that Litellm library supports, for instance: ollama/qwen2
|
# Here you can use any provider that Litellm library supports, for instance: ollama/qwen2
|
||||||
# provider="ollama/qwen2", api_token="no-token",
|
# provider="ollama/qwen2", api_token="no-token",
|
||||||
provider="openai/gpt-4o", api_token=os.getenv('OPENAI_API_KEY'),
|
llmConfig = LlmConfig(provider="openai/gpt-4o", api_token=os.getenv('OPENAI_API_KEY')),
|
||||||
schema=OpenAIModelFee.schema(),
|
schema=OpenAIModelFee.schema(),
|
||||||
extraction_type="schema",
|
extraction_type="schema",
|
||||||
instruction="""From the crawled content, extract all mentioned model names along with their fees for input and output tokens.
|
instruction="""From the crawled content, extract all mentioned model names along with their fees for input and output tokens.
|
||||||
@@ -431,7 +460,7 @@ if __name__ == "__main__":
|
|||||||
</details>
|
</details>
|
||||||
|
|
||||||
<details>
|
<details>
|
||||||
<summary>🤖 <strong>Using You own Browswer with Custome User Profile</strong></summary>
|
<summary>🤖 <strong>Using You own Browser with Custom User Profile</strong></summary>
|
||||||
|
|
||||||
```python
|
```python
|
||||||
import os, sys
|
import os, sys
|
||||||
@@ -469,24 +498,80 @@ async def test_news_crawl():
|
|||||||
|
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
|
|
||||||
## ✨ Recent Updates
|
## ✨ Recent Updates
|
||||||
|
|
||||||
- 🔒 **Enhanced SSL & Security**: New SSL certificate handling with custom paths and validation options for secure crawling
|
### Version 0.5.0 Major Release Highlights
|
||||||
- 🔍 **Smart Content Filtering**: Advanced filtering system with regex support and efficient chunking strategies
|
|
||||||
- 📦 **Improved JSON Extraction**: Support for complex JSONPath, JSON-CSS, and Microdata extraction
|
|
||||||
- 🏗️ **New Field Types**: Added `computed`, `conditional`, `aggregate`, and `template` field types
|
|
||||||
- ⚡ **Performance Boost**: Optimized caching, parallel processing, and memory management
|
|
||||||
- 🐛 **Better Error Handling**: Enhanced debugging capabilities with detailed error tracking
|
|
||||||
- 🔐 **Security Features**: Improved input validation and safe expression evaluation
|
|
||||||
|
|
||||||
Read the full details of this release in our [0.4.24 Release Notes](https://github.com/unclecode/crawl4ai/blob/main/CHANGELOG.md).
|
- **🚀 Deep Crawling System**: Explore websites beyond initial URLs with three strategies:
|
||||||
|
- **BFS Strategy**: Breadth-first search explores websites level by level
|
||||||
|
- **DFS Strategy**: Depth-first search explores each branch deeply before backtracking
|
||||||
|
- **BestFirst Strategy**: Uses scoring functions to prioritize which URLs to crawl next
|
||||||
|
- **Page Limiting**: Control the maximum number of pages to crawl with `max_pages` parameter
|
||||||
|
- **Score Thresholds**: Filter URLs based on relevance scores
|
||||||
|
- **⚡ Memory-Adaptive Dispatcher**: Dynamically adjusts concurrency based on system memory with built-in rate limiting
|
||||||
|
- **🔄 Multiple Crawling Strategies**:
|
||||||
|
- **AsyncPlaywrightCrawlerStrategy**: Browser-based crawling with JavaScript support (Default)
|
||||||
|
- **AsyncHTTPCrawlerStrategy**: Fast, lightweight HTTP-only crawler for simple tasks
|
||||||
|
- **🐳 Docker Deployment**: Easy deployment with FastAPI server and streaming/non-streaming endpoints
|
||||||
|
- **💻 Command-Line Interface**: New `crwl` CLI provides convenient terminal access to all features with intuitive commands and configuration options
|
||||||
|
- **👤 Browser Profiler**: Create and manage persistent browser profiles to save authentication states, cookies, and settings for seamless crawling of protected content
|
||||||
|
- **🧠 Crawl4AI Coding Assistant**: AI-powered coding assistant to answer your question for Crawl4ai, and generate proper code for crawling.
|
||||||
|
- **🏎️ LXML Scraping Mode**: Fast HTML parsing using the `lxml` library for improved performance
|
||||||
|
- **🌐 Proxy Rotation**: Built-in support for proxy switching with `RoundRobinProxyStrategy`
|
||||||
|
- **🤖 LLM Content Filter**: Intelligent markdown generation using LLMs
|
||||||
|
- **📄 PDF Processing**: Extract text, images, and metadata from PDF files
|
||||||
|
- **🔗 URL Redirection Tracking**: Automatically follow and record HTTP redirects
|
||||||
|
- **🤖 LLM Schema Generation**: Easily create extraction schemas with LLM assistance
|
||||||
|
- **🔍 robots.txt Compliance**: Respect website crawling rules
|
||||||
|
|
||||||
|
Read the full details in our [0.5.0 Release Notes](https://docs.crawl4ai.com/blog/releases/0.5.0.html) or check the [CHANGELOG](https://github.com/unclecode/crawl4ai/blob/main/CHANGELOG.md).
|
||||||
|
|
||||||
|
## Version Numbering in Crawl4AI
|
||||||
|
|
||||||
|
Crawl4AI follows standard Python version numbering conventions (PEP 440) to help users understand the stability and features of each release.
|
||||||
|
|
||||||
|
### Version Numbers Explained
|
||||||
|
|
||||||
|
Our version numbers follow this pattern: `MAJOR.MINOR.PATCH` (e.g., 0.4.3)
|
||||||
|
|
||||||
|
#### Pre-release Versions
|
||||||
|
We use different suffixes to indicate development stages:
|
||||||
|
|
||||||
|
- `dev` (0.4.3dev1): Development versions, unstable
|
||||||
|
- `a` (0.4.3a1): Alpha releases, experimental features
|
||||||
|
- `b` (0.4.3b1): Beta releases, feature complete but needs testing
|
||||||
|
- `rc` (0.4.3rc1): Release candidates, potential final version
|
||||||
|
|
||||||
|
#### Installation
|
||||||
|
- Regular installation (stable version):
|
||||||
|
```bash
|
||||||
|
pip install -U crawl4ai
|
||||||
|
```
|
||||||
|
|
||||||
|
- Install pre-release versions:
|
||||||
|
```bash
|
||||||
|
pip install crawl4ai --pre
|
||||||
|
```
|
||||||
|
|
||||||
|
- Install specific version:
|
||||||
|
```bash
|
||||||
|
pip install crawl4ai==0.4.3b1
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Why Pre-releases?
|
||||||
|
We use pre-releases to:
|
||||||
|
- Test new features in real-world scenarios
|
||||||
|
- Gather feedback before final releases
|
||||||
|
- Ensure stability for production users
|
||||||
|
- Allow early adopters to try new features
|
||||||
|
|
||||||
|
For production environments, we recommend using the stable version. For testing new features, you can opt-in to pre-releases using the `--pre` flag.
|
||||||
|
|
||||||
## 📖 Documentation & Roadmap
|
## 📖 Documentation & Roadmap
|
||||||
|
|
||||||
> 🚨 **Documentation Update Alert**: We're undertaking a major documentation overhaul next week to reflect recent updates and improvements. Stay tuned for a more comprehensive and up-to-date guide!
|
> 🚨 **Documentation Update Alert**: We're undertaking a major documentation overhaul next week to reflect recent updates and improvements. Stay tuned for a more comprehensive and up-to-date guide!
|
||||||
|
|
||||||
For current documentation, including installation instructions, advanced features, and API reference, visit our [Documentation Website](https://crawl4ai.com/mkdocs/).
|
For current documentation, including installation instructions, advanced features, and API reference, visit our [Documentation Website](https://docs.crawl4ai.com/).
|
||||||
|
|
||||||
To check our development plans and upcoming features, visit our [Roadmap](https://github.com/unclecode/crawl4ai/blob/main/ROADMAP.md).
|
To check our development plans and upcoming features, visit our [Roadmap](https://github.com/unclecode/crawl4ai/blob/main/ROADMAP.md).
|
||||||
|
|
||||||
@@ -510,11 +595,85 @@ To check our development plans and upcoming features, visit our [Roadmap](https:
|
|||||||
|
|
||||||
## 🤝 Contributing
|
## 🤝 Contributing
|
||||||
|
|
||||||
We welcome contributions from the open-source community. Check out our [contribution guidelines](https://github.com/unclecode/crawl4ai/blob/main/CONTRIBUTING.md) for more information.
|
We welcome contributions from the open-source community. Check out our [contribution guidelines](https://github.com/unclecode/crawl4ai/blob/main/CONTRIBUTORS.md) for more information.
|
||||||
|
|
||||||
## 📄 License
|
I'll help modify the license section with badges. For the halftone effect, here's a version with it:
|
||||||
|
|
||||||
Crawl4AI is released under the [Apache 2.0 License](https://github.com/unclecode/crawl4ai/blob/main/LICENSE).
|
Here's the updated license section:
|
||||||
|
|
||||||
|
## 📄 License & Attribution
|
||||||
|
|
||||||
|
This project is licensed under the Apache License 2.0 with a required attribution clause. See the [Apache 2.0 License](https://github.com/unclecode/crawl4ai/blob/main/LICENSE) file for details.
|
||||||
|
|
||||||
|
### Attribution Requirements
|
||||||
|
When using Crawl4AI, you must include one of the following attribution methods:
|
||||||
|
|
||||||
|
#### 1. Badge Attribution (Recommended)
|
||||||
|
Add one of these badges to your README, documentation, or website:
|
||||||
|
|
||||||
|
| Theme | Badge |
|
||||||
|
|-------|-------|
|
||||||
|
| **Disco Theme (Animated)** | <a href="https://github.com/unclecode/crawl4ai"><img src="./docs/assets/powered-by-disco.svg" alt="Powered by Crawl4AI" width="200"/></a> |
|
||||||
|
| **Night Theme (Dark with Neon)** | <a href="https://github.com/unclecode/crawl4ai"><img src="./docs/assets/powered-by-night.svg" alt="Powered by Crawl4AI" width="200"/></a> |
|
||||||
|
| **Dark Theme (Classic)** | <a href="https://github.com/unclecode/crawl4ai"><img src="./docs/assets/powered-by-dark.svg" alt="Powered by Crawl4AI" width="200"/></a> |
|
||||||
|
| **Light Theme (Classic)** | <a href="https://github.com/unclecode/crawl4ai"><img src="./docs/assets/powered-by-light.svg" alt="Powered by Crawl4AI" width="200"/></a> |
|
||||||
|
|
||||||
|
|
||||||
|
HTML code for adding the badges:
|
||||||
|
```html
|
||||||
|
<!-- Disco Theme (Animated) -->
|
||||||
|
<a href="https://github.com/unclecode/crawl4ai">
|
||||||
|
<img src="https://raw.githubusercontent.com/unclecode/crawl4ai/main/docs/assets/powered-by-disco.svg" alt="Powered by Crawl4AI" width="200"/>
|
||||||
|
</a>
|
||||||
|
|
||||||
|
<!-- Night Theme (Dark with Neon) -->
|
||||||
|
<a href="https://github.com/unclecode/crawl4ai">
|
||||||
|
<img src="https://raw.githubusercontent.com/unclecode/crawl4ai/main/docs/assets/powered-by-night.svg" alt="Powered by Crawl4AI" width="200"/>
|
||||||
|
</a>
|
||||||
|
|
||||||
|
<!-- Dark Theme (Classic) -->
|
||||||
|
<a href="https://github.com/unclecode/crawl4ai">
|
||||||
|
<img src="https://raw.githubusercontent.com/unclecode/crawl4ai/main/docs/assets/powered-by-dark.svg" alt="Powered by Crawl4AI" width="200"/>
|
||||||
|
</a>
|
||||||
|
|
||||||
|
<!-- Light Theme (Classic) -->
|
||||||
|
<a href="https://github.com/unclecode/crawl4ai">
|
||||||
|
<img src="https://raw.githubusercontent.com/unclecode/crawl4ai/main/docs/assets/powered-by-light.svg" alt="Powered by Crawl4AI" width="200"/>
|
||||||
|
</a>
|
||||||
|
|
||||||
|
<!-- Simple Shield Badge -->
|
||||||
|
<a href="https://github.com/unclecode/crawl4ai">
|
||||||
|
<img src="https://img.shields.io/badge/Powered%20by-Crawl4AI-blue?style=flat-square" alt="Powered by Crawl4AI"/>
|
||||||
|
</a>
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 2. Text Attribution
|
||||||
|
Add this line to your documentation:
|
||||||
|
```
|
||||||
|
This project uses Crawl4AI (https://github.com/unclecode/crawl4ai) for web data extraction.
|
||||||
|
```
|
||||||
|
|
||||||
|
## 📚 Citation
|
||||||
|
|
||||||
|
If you use Crawl4AI in your research or project, please cite:
|
||||||
|
|
||||||
|
```bibtex
|
||||||
|
@software{crawl4ai2024,
|
||||||
|
author = {UncleCode},
|
||||||
|
title = {Crawl4AI: Open-source LLM Friendly Web Crawler & Scraper},
|
||||||
|
year = {2024},
|
||||||
|
publisher = {GitHub},
|
||||||
|
journal = {GitHub Repository},
|
||||||
|
howpublished = {\url{https://github.com/unclecode/crawl4ai}},
|
||||||
|
commit = {Please use the commit hash you're working with}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Text citation format:
|
||||||
|
```
|
||||||
|
UncleCode. (2024). Crawl4AI: Open-source LLM Friendly Web Crawler & Scraper [Computer software].
|
||||||
|
GitHub. https://github.com/unclecode/crawl4ai
|
||||||
|
```
|
||||||
|
|
||||||
## 📧 Contact
|
## 📧 Contact
|
||||||
|
|
||||||
|
|||||||
24
cliff.toml
Normal file
24
cliff.toml
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
[changelog]
|
||||||
|
# Template format
|
||||||
|
header = """
|
||||||
|
# Changelog\n
|
||||||
|
All notable changes to this project will be documented in this file.\n
|
||||||
|
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
||||||
|
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).\n
|
||||||
|
"""
|
||||||
|
|
||||||
|
# Organize commits by type
|
||||||
|
[git]
|
||||||
|
conventional_commits = true
|
||||||
|
filter_unconventional = true
|
||||||
|
commit_parsers = [
|
||||||
|
{ message = "^feat", group = "Added"},
|
||||||
|
{ message = "^fix", group = "Fixed"},
|
||||||
|
{ message = "^doc", group = "Documentation"},
|
||||||
|
{ message = "^perf", group = "Performance"},
|
||||||
|
{ message = "^refactor", group = "Changed"},
|
||||||
|
{ message = "^style", group = "Changed"},
|
||||||
|
{ message = "^test", group = "Testing"},
|
||||||
|
{ message = "^chore\\(release\\): prepare for", skip = true},
|
||||||
|
{ message = "^chore", group = "Miscellaneous Tasks"},
|
||||||
|
]
|
||||||
@@ -1,46 +1,147 @@
|
|||||||
# __init__.py
|
# __init__.py
|
||||||
|
import warnings
|
||||||
|
|
||||||
from .async_webcrawler import AsyncWebCrawler, CacheMode
|
from .async_webcrawler import AsyncWebCrawler, CacheMode
|
||||||
from .async_configs import BrowserConfig, CrawlerRunConfig
|
from .async_configs import BrowserConfig, CrawlerRunConfig, HTTPCrawlerConfig
|
||||||
from .extraction_strategy import ExtractionStrategy, LLMExtractionStrategy, CosineStrategy, JsonCssExtractionStrategy
|
from .content_scraping_strategy import (
|
||||||
|
ContentScrapingStrategy,
|
||||||
|
WebScrapingStrategy,
|
||||||
|
LXMLWebScrapingStrategy,
|
||||||
|
)
|
||||||
|
from .async_logger import (
|
||||||
|
AsyncLoggerBase,
|
||||||
|
AsyncLogger,
|
||||||
|
)
|
||||||
|
from .proxy_strategy import (
|
||||||
|
ProxyRotationStrategy,
|
||||||
|
RoundRobinProxyStrategy,
|
||||||
|
)
|
||||||
|
from .extraction_strategy import (
|
||||||
|
ExtractionStrategy,
|
||||||
|
LLMExtractionStrategy,
|
||||||
|
CosineStrategy,
|
||||||
|
JsonCssExtractionStrategy,
|
||||||
|
JsonXPathExtractionStrategy,
|
||||||
|
)
|
||||||
from .chunking_strategy import ChunkingStrategy, RegexChunking
|
from .chunking_strategy import ChunkingStrategy, RegexChunking
|
||||||
from .markdown_generation_strategy import DefaultMarkdownGenerator
|
from .markdown_generation_strategy import DefaultMarkdownGenerator
|
||||||
from .content_filter_strategy import PruningContentFilter, BM25ContentFilter
|
from .content_filter_strategy import (
|
||||||
from .models import CrawlResult
|
PruningContentFilter,
|
||||||
from .__version__ import __version__
|
BM25ContentFilter,
|
||||||
|
LLMContentFilter,
|
||||||
|
RelevantContentFilter,
|
||||||
|
)
|
||||||
|
from .models import CrawlResult, MarkdownGenerationResult
|
||||||
|
from .async_dispatcher import (
|
||||||
|
MemoryAdaptiveDispatcher,
|
||||||
|
SemaphoreDispatcher,
|
||||||
|
RateLimiter,
|
||||||
|
CrawlerMonitor,
|
||||||
|
DisplayMode,
|
||||||
|
BaseDispatcher,
|
||||||
|
)
|
||||||
|
from .docker_client import Crawl4aiDockerClient
|
||||||
|
from .hub import CrawlerHub
|
||||||
|
from .browser_profiler import BrowserProfiler
|
||||||
|
from .deep_crawling import (
|
||||||
|
DeepCrawlStrategy,
|
||||||
|
BFSDeepCrawlStrategy,
|
||||||
|
FilterChain,
|
||||||
|
ContentTypeFilter,
|
||||||
|
DomainFilter,
|
||||||
|
URLFilter,
|
||||||
|
FilterStats,
|
||||||
|
SEOFilter,
|
||||||
|
KeywordRelevanceScorer,
|
||||||
|
URLScorer,
|
||||||
|
CompositeScorer,
|
||||||
|
DomainAuthorityScorer,
|
||||||
|
FreshnessScorer,
|
||||||
|
PathDepthScorer,
|
||||||
|
BestFirstCrawlingStrategy,
|
||||||
|
DFSDeepCrawlStrategy,
|
||||||
|
DeepCrawlDecorator,
|
||||||
|
)
|
||||||
|
|
||||||
__all__ = [
|
__all__ = [
|
||||||
|
"AsyncLoggerBase",
|
||||||
|
"AsyncLogger",
|
||||||
"AsyncWebCrawler",
|
"AsyncWebCrawler",
|
||||||
|
"BrowserProfiler",
|
||||||
|
"DeepCrawlStrategy",
|
||||||
|
"BFSDeepCrawlStrategy",
|
||||||
|
"BestFirstCrawlingStrategy",
|
||||||
|
"DFSDeepCrawlStrategy",
|
||||||
|
"FilterChain",
|
||||||
|
"ContentTypeFilter",
|
||||||
|
"DomainFilter",
|
||||||
|
"FilterStats",
|
||||||
|
"URLFilter",
|
||||||
|
"SEOFilter",
|
||||||
|
"KeywordRelevanceScorer",
|
||||||
|
"URLScorer",
|
||||||
|
"CompositeScorer",
|
||||||
|
"DomainAuthorityScorer",
|
||||||
|
"FreshnessScorer",
|
||||||
|
"PathDepthScorer",
|
||||||
|
"DeepCrawlDecorator",
|
||||||
"CrawlResult",
|
"CrawlResult",
|
||||||
|
"CrawlerHub",
|
||||||
"CacheMode",
|
"CacheMode",
|
||||||
'BrowserConfig',
|
"ContentScrapingStrategy",
|
||||||
'CrawlerRunConfig',
|
"WebScrapingStrategy",
|
||||||
'ExtractionStrategy',
|
"LXMLWebScrapingStrategy",
|
||||||
'LLMExtractionStrategy',
|
"BrowserConfig",
|
||||||
'CosineStrategy',
|
"CrawlerRunConfig",
|
||||||
'JsonCssExtractionStrategy',
|
"HTTPCrawlerConfig",
|
||||||
'ChunkingStrategy',
|
"ExtractionStrategy",
|
||||||
'RegexChunking',
|
"LLMExtractionStrategy",
|
||||||
'DefaultMarkdownGenerator',
|
"CosineStrategy",
|
||||||
'PruningContentFilter',
|
"JsonCssExtractionStrategy",
|
||||||
'BM25ContentFilter',
|
"JsonXPathExtractionStrategy",
|
||||||
|
"ChunkingStrategy",
|
||||||
|
"RegexChunking",
|
||||||
|
"DefaultMarkdownGenerator",
|
||||||
|
"RelevantContentFilter",
|
||||||
|
"PruningContentFilter",
|
||||||
|
"BM25ContentFilter",
|
||||||
|
"LLMContentFilter",
|
||||||
|
"BaseDispatcher",
|
||||||
|
"MemoryAdaptiveDispatcher",
|
||||||
|
"SemaphoreDispatcher",
|
||||||
|
"RateLimiter",
|
||||||
|
"CrawlerMonitor",
|
||||||
|
"DisplayMode",
|
||||||
|
"MarkdownGenerationResult",
|
||||||
|
"Crawl4aiDockerClient",
|
||||||
|
"ProxyRotationStrategy",
|
||||||
|
"RoundRobinProxyStrategy",
|
||||||
]
|
]
|
||||||
|
|
||||||
def is_sync_version_installed():
|
|
||||||
try:
|
|
||||||
import selenium
|
|
||||||
return True
|
|
||||||
except ImportError:
|
|
||||||
return False
|
|
||||||
|
|
||||||
if is_sync_version_installed():
|
# def is_sync_version_installed():
|
||||||
try:
|
# try:
|
||||||
from .web_crawler import WebCrawler
|
# import selenium # noqa
|
||||||
__all__.append("WebCrawler")
|
|
||||||
except ImportError:
|
# return True
|
||||||
import warnings
|
# except ImportError:
|
||||||
print("Warning: Failed to import WebCrawler even though selenium is installed. This might be due to other missing dependencies.")
|
# return False
|
||||||
else:
|
|
||||||
WebCrawler = None
|
|
||||||
# import warnings
|
# if is_sync_version_installed():
|
||||||
# print("Warning: Synchronous WebCrawler is not available. Install crawl4ai[sync] for synchronous support. However, please note that the synchronous version will be deprecated soon.")
|
# try:
|
||||||
|
# from .web_crawler import WebCrawler
|
||||||
|
|
||||||
|
# __all__.append("WebCrawler")
|
||||||
|
# except ImportError:
|
||||||
|
# print(
|
||||||
|
# "Warning: Failed to import WebCrawler even though selenium is installed. This might be due to other missing dependencies."
|
||||||
|
# )
|
||||||
|
# else:
|
||||||
|
# WebCrawler = None
|
||||||
|
# # import warnings
|
||||||
|
# # print("Warning: Synchronous WebCrawler is not available. Install crawl4ai[sync] for synchronous support. However, please note that the synchronous version will be deprecated soon.")
|
||||||
|
|
||||||
|
# Disable all Pydantic warnings
|
||||||
|
warnings.filterwarnings("ignore", module="pydantic")
|
||||||
|
# pydantic_warnings.filter_warnings()
|
||||||
|
|||||||
@@ -1,2 +1,2 @@
|
|||||||
# crawl4ai/_version.py
|
# crawl4ai/_version.py
|
||||||
__version__ = "0.4.245"
|
__version__ = "0.5.0"
|
||||||
|
|||||||
@@ -1,17 +1,154 @@
|
|||||||
|
import os
|
||||||
from .config import (
|
from .config import (
|
||||||
|
DEFAULT_PROVIDER,
|
||||||
MIN_WORD_THRESHOLD,
|
MIN_WORD_THRESHOLD,
|
||||||
IMAGE_DESCRIPTION_MIN_WORD_THRESHOLD,
|
IMAGE_DESCRIPTION_MIN_WORD_THRESHOLD,
|
||||||
|
PROVIDER_MODELS,
|
||||||
SCREENSHOT_HEIGHT_TRESHOLD,
|
SCREENSHOT_HEIGHT_TRESHOLD,
|
||||||
PAGE_TIMEOUT,
|
PAGE_TIMEOUT,
|
||||||
IMAGE_SCORE_THRESHOLD,
|
IMAGE_SCORE_THRESHOLD,
|
||||||
SOCIAL_MEDIA_DOMAINS,
|
SOCIAL_MEDIA_DOMAINS,
|
||||||
|
|
||||||
)
|
)
|
||||||
from .user_agent_generator import UserAgentGenerator
|
|
||||||
|
from .user_agent_generator import UAGen, ValidUAGenerator # , OnlineUAGenerator
|
||||||
from .extraction_strategy import ExtractionStrategy
|
from .extraction_strategy import ExtractionStrategy
|
||||||
from .chunking_strategy import ChunkingStrategy
|
from .chunking_strategy import ChunkingStrategy, RegexChunking
|
||||||
from .markdown_generation_strategy import MarkdownGenerationStrategy
|
from .markdown_generation_strategy import MarkdownGenerationStrategy
|
||||||
|
from .content_scraping_strategy import ContentScrapingStrategy, WebScrapingStrategy
|
||||||
|
from .deep_crawling import DeepCrawlStrategy
|
||||||
from typing import Union, List
|
from typing import Union, List
|
||||||
|
from .cache_context import CacheMode
|
||||||
|
from .proxy_strategy import ProxyRotationStrategy
|
||||||
|
|
||||||
|
import inspect
|
||||||
|
from typing import Any, Dict, Optional
|
||||||
|
from enum import Enum
|
||||||
|
|
||||||
|
|
||||||
|
def to_serializable_dict(obj: Any, ignore_default_value : bool = False) -> Dict:
|
||||||
|
"""
|
||||||
|
Recursively convert an object to a serializable dictionary using {type, params} structure
|
||||||
|
for complex objects.
|
||||||
|
"""
|
||||||
|
if obj is None:
|
||||||
|
return None
|
||||||
|
|
||||||
|
# Handle basic types
|
||||||
|
if isinstance(obj, (str, int, float, bool)):
|
||||||
|
return obj
|
||||||
|
|
||||||
|
# Handle Enum
|
||||||
|
if isinstance(obj, Enum):
|
||||||
|
return {"type": obj.__class__.__name__, "params": obj.value}
|
||||||
|
|
||||||
|
# Handle datetime objects
|
||||||
|
if hasattr(obj, "isoformat"):
|
||||||
|
return obj.isoformat()
|
||||||
|
|
||||||
|
# Handle lists, tuples, and sets, and basically any iterable
|
||||||
|
if isinstance(obj, (list, tuple, set)) or hasattr(obj, '__iter__') and not isinstance(obj, dict):
|
||||||
|
return [to_serializable_dict(item) for item in obj]
|
||||||
|
|
||||||
|
# Handle frozensets, which are not iterable
|
||||||
|
if isinstance(obj, frozenset):
|
||||||
|
return [to_serializable_dict(item) for item in list(obj)]
|
||||||
|
|
||||||
|
# Handle dictionaries - preserve them as-is
|
||||||
|
if isinstance(obj, dict):
|
||||||
|
return {
|
||||||
|
"type": "dict", # Mark as plain dictionary
|
||||||
|
"value": {str(k): to_serializable_dict(v) for k, v in obj.items()},
|
||||||
|
}
|
||||||
|
|
||||||
|
_type = obj.__class__.__name__
|
||||||
|
|
||||||
|
# Handle class instances
|
||||||
|
if hasattr(obj, "__class__"):
|
||||||
|
# Get constructor signature
|
||||||
|
sig = inspect.signature(obj.__class__.__init__)
|
||||||
|
params = sig.parameters
|
||||||
|
|
||||||
|
# Get current values
|
||||||
|
current_values = {}
|
||||||
|
for name, param in params.items():
|
||||||
|
if name == "self":
|
||||||
|
continue
|
||||||
|
|
||||||
|
value = getattr(obj, name, param.default)
|
||||||
|
|
||||||
|
# Only include if different from default, considering empty values
|
||||||
|
if not (is_empty_value(value) and is_empty_value(param.default)):
|
||||||
|
if value != param.default and not ignore_default_value:
|
||||||
|
current_values[name] = to_serializable_dict(value)
|
||||||
|
|
||||||
|
if hasattr(obj, '__slots__'):
|
||||||
|
for slot in obj.__slots__:
|
||||||
|
if slot.startswith('_'): # Handle private slots
|
||||||
|
attr_name = slot[1:] # Remove leading '_'
|
||||||
|
value = getattr(obj, slot, None)
|
||||||
|
if value is not None:
|
||||||
|
current_values[attr_name] = to_serializable_dict(value)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
return {
|
||||||
|
"type": obj.__class__.__name__,
|
||||||
|
"params": current_values
|
||||||
|
}
|
||||||
|
|
||||||
|
return str(obj)
|
||||||
|
|
||||||
|
|
||||||
|
def from_serializable_dict(data: Any) -> Any:
|
||||||
|
"""
|
||||||
|
Recursively convert a serializable dictionary back to an object instance.
|
||||||
|
"""
|
||||||
|
if data is None:
|
||||||
|
return None
|
||||||
|
|
||||||
|
# Handle basic types
|
||||||
|
if isinstance(data, (str, int, float, bool)):
|
||||||
|
return data
|
||||||
|
|
||||||
|
# Handle typed data
|
||||||
|
if isinstance(data, dict) and "type" in data:
|
||||||
|
# Handle plain dictionaries
|
||||||
|
if data["type"] == "dict":
|
||||||
|
return {k: from_serializable_dict(v) for k, v in data["value"].items()}
|
||||||
|
|
||||||
|
# Import from crawl4ai for class instances
|
||||||
|
import crawl4ai
|
||||||
|
|
||||||
|
cls = getattr(crawl4ai, data["type"])
|
||||||
|
|
||||||
|
# Handle Enum
|
||||||
|
if issubclass(cls, Enum):
|
||||||
|
return cls(data["params"])
|
||||||
|
|
||||||
|
# Handle class instances
|
||||||
|
constructor_args = {
|
||||||
|
k: from_serializable_dict(v) for k, v in data["params"].items()
|
||||||
|
}
|
||||||
|
return cls(**constructor_args)
|
||||||
|
|
||||||
|
# Handle lists
|
||||||
|
if isinstance(data, list):
|
||||||
|
return [from_serializable_dict(item) for item in data]
|
||||||
|
|
||||||
|
# Handle raw dictionaries (legacy support)
|
||||||
|
if isinstance(data, dict):
|
||||||
|
return {k: from_serializable_dict(v) for k, v in data.items()}
|
||||||
|
|
||||||
|
return data
|
||||||
|
|
||||||
|
|
||||||
|
def is_empty_value(value: Any) -> bool:
|
||||||
|
"""Check if a value is effectively empty/null."""
|
||||||
|
if value is None:
|
||||||
|
return True
|
||||||
|
if isinstance(value, (list, tuple, set, dict, str)) and len(value) == 0:
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
class BrowserConfig:
|
class BrowserConfig:
|
||||||
@@ -29,6 +166,7 @@ class BrowserConfig:
|
|||||||
Default: True.
|
Default: True.
|
||||||
use_managed_browser (bool): Launch the browser using a managed approach (e.g., via CDP), allowing
|
use_managed_browser (bool): Launch the browser using a managed approach (e.g., via CDP), allowing
|
||||||
advanced manipulation. Default: False.
|
advanced manipulation. Default: False.
|
||||||
|
cdp_url (str): URL for the Chrome DevTools Protocol (CDP) endpoint. Default: "ws://localhost:9222/devtools/browser/".
|
||||||
debugging_port (int): Port for the browser debugging protocol. Default: 9222.
|
debugging_port (int): Port for the browser debugging protocol. Default: 9222.
|
||||||
use_persistent_context (bool): Use a persistent browser context (like a persistent profile).
|
use_persistent_context (bool): Use a persistent browser context (like a persistent profile).
|
||||||
Automatically sets use_managed_browser=True. Default: False.
|
Automatically sets use_managed_browser=True. Default: False.
|
||||||
@@ -38,12 +176,14 @@ class BrowserConfig:
|
|||||||
is "chromium". Default: "chromium".
|
is "chromium". Default: "chromium".
|
||||||
channel (str): The channel to launch (e.g., "chromium", "chrome", "msedge"). Only applies if browser_type
|
channel (str): The channel to launch (e.g., "chromium", "chrome", "msedge"). Only applies if browser_type
|
||||||
is "chromium". Default: "chromium".
|
is "chromium". Default: "chromium".
|
||||||
proxy (str or None): Proxy server URL (e.g., "http://username:password@proxy:port"). If None, no proxy is used.
|
proxy (Optional[str]): Proxy server URL (e.g., "http://username:password@proxy:port"). If None, no proxy is used.
|
||||||
Default: None.
|
Default: None.
|
||||||
proxy_config (dict or None): Detailed proxy configuration, e.g. {"server": "...", "username": "..."}.
|
proxy_config (dict or None): Detailed proxy configuration, e.g. {"server": "...", "username": "..."}.
|
||||||
If None, no additional proxy config. Default: None.
|
If None, no additional proxy config. Default: None.
|
||||||
viewport_width (int): Default viewport width for pages. Default: 1080.
|
viewport_width (int): Default viewport width for pages. Default: 1080.
|
||||||
viewport_height (int): Default viewport height for pages. Default: 600.
|
viewport_height (int): Default viewport height for pages. Default: 600.
|
||||||
|
viewport (dict): Default viewport dimensions for pages. If set, overrides viewport_width and viewport_height.
|
||||||
|
Default: None.
|
||||||
verbose (bool): Enable verbose logging.
|
verbose (bool): Enable verbose logging.
|
||||||
Default: True.
|
Default: True.
|
||||||
accept_downloads (bool): Whether to allow file downloads. If True, requires a downloads_path.
|
accept_downloads (bool): Whether to allow file downloads. If True, requires a downloads_path.
|
||||||
@@ -77,6 +217,7 @@ class BrowserConfig:
|
|||||||
browser_type: str = "chromium",
|
browser_type: str = "chromium",
|
||||||
headless: bool = True,
|
headless: bool = True,
|
||||||
use_managed_browser: bool = False,
|
use_managed_browser: bool = False,
|
||||||
|
cdp_url: str = None,
|
||||||
use_persistent_context: bool = False,
|
use_persistent_context: bool = False,
|
||||||
user_data_dir: str = None,
|
user_data_dir: str = None,
|
||||||
chrome_channel: str = "chromium",
|
chrome_channel: str = "chromium",
|
||||||
@@ -85,9 +226,10 @@ class BrowserConfig:
|
|||||||
proxy_config: dict = None,
|
proxy_config: dict = None,
|
||||||
viewport_width: int = 1080,
|
viewport_width: int = 1080,
|
||||||
viewport_height: int = 600,
|
viewport_height: int = 600,
|
||||||
|
viewport: dict = None,
|
||||||
accept_downloads: bool = False,
|
accept_downloads: bool = False,
|
||||||
downloads_path: str = None,
|
downloads_path: str = None,
|
||||||
storage_state=None,
|
storage_state: Union[str, dict, None] = None,
|
||||||
ignore_https_errors: bool = True,
|
ignore_https_errors: bool = True,
|
||||||
java_script_enabled: bool = True,
|
java_script_enabled: bool = True,
|
||||||
sleep_on_close: bool = False,
|
sleep_on_close: bool = False,
|
||||||
@@ -95,27 +237,38 @@ class BrowserConfig:
|
|||||||
cookies: list = None,
|
cookies: list = None,
|
||||||
headers: dict = None,
|
headers: dict = None,
|
||||||
user_agent: str = (
|
user_agent: str = (
|
||||||
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:109.0) AppleWebKit/537.36 "
|
# "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:109.0) AppleWebKit/537.36 "
|
||||||
"(KHTML, like Gecko) Chrome/116.0.5845.187 Safari/604.1 Edg/117.0.2045.47"
|
# "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 "
|
||||||
|
# "(KHTML, like Gecko) Chrome/116.0.5845.187 Safari/604.1 Edg/117.0.2045.47"
|
||||||
|
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 Chrome/116.0.0.0 Safari/537.36"
|
||||||
),
|
),
|
||||||
user_agent_mode: str = None,
|
user_agent_mode: str = "",
|
||||||
user_agent_generator_config: dict = None,
|
user_agent_generator_config: dict = {},
|
||||||
text_mode: bool = False,
|
text_mode: bool = False,
|
||||||
light_mode: bool = False,
|
light_mode: bool = False,
|
||||||
extra_args: list = None,
|
extra_args: list = None,
|
||||||
debugging_port : int = 9222,
|
debugging_port: int = 9222,
|
||||||
|
host: str = "localhost",
|
||||||
):
|
):
|
||||||
self.browser_type = browser_type
|
self.browser_type = browser_type
|
||||||
self.headless = headless
|
self.headless = headless
|
||||||
self.use_managed_browser = use_managed_browser
|
self.use_managed_browser = use_managed_browser
|
||||||
|
self.cdp_url = cdp_url
|
||||||
self.use_persistent_context = use_persistent_context
|
self.use_persistent_context = use_persistent_context
|
||||||
self.user_data_dir = user_data_dir
|
self.user_data_dir = user_data_dir
|
||||||
self.chrome_channel = chrome_channel or self.browser_type or "chromium"
|
self.chrome_channel = chrome_channel or self.browser_type or "chromium"
|
||||||
self.channel = channel or self.browser_type or "chromium"
|
self.channel = channel or self.browser_type or "chromium"
|
||||||
|
if self.browser_type in ["firefox", "webkit"]:
|
||||||
|
self.channel = ""
|
||||||
|
self.chrome_channel = ""
|
||||||
self.proxy = proxy
|
self.proxy = proxy
|
||||||
self.proxy_config = proxy_config
|
self.proxy_config = proxy_config
|
||||||
self.viewport_width = viewport_width
|
self.viewport_width = viewport_width
|
||||||
self.viewport_height = viewport_height
|
self.viewport_height = viewport_height
|
||||||
|
self.viewport = viewport
|
||||||
|
if self.viewport is not None:
|
||||||
|
self.viewport_width = self.viewport.get("width", 1080)
|
||||||
|
self.viewport_height = self.viewport.get("height", 600)
|
||||||
self.accept_downloads = accept_downloads
|
self.accept_downloads = accept_downloads
|
||||||
self.downloads_path = downloads_path
|
self.downloads_path = downloads_path
|
||||||
self.storage_state = storage_state
|
self.storage_state = storage_state
|
||||||
@@ -133,17 +286,15 @@ class BrowserConfig:
|
|||||||
self.verbose = verbose
|
self.verbose = verbose
|
||||||
self.debugging_port = debugging_port
|
self.debugging_port = debugging_port
|
||||||
|
|
||||||
user_agenr_generator = UserAgentGenerator()
|
fa_user_agenr_generator = ValidUAGenerator()
|
||||||
if self.user_agent_mode != "random" and self.user_agent_generator_config:
|
if self.user_agent_mode == "random":
|
||||||
self.user_agent = user_agenr_generator.generate(
|
self.user_agent = fa_user_agenr_generator.generate(
|
||||||
**(self.user_agent_generator_config or {})
|
**(self.user_agent_generator_config or {})
|
||||||
)
|
)
|
||||||
elif self.user_agent_mode == "random":
|
|
||||||
self.user_agent = user_agenr_generator.generate()
|
|
||||||
else:
|
else:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
self.browser_hint = user_agenr_generator.generate_client_hints(self.user_agent)
|
self.browser_hint = UAGen.generate_client_hints(self.user_agent)
|
||||||
self.headers.setdefault("sec-ch-ua", self.browser_hint)
|
self.headers.setdefault("sec-ch-ua", self.browser_hint)
|
||||||
|
|
||||||
# If persistent context is requested, ensure managed browser is enabled
|
# If persistent context is requested, ensure managed browser is enabled
|
||||||
@@ -156,6 +307,7 @@ class BrowserConfig:
|
|||||||
browser_type=kwargs.get("browser_type", "chromium"),
|
browser_type=kwargs.get("browser_type", "chromium"),
|
||||||
headless=kwargs.get("headless", True),
|
headless=kwargs.get("headless", True),
|
||||||
use_managed_browser=kwargs.get("use_managed_browser", False),
|
use_managed_browser=kwargs.get("use_managed_browser", False),
|
||||||
|
cdp_url=kwargs.get("cdp_url"),
|
||||||
use_persistent_context=kwargs.get("use_persistent_context", False),
|
use_persistent_context=kwargs.get("use_persistent_context", False),
|
||||||
user_data_dir=kwargs.get("user_data_dir"),
|
user_data_dir=kwargs.get("user_data_dir"),
|
||||||
chrome_channel=kwargs.get("chrome_channel", "chromium"),
|
chrome_channel=kwargs.get("chrome_channel", "chromium"),
|
||||||
@@ -183,8 +335,143 @@ class BrowserConfig:
|
|||||||
extra_args=kwargs.get("extra_args", []),
|
extra_args=kwargs.get("extra_args", []),
|
||||||
)
|
)
|
||||||
|
|
||||||
|
def to_dict(self):
|
||||||
|
return {
|
||||||
|
"browser_type": self.browser_type,
|
||||||
|
"headless": self.headless,
|
||||||
|
"use_managed_browser": self.use_managed_browser,
|
||||||
|
"cdp_url": self.cdp_url,
|
||||||
|
"use_persistent_context": self.use_persistent_context,
|
||||||
|
"user_data_dir": self.user_data_dir,
|
||||||
|
"chrome_channel": self.chrome_channel,
|
||||||
|
"channel": self.channel,
|
||||||
|
"proxy": self.proxy,
|
||||||
|
"proxy_config": self.proxy_config,
|
||||||
|
"viewport_width": self.viewport_width,
|
||||||
|
"viewport_height": self.viewport_height,
|
||||||
|
"accept_downloads": self.accept_downloads,
|
||||||
|
"downloads_path": self.downloads_path,
|
||||||
|
"storage_state": self.storage_state,
|
||||||
|
"ignore_https_errors": self.ignore_https_errors,
|
||||||
|
"java_script_enabled": self.java_script_enabled,
|
||||||
|
"cookies": self.cookies,
|
||||||
|
"headers": self.headers,
|
||||||
|
"user_agent": self.user_agent,
|
||||||
|
"user_agent_mode": self.user_agent_mode,
|
||||||
|
"user_agent_generator_config": self.user_agent_generator_config,
|
||||||
|
"text_mode": self.text_mode,
|
||||||
|
"light_mode": self.light_mode,
|
||||||
|
"extra_args": self.extra_args,
|
||||||
|
"sleep_on_close": self.sleep_on_close,
|
||||||
|
"verbose": self.verbose,
|
||||||
|
"debugging_port": self.debugging_port,
|
||||||
|
}
|
||||||
|
|
||||||
|
def clone(self, **kwargs):
|
||||||
|
"""Create a copy of this configuration with updated values.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
**kwargs: Key-value pairs of configuration options to update
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
BrowserConfig: A new instance with the specified updates
|
||||||
|
"""
|
||||||
|
config_dict = self.to_dict()
|
||||||
|
config_dict.update(kwargs)
|
||||||
|
return BrowserConfig.from_kwargs(config_dict)
|
||||||
|
|
||||||
|
# Create a funciton returns dict of the object
|
||||||
|
def dump(self) -> dict:
|
||||||
|
# Serialize the object to a dictionary
|
||||||
|
return to_serializable_dict(self)
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def load(data: dict) -> "BrowserConfig":
|
||||||
|
# Deserialize the object from a dictionary
|
||||||
|
config = from_serializable_dict(data)
|
||||||
|
if isinstance(config, BrowserConfig):
|
||||||
|
return config
|
||||||
|
return BrowserConfig.from_kwargs(config)
|
||||||
|
|
||||||
|
|
||||||
|
class HTTPCrawlerConfig:
|
||||||
|
"""HTTP-specific crawler configuration"""
|
||||||
|
|
||||||
|
method: str = "GET"
|
||||||
|
headers: Optional[Dict[str, str]] = None
|
||||||
|
data: Optional[Dict[str, Any]] = None
|
||||||
|
json: Optional[Dict[str, Any]] = None
|
||||||
|
follow_redirects: bool = True
|
||||||
|
verify_ssl: bool = True
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
method: str = "GET",
|
||||||
|
headers: Optional[Dict[str, str]] = None,
|
||||||
|
data: Optional[Dict[str, Any]] = None,
|
||||||
|
json: Optional[Dict[str, Any]] = None,
|
||||||
|
follow_redirects: bool = True,
|
||||||
|
verify_ssl: bool = True,
|
||||||
|
):
|
||||||
|
self.method = method
|
||||||
|
self.headers = headers
|
||||||
|
self.data = data
|
||||||
|
self.json = json
|
||||||
|
self.follow_redirects = follow_redirects
|
||||||
|
self.verify_ssl = verify_ssl
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def from_kwargs(kwargs: dict) -> "HTTPCrawlerConfig":
|
||||||
|
return HTTPCrawlerConfig(
|
||||||
|
method=kwargs.get("method", "GET"),
|
||||||
|
headers=kwargs.get("headers"),
|
||||||
|
data=kwargs.get("data"),
|
||||||
|
json=kwargs.get("json"),
|
||||||
|
follow_redirects=kwargs.get("follow_redirects", True),
|
||||||
|
verify_ssl=kwargs.get("verify_ssl", True),
|
||||||
|
)
|
||||||
|
|
||||||
|
def to_dict(self):
|
||||||
|
return {
|
||||||
|
"method": self.method,
|
||||||
|
"headers": self.headers,
|
||||||
|
"data": self.data,
|
||||||
|
"json": self.json,
|
||||||
|
"follow_redirects": self.follow_redirects,
|
||||||
|
"verify_ssl": self.verify_ssl,
|
||||||
|
}
|
||||||
|
|
||||||
|
def clone(self, **kwargs):
|
||||||
|
"""Create a copy of this configuration with updated values.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
**kwargs: Key-value pairs of configuration options to update
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
HTTPCrawlerConfig: A new instance with the specified updates
|
||||||
|
"""
|
||||||
|
config_dict = self.to_dict()
|
||||||
|
config_dict.update(kwargs)
|
||||||
|
return HTTPCrawlerConfig.from_kwargs(config_dict)
|
||||||
|
|
||||||
|
def dump(self) -> dict:
|
||||||
|
return to_serializable_dict(self)
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def load(data: dict) -> "HTTPCrawlerConfig":
|
||||||
|
config = from_serializable_dict(data)
|
||||||
|
if isinstance(config, HTTPCrawlerConfig):
|
||||||
|
return config
|
||||||
|
return HTTPCrawlerConfig.from_kwargs(config)
|
||||||
|
|
||||||
|
class CrawlerRunConfig():
|
||||||
|
_UNWANTED_PROPS = {
|
||||||
|
'disable_cache' : 'Instead, use cache_mode=CacheMode.DISABLED',
|
||||||
|
'bypass_cache' : 'Instead, use cache_mode=CacheMode.BYPASS',
|
||||||
|
'no_cache_read' : 'Instead, use cache_mode=CacheMode.WRITE_ONLY',
|
||||||
|
'no_cache_write' : 'Instead, use cache_mode=CacheMode.READ_ONLY',
|
||||||
|
}
|
||||||
|
|
||||||
class CrawlerRunConfig:
|
|
||||||
"""
|
"""
|
||||||
Configuration class for controlling how the crawler runs each crawl operation.
|
Configuration class for controlling how the crawler runs each crawl operation.
|
||||||
This includes parameters for content extraction, page manipulation, waiting conditions,
|
This includes parameters for content extraction, page manipulation, waiting conditions,
|
||||||
@@ -194,6 +481,9 @@ class CrawlerRunConfig:
|
|||||||
By using this class, you have a single place to understand and adjust the crawling options.
|
By using this class, you have a single place to understand and adjust the crawling options.
|
||||||
|
|
||||||
Attributes:
|
Attributes:
|
||||||
|
# Deep Crawl Parameters
|
||||||
|
deep_crawl_strategy (DeepCrawlStrategy or None): Strategy to use for deep crawling.
|
||||||
|
|
||||||
# Content Processing Parameters
|
# Content Processing Parameters
|
||||||
word_count_threshold (int): Minimum word count threshold before processing content.
|
word_count_threshold (int): Minimum word count threshold before processing content.
|
||||||
Default: MIN_WORD_THRESHOLD (typically 200).
|
Default: MIN_WORD_THRESHOLD (typically 200).
|
||||||
@@ -203,8 +493,6 @@ class CrawlerRunConfig:
|
|||||||
Default: RegexChunking().
|
Default: RegexChunking().
|
||||||
markdown_generator (MarkdownGenerationStrategy): Strategy for generating markdown.
|
markdown_generator (MarkdownGenerationStrategy): Strategy for generating markdown.
|
||||||
Default: None.
|
Default: None.
|
||||||
content_filter (RelevantContentFilter or None): Optional filter to prune irrelevant content.
|
|
||||||
Default: None.
|
|
||||||
only_text (bool): If True, attempt to extract text-only content where applicable.
|
only_text (bool): If True, attempt to extract text-only content where applicable.
|
||||||
Default: False.
|
Default: False.
|
||||||
css_selector (str or None): CSS selector to extract a specific portion of the page.
|
css_selector (str or None): CSS selector to extract a specific portion of the page.
|
||||||
@@ -215,17 +503,25 @@ class CrawlerRunConfig:
|
|||||||
Default: None.
|
Default: None.
|
||||||
keep_data_attributes (bool): If True, retain `data-*` attributes while removing unwanted attributes.
|
keep_data_attributes (bool): If True, retain `data-*` attributes while removing unwanted attributes.
|
||||||
Default: False.
|
Default: False.
|
||||||
|
keep_attrs (list of str): List of HTML attributes to keep during processing.
|
||||||
|
Default: [].
|
||||||
remove_forms (bool): If True, remove all `<form>` elements from the HTML.
|
remove_forms (bool): If True, remove all `<form>` elements from the HTML.
|
||||||
Default: False.
|
Default: False.
|
||||||
prettiify (bool): If True, apply `fast_format_html` to produce prettified HTML output.
|
prettiify (bool): If True, apply `fast_format_html` to produce prettified HTML output.
|
||||||
Default: False.
|
Default: False.
|
||||||
parser_type (str): Type of parser to use for HTML parsing.
|
parser_type (str): Type of parser to use for HTML parsing.
|
||||||
Default: "lxml".
|
Default: "lxml".
|
||||||
|
scraping_strategy (ContentScrapingStrategy): Scraping strategy to use.
|
||||||
|
Default: WebScrapingStrategy.
|
||||||
|
proxy_config (dict or None): Detailed proxy configuration, e.g. {"server": "...", "username": "..."}.
|
||||||
|
If None, no additional proxy config. Default: None.
|
||||||
|
|
||||||
|
# SSL Parameters
|
||||||
|
fetch_ssl_certificate: bool = False,
|
||||||
# Caching Parameters
|
# Caching Parameters
|
||||||
cache_mode (CacheMode or None): Defines how caching is handled.
|
cache_mode (CacheMode or None): Defines how caching is handled.
|
||||||
If None, defaults to CacheMode.ENABLED internally.
|
If None, defaults to CacheMode.ENABLED internally.
|
||||||
Default: None.
|
Default: CacheMode.BYPASS.
|
||||||
session_id (str or None): Optional session ID to persist the browser context and the created
|
session_id (str or None): Optional session ID to persist the browser context and the created
|
||||||
page instance. If the ID already exists, the crawler does not
|
page instance. If the ID already exists, the crawler does not
|
||||||
create a new page and uses the current page to preserve the state.
|
create a new page and uses the current page to preserve the state.
|
||||||
@@ -237,6 +533,8 @@ class CrawlerRunConfig:
|
|||||||
Default: False.
|
Default: False.
|
||||||
no_cache_write (bool): Legacy parameter, if True acts like CacheMode.READ_ONLY.
|
no_cache_write (bool): Legacy parameter, if True acts like CacheMode.READ_ONLY.
|
||||||
Default: False.
|
Default: False.
|
||||||
|
shared_data (dict or None): Shared data to be passed between hooks.
|
||||||
|
Default: None.
|
||||||
|
|
||||||
# Page Navigation and Timing Parameters
|
# Page Navigation and Timing Parameters
|
||||||
wait_until (str): The condition to wait for when navigating, e.g. "domcontentloaded".
|
wait_until (str): The condition to wait for when navigating, e.g. "domcontentloaded".
|
||||||
@@ -246,7 +544,7 @@ class CrawlerRunConfig:
|
|||||||
wait_for (str or None): A CSS selector or JS condition to wait for before extracting content.
|
wait_for (str or None): A CSS selector or JS condition to wait for before extracting content.
|
||||||
Default: None.
|
Default: None.
|
||||||
wait_for_images (bool): If True, wait for images to load before extracting content.
|
wait_for_images (bool): If True, wait for images to load before extracting content.
|
||||||
Default: True.
|
Default: False.
|
||||||
delay_before_return_html (float): Delay in seconds before retrieving final HTML.
|
delay_before_return_html (float): Delay in seconds before retrieving final HTML.
|
||||||
Default: 0.1.
|
Default: 0.1.
|
||||||
mean_delay (float): Mean base delay between requests when calling arun_many.
|
mean_delay (float): Mean base delay between requests when calling arun_many.
|
||||||
@@ -301,16 +599,42 @@ class CrawlerRunConfig:
|
|||||||
Default: SOCIAL_MEDIA_DOMAINS (from config).
|
Default: SOCIAL_MEDIA_DOMAINS (from config).
|
||||||
exclude_external_links (bool): If True, exclude all external links from the results.
|
exclude_external_links (bool): If True, exclude all external links from the results.
|
||||||
Default: False.
|
Default: False.
|
||||||
|
exclude_internal_links (bool): If True, exclude internal links from the results.
|
||||||
|
Default: False.
|
||||||
exclude_social_media_links (bool): If True, exclude links pointing to social media domains.
|
exclude_social_media_links (bool): If True, exclude links pointing to social media domains.
|
||||||
Default: False.
|
Default: False.
|
||||||
exclude_domains (list of str): List of specific domains to exclude from results.
|
exclude_domains (list of str): List of specific domains to exclude from results.
|
||||||
Default: [].
|
Default: [].
|
||||||
|
exclude_internal_links (bool): If True, exclude internal links from the results.
|
||||||
|
Default: False.
|
||||||
|
|
||||||
# Debugging and Logging Parameters
|
# Debugging and Logging Parameters
|
||||||
verbose (bool): Enable verbose logging.
|
verbose (bool): Enable verbose logging.
|
||||||
Default: True.
|
Default: True.
|
||||||
log_console (bool): If True, log console messages from the page.
|
log_console (bool): If True, log console messages from the page.
|
||||||
Default: False.
|
Default: False.
|
||||||
|
|
||||||
|
# HTTP Crwler Strategy Parameters
|
||||||
|
method (str): HTTP method to use for the request, when using AsyncHTTPCrwalerStrategy.
|
||||||
|
Default: "GET".
|
||||||
|
data (dict): Data to send in the request body, when using AsyncHTTPCrwalerStrategy.
|
||||||
|
Default: None.
|
||||||
|
json (dict): JSON data to send in the request body, when using AsyncHTTPCrwalerStrategy.
|
||||||
|
|
||||||
|
# Connection Parameters
|
||||||
|
stream (bool): If True, enables streaming of crawled URLs as they are processed when used with arun_many.
|
||||||
|
Default: False.
|
||||||
|
|
||||||
|
check_robots_txt (bool): Whether to check robots.txt rules before crawling. Default: False
|
||||||
|
Default: False.
|
||||||
|
user_agent (str): Custom User-Agent string to use.
|
||||||
|
Default: None.
|
||||||
|
user_agent_mode (str or None): Mode for generating the user agent (e.g., "random"). If None, use the provided user_agent as-is.
|
||||||
|
Default: None.
|
||||||
|
user_agent_generator_config (dict or None): Configuration for user agent generation if user_agent_mode is set.
|
||||||
|
Default: None.
|
||||||
|
|
||||||
|
url: str = None # This is not a compulsory parameter
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(
|
def __init__(
|
||||||
@@ -318,39 +642,39 @@ class CrawlerRunConfig:
|
|||||||
# Content Processing Parameters
|
# Content Processing Parameters
|
||||||
word_count_threshold: int = MIN_WORD_THRESHOLD,
|
word_count_threshold: int = MIN_WORD_THRESHOLD,
|
||||||
extraction_strategy: ExtractionStrategy = None,
|
extraction_strategy: ExtractionStrategy = None,
|
||||||
chunking_strategy: ChunkingStrategy = None,
|
chunking_strategy: ChunkingStrategy = RegexChunking(),
|
||||||
markdown_generator: MarkdownGenerationStrategy = None,
|
markdown_generator: MarkdownGenerationStrategy = None,
|
||||||
content_filter=None,
|
|
||||||
only_text: bool = False,
|
only_text: bool = False,
|
||||||
css_selector: str = None,
|
css_selector: str = None,
|
||||||
excluded_tags: list = None,
|
excluded_tags: list = None,
|
||||||
excluded_selector: str = None,
|
excluded_selector: str = None,
|
||||||
keep_data_attributes: bool = False,
|
keep_data_attributes: bool = False,
|
||||||
|
keep_attrs: list = None,
|
||||||
remove_forms: bool = False,
|
remove_forms: bool = False,
|
||||||
prettiify: bool = False,
|
prettiify: bool = False,
|
||||||
parser_type: str = "lxml",
|
parser_type: str = "lxml",
|
||||||
|
scraping_strategy: ContentScrapingStrategy = None,
|
||||||
|
proxy_config: dict = None,
|
||||||
|
proxy_rotation_strategy: Optional[ProxyRotationStrategy] = None,
|
||||||
# SSL Parameters
|
# SSL Parameters
|
||||||
fetch_ssl_certificate: bool = False,
|
fetch_ssl_certificate: bool = False,
|
||||||
|
|
||||||
# Caching Parameters
|
# Caching Parameters
|
||||||
cache_mode=None,
|
cache_mode: CacheMode = CacheMode.BYPASS,
|
||||||
session_id: str = None,
|
session_id: str = None,
|
||||||
bypass_cache: bool = False,
|
bypass_cache: bool = False,
|
||||||
disable_cache: bool = False,
|
disable_cache: bool = False,
|
||||||
no_cache_read: bool = False,
|
no_cache_read: bool = False,
|
||||||
no_cache_write: bool = False,
|
no_cache_write: bool = False,
|
||||||
|
shared_data: dict = None,
|
||||||
# Page Navigation and Timing Parameters
|
# Page Navigation and Timing Parameters
|
||||||
wait_until: str = "domcontentloaded",
|
wait_until: str = "domcontentloaded",
|
||||||
page_timeout: int = PAGE_TIMEOUT,
|
page_timeout: int = PAGE_TIMEOUT,
|
||||||
wait_for: str = None,
|
wait_for: str = None,
|
||||||
wait_for_images: bool = True,
|
wait_for_images: bool = False,
|
||||||
delay_before_return_html: float = 0.1,
|
delay_before_return_html: float = 0.1,
|
||||||
mean_delay: float = 0.1,
|
mean_delay: float = 0.1,
|
||||||
max_range: float = 0.3,
|
max_range: float = 0.3,
|
||||||
semaphore_count: int = 5,
|
semaphore_count: int = 5,
|
||||||
|
|
||||||
# Page Interaction Parameters
|
# Page Interaction Parameters
|
||||||
js_code: Union[str, List[str]] = None,
|
js_code: Union[str, List[str]] = None,
|
||||||
js_only: bool = False,
|
js_only: bool = False,
|
||||||
@@ -363,7 +687,6 @@ class CrawlerRunConfig:
|
|||||||
override_navigator: bool = False,
|
override_navigator: bool = False,
|
||||||
magic: bool = False,
|
magic: bool = False,
|
||||||
adjust_viewport_to_content: bool = False,
|
adjust_viewport_to_content: bool = False,
|
||||||
|
|
||||||
# Media Handling Parameters
|
# Media Handling Parameters
|
||||||
screenshot: bool = False,
|
screenshot: bool = False,
|
||||||
screenshot_wait_for: float = None,
|
screenshot_wait_for: float = None,
|
||||||
@@ -372,19 +695,27 @@ class CrawlerRunConfig:
|
|||||||
image_description_min_word_threshold: int = IMAGE_DESCRIPTION_MIN_WORD_THRESHOLD,
|
image_description_min_word_threshold: int = IMAGE_DESCRIPTION_MIN_WORD_THRESHOLD,
|
||||||
image_score_threshold: int = IMAGE_SCORE_THRESHOLD,
|
image_score_threshold: int = IMAGE_SCORE_THRESHOLD,
|
||||||
exclude_external_images: bool = False,
|
exclude_external_images: bool = False,
|
||||||
|
|
||||||
# Link and Domain Handling Parameters
|
# Link and Domain Handling Parameters
|
||||||
exclude_social_media_domains: list = None,
|
exclude_social_media_domains: list = None,
|
||||||
exclude_external_links: bool = False,
|
exclude_external_links: bool = False,
|
||||||
exclude_social_media_links: bool = False,
|
exclude_social_media_links: bool = False,
|
||||||
exclude_domains: list = None,
|
exclude_domains: list = None,
|
||||||
|
exclude_internal_links: bool = False,
|
||||||
# Debugging and Logging Parameters
|
# Debugging and Logging Parameters
|
||||||
verbose: bool = True,
|
verbose: bool = True,
|
||||||
log_console: bool = False,
|
log_console: bool = False,
|
||||||
|
# Connection Parameters
|
||||||
|
method: str = "GET",
|
||||||
|
stream: bool = False,
|
||||||
url: str = None,
|
url: str = None,
|
||||||
|
check_robots_txt: bool = False,
|
||||||
|
user_agent: str = None,
|
||||||
|
user_agent_mode: str = None,
|
||||||
|
user_agent_generator_config: dict = {},
|
||||||
|
# Deep Crawl Parameters
|
||||||
|
deep_crawl_strategy: Optional[DeepCrawlStrategy] = None,
|
||||||
):
|
):
|
||||||
|
# TODO: Planning to set properties dynamically based on the __init__ signature
|
||||||
self.url = url
|
self.url = url
|
||||||
|
|
||||||
# Content Processing Parameters
|
# Content Processing Parameters
|
||||||
@@ -392,15 +723,18 @@ class CrawlerRunConfig:
|
|||||||
self.extraction_strategy = extraction_strategy
|
self.extraction_strategy = extraction_strategy
|
||||||
self.chunking_strategy = chunking_strategy
|
self.chunking_strategy = chunking_strategy
|
||||||
self.markdown_generator = markdown_generator
|
self.markdown_generator = markdown_generator
|
||||||
self.content_filter = content_filter
|
|
||||||
self.only_text = only_text
|
self.only_text = only_text
|
||||||
self.css_selector = css_selector
|
self.css_selector = css_selector
|
||||||
self.excluded_tags = excluded_tags or []
|
self.excluded_tags = excluded_tags or []
|
||||||
self.excluded_selector = excluded_selector or ""
|
self.excluded_selector = excluded_selector or ""
|
||||||
self.keep_data_attributes = keep_data_attributes
|
self.keep_data_attributes = keep_data_attributes
|
||||||
|
self.keep_attrs = keep_attrs or []
|
||||||
self.remove_forms = remove_forms
|
self.remove_forms = remove_forms
|
||||||
self.prettiify = prettiify
|
self.prettiify = prettiify
|
||||||
self.parser_type = parser_type
|
self.parser_type = parser_type
|
||||||
|
self.scraping_strategy = scraping_strategy or WebScrapingStrategy()
|
||||||
|
self.proxy_config = proxy_config
|
||||||
|
self.proxy_rotation_strategy = proxy_rotation_strategy
|
||||||
|
|
||||||
# SSL Parameters
|
# SSL Parameters
|
||||||
self.fetch_ssl_certificate = fetch_ssl_certificate
|
self.fetch_ssl_certificate = fetch_ssl_certificate
|
||||||
@@ -412,6 +746,7 @@ class CrawlerRunConfig:
|
|||||||
self.disable_cache = disable_cache
|
self.disable_cache = disable_cache
|
||||||
self.no_cache_read = no_cache_read
|
self.no_cache_read = no_cache_read
|
||||||
self.no_cache_write = no_cache_write
|
self.no_cache_write = no_cache_write
|
||||||
|
self.shared_data = shared_data
|
||||||
|
|
||||||
# Page Navigation and Timing Parameters
|
# Page Navigation and Timing Parameters
|
||||||
self.wait_until = wait_until
|
self.wait_until = wait_until
|
||||||
@@ -446,69 +781,108 @@ class CrawlerRunConfig:
|
|||||||
self.exclude_external_images = exclude_external_images
|
self.exclude_external_images = exclude_external_images
|
||||||
|
|
||||||
# Link and Domain Handling Parameters
|
# Link and Domain Handling Parameters
|
||||||
self.exclude_social_media_domains = exclude_social_media_domains or SOCIAL_MEDIA_DOMAINS
|
self.exclude_social_media_domains = (
|
||||||
|
exclude_social_media_domains or SOCIAL_MEDIA_DOMAINS
|
||||||
|
)
|
||||||
self.exclude_external_links = exclude_external_links
|
self.exclude_external_links = exclude_external_links
|
||||||
self.exclude_social_media_links = exclude_social_media_links
|
self.exclude_social_media_links = exclude_social_media_links
|
||||||
self.exclude_domains = exclude_domains or []
|
self.exclude_domains = exclude_domains or []
|
||||||
|
self.exclude_internal_links = exclude_internal_links
|
||||||
|
|
||||||
# Debugging and Logging Parameters
|
# Debugging and Logging Parameters
|
||||||
self.verbose = verbose
|
self.verbose = verbose
|
||||||
self.log_console = log_console
|
self.log_console = log_console
|
||||||
|
|
||||||
|
# Connection Parameters
|
||||||
|
self.stream = stream
|
||||||
|
self.method = method
|
||||||
|
|
||||||
|
# Robots.txt Handling Parameters
|
||||||
|
self.check_robots_txt = check_robots_txt
|
||||||
|
|
||||||
|
# User Agent Parameters
|
||||||
|
self.user_agent = user_agent
|
||||||
|
self.user_agent_mode = user_agent_mode
|
||||||
|
self.user_agent_generator_config = user_agent_generator_config
|
||||||
|
|
||||||
# Validate type of extraction strategy and chunking strategy if they are provided
|
# Validate type of extraction strategy and chunking strategy if they are provided
|
||||||
if self.extraction_strategy is not None and not isinstance(
|
if self.extraction_strategy is not None and not isinstance(
|
||||||
self.extraction_strategy, ExtractionStrategy
|
self.extraction_strategy, ExtractionStrategy
|
||||||
):
|
):
|
||||||
raise ValueError("extraction_strategy must be an instance of ExtractionStrategy")
|
raise ValueError(
|
||||||
|
"extraction_strategy must be an instance of ExtractionStrategy"
|
||||||
|
)
|
||||||
if self.chunking_strategy is not None and not isinstance(
|
if self.chunking_strategy is not None and not isinstance(
|
||||||
self.chunking_strategy, ChunkingStrategy
|
self.chunking_strategy, ChunkingStrategy
|
||||||
):
|
):
|
||||||
raise ValueError("chunking_strategy must be an instance of ChunkingStrategy")
|
raise ValueError(
|
||||||
|
"chunking_strategy must be an instance of ChunkingStrategy"
|
||||||
|
)
|
||||||
|
|
||||||
# Set default chunking strategy if None
|
# Set default chunking strategy if None
|
||||||
if self.chunking_strategy is None:
|
if self.chunking_strategy is None:
|
||||||
from .chunking_strategy import RegexChunking
|
|
||||||
self.chunking_strategy = RegexChunking()
|
self.chunking_strategy = RegexChunking()
|
||||||
|
|
||||||
|
# Deep Crawl Parameters
|
||||||
|
self.deep_crawl_strategy = deep_crawl_strategy
|
||||||
|
|
||||||
|
|
||||||
|
def __getattr__(self, name):
|
||||||
|
"""Handle attribute access."""
|
||||||
|
if name in self._UNWANTED_PROPS:
|
||||||
|
raise AttributeError(f"Getting '{name}' is deprecated. {self._UNWANTED_PROPS[name]}")
|
||||||
|
raise AttributeError(f"'{self.__class__.__name__}' has no attribute '{name}'")
|
||||||
|
|
||||||
|
def __setattr__(self, name, value):
|
||||||
|
"""Handle attribute setting."""
|
||||||
|
# TODO: Planning to set properties dynamically based on the __init__ signature
|
||||||
|
sig = inspect.signature(self.__init__)
|
||||||
|
all_params = sig.parameters # Dictionary of parameter names and their details
|
||||||
|
|
||||||
|
if name in self._UNWANTED_PROPS and value is not all_params[name].default:
|
||||||
|
raise AttributeError(f"Setting '{name}' is deprecated. {self._UNWANTED_PROPS[name]}")
|
||||||
|
|
||||||
|
super().__setattr__(name, value)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def from_kwargs(kwargs: dict) -> "CrawlerRunConfig":
|
def from_kwargs(kwargs: dict) -> "CrawlerRunConfig":
|
||||||
return CrawlerRunConfig(
|
return CrawlerRunConfig(
|
||||||
# Content Processing Parameters
|
# Content Processing Parameters
|
||||||
word_count_threshold=kwargs.get("word_count_threshold", 200),
|
word_count_threshold=kwargs.get("word_count_threshold", 200),
|
||||||
extraction_strategy=kwargs.get("extraction_strategy"),
|
extraction_strategy=kwargs.get("extraction_strategy"),
|
||||||
chunking_strategy=kwargs.get("chunking_strategy"),
|
chunking_strategy=kwargs.get("chunking_strategy", RegexChunking()),
|
||||||
markdown_generator=kwargs.get("markdown_generator"),
|
markdown_generator=kwargs.get("markdown_generator"),
|
||||||
content_filter=kwargs.get("content_filter"),
|
|
||||||
only_text=kwargs.get("only_text", False),
|
only_text=kwargs.get("only_text", False),
|
||||||
css_selector=kwargs.get("css_selector"),
|
css_selector=kwargs.get("css_selector"),
|
||||||
excluded_tags=kwargs.get("excluded_tags", []),
|
excluded_tags=kwargs.get("excluded_tags", []),
|
||||||
excluded_selector=kwargs.get("excluded_selector", ""),
|
excluded_selector=kwargs.get("excluded_selector", ""),
|
||||||
keep_data_attributes=kwargs.get("keep_data_attributes", False),
|
keep_data_attributes=kwargs.get("keep_data_attributes", False),
|
||||||
|
keep_attrs=kwargs.get("keep_attrs", []),
|
||||||
remove_forms=kwargs.get("remove_forms", False),
|
remove_forms=kwargs.get("remove_forms", False),
|
||||||
prettiify=kwargs.get("prettiify", False),
|
prettiify=kwargs.get("prettiify", False),
|
||||||
parser_type=kwargs.get("parser_type", "lxml"),
|
parser_type=kwargs.get("parser_type", "lxml"),
|
||||||
|
scraping_strategy=kwargs.get("scraping_strategy"),
|
||||||
|
proxy_config=kwargs.get("proxy_config"),
|
||||||
|
proxy_rotation_strategy=kwargs.get("proxy_rotation_strategy"),
|
||||||
# SSL Parameters
|
# SSL Parameters
|
||||||
fetch_ssl_certificate=kwargs.get("fetch_ssl_certificate", False),
|
fetch_ssl_certificate=kwargs.get("fetch_ssl_certificate", False),
|
||||||
|
|
||||||
# Caching Parameters
|
# Caching Parameters
|
||||||
cache_mode=kwargs.get("cache_mode"),
|
cache_mode=kwargs.get("cache_mode", CacheMode.BYPASS),
|
||||||
session_id=kwargs.get("session_id"),
|
session_id=kwargs.get("session_id"),
|
||||||
bypass_cache=kwargs.get("bypass_cache", False),
|
bypass_cache=kwargs.get("bypass_cache", False),
|
||||||
disable_cache=kwargs.get("disable_cache", False),
|
disable_cache=kwargs.get("disable_cache", False),
|
||||||
no_cache_read=kwargs.get("no_cache_read", False),
|
no_cache_read=kwargs.get("no_cache_read", False),
|
||||||
no_cache_write=kwargs.get("no_cache_write", False),
|
no_cache_write=kwargs.get("no_cache_write", False),
|
||||||
|
shared_data=kwargs.get("shared_data", None),
|
||||||
# Page Navigation and Timing Parameters
|
# Page Navigation and Timing Parameters
|
||||||
wait_until=kwargs.get("wait_until", "domcontentloaded"),
|
wait_until=kwargs.get("wait_until", "domcontentloaded"),
|
||||||
page_timeout=kwargs.get("page_timeout", 60000),
|
page_timeout=kwargs.get("page_timeout", 60000),
|
||||||
wait_for=kwargs.get("wait_for"),
|
wait_for=kwargs.get("wait_for"),
|
||||||
wait_for_images=kwargs.get("wait_for_images", True),
|
wait_for_images=kwargs.get("wait_for_images", False),
|
||||||
delay_before_return_html=kwargs.get("delay_before_return_html", 0.1),
|
delay_before_return_html=kwargs.get("delay_before_return_html", 0.1),
|
||||||
mean_delay=kwargs.get("mean_delay", 0.1),
|
mean_delay=kwargs.get("mean_delay", 0.1),
|
||||||
max_range=kwargs.get("max_range", 0.3),
|
max_range=kwargs.get("max_range", 0.3),
|
||||||
semaphore_count=kwargs.get("semaphore_count", 5),
|
semaphore_count=kwargs.get("semaphore_count", 5),
|
||||||
|
|
||||||
# Page Interaction Parameters
|
# Page Interaction Parameters
|
||||||
js_code=kwargs.get("js_code"),
|
js_code=kwargs.get("js_code"),
|
||||||
js_only=kwargs.get("js_only", False),
|
js_only=kwargs.get("js_only", False),
|
||||||
@@ -521,45 +895,75 @@ class CrawlerRunConfig:
|
|||||||
override_navigator=kwargs.get("override_navigator", False),
|
override_navigator=kwargs.get("override_navigator", False),
|
||||||
magic=kwargs.get("magic", False),
|
magic=kwargs.get("magic", False),
|
||||||
adjust_viewport_to_content=kwargs.get("adjust_viewport_to_content", False),
|
adjust_viewport_to_content=kwargs.get("adjust_viewport_to_content", False),
|
||||||
|
|
||||||
# Media Handling Parameters
|
# Media Handling Parameters
|
||||||
screenshot=kwargs.get("screenshot", False),
|
screenshot=kwargs.get("screenshot", False),
|
||||||
screenshot_wait_for=kwargs.get("screenshot_wait_for"),
|
screenshot_wait_for=kwargs.get("screenshot_wait_for"),
|
||||||
screenshot_height_threshold=kwargs.get("screenshot_height_threshold", SCREENSHOT_HEIGHT_TRESHOLD),
|
screenshot_height_threshold=kwargs.get(
|
||||||
|
"screenshot_height_threshold", SCREENSHOT_HEIGHT_TRESHOLD
|
||||||
|
),
|
||||||
pdf=kwargs.get("pdf", False),
|
pdf=kwargs.get("pdf", False),
|
||||||
image_description_min_word_threshold=kwargs.get("image_description_min_word_threshold", IMAGE_DESCRIPTION_MIN_WORD_THRESHOLD),
|
image_description_min_word_threshold=kwargs.get(
|
||||||
image_score_threshold=kwargs.get("image_score_threshold", IMAGE_SCORE_THRESHOLD),
|
"image_description_min_word_threshold",
|
||||||
|
IMAGE_DESCRIPTION_MIN_WORD_THRESHOLD,
|
||||||
|
),
|
||||||
|
image_score_threshold=kwargs.get(
|
||||||
|
"image_score_threshold", IMAGE_SCORE_THRESHOLD
|
||||||
|
),
|
||||||
exclude_external_images=kwargs.get("exclude_external_images", False),
|
exclude_external_images=kwargs.get("exclude_external_images", False),
|
||||||
|
|
||||||
# Link and Domain Handling Parameters
|
# Link and Domain Handling Parameters
|
||||||
exclude_social_media_domains=kwargs.get("exclude_social_media_domains", SOCIAL_MEDIA_DOMAINS),
|
exclude_social_media_domains=kwargs.get(
|
||||||
|
"exclude_social_media_domains", SOCIAL_MEDIA_DOMAINS
|
||||||
|
),
|
||||||
exclude_external_links=kwargs.get("exclude_external_links", False),
|
exclude_external_links=kwargs.get("exclude_external_links", False),
|
||||||
exclude_social_media_links=kwargs.get("exclude_social_media_links", False),
|
exclude_social_media_links=kwargs.get("exclude_social_media_links", False),
|
||||||
exclude_domains=kwargs.get("exclude_domains", []),
|
exclude_domains=kwargs.get("exclude_domains", []),
|
||||||
|
exclude_internal_links=kwargs.get("exclude_internal_links", False),
|
||||||
# Debugging and Logging Parameters
|
# Debugging and Logging Parameters
|
||||||
verbose=kwargs.get("verbose", True),
|
verbose=kwargs.get("verbose", True),
|
||||||
log_console=kwargs.get("log_console", False),
|
log_console=kwargs.get("log_console", False),
|
||||||
|
# Connection Parameters
|
||||||
|
method=kwargs.get("method", "GET"),
|
||||||
|
stream=kwargs.get("stream", False),
|
||||||
|
check_robots_txt=kwargs.get("check_robots_txt", False),
|
||||||
|
user_agent=kwargs.get("user_agent"),
|
||||||
|
user_agent_mode=kwargs.get("user_agent_mode"),
|
||||||
|
user_agent_generator_config=kwargs.get("user_agent_generator_config", {}),
|
||||||
|
# Deep Crawl Parameters
|
||||||
|
deep_crawl_strategy=kwargs.get("deep_crawl_strategy"),
|
||||||
url=kwargs.get("url"),
|
url=kwargs.get("url"),
|
||||||
)
|
)
|
||||||
|
|
||||||
# Create a funciton returns dict of the object
|
# Create a funciton returns dict of the object
|
||||||
|
def dump(self) -> dict:
|
||||||
|
# Serialize the object to a dictionary
|
||||||
|
return to_serializable_dict(self)
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def load(data: dict) -> "CrawlerRunConfig":
|
||||||
|
# Deserialize the object from a dictionary
|
||||||
|
config = from_serializable_dict(data)
|
||||||
|
if isinstance(config, CrawlerRunConfig):
|
||||||
|
return config
|
||||||
|
return CrawlerRunConfig.from_kwargs(config)
|
||||||
|
|
||||||
def to_dict(self):
|
def to_dict(self):
|
||||||
return {
|
return {
|
||||||
"word_count_threshold": self.word_count_threshold,
|
"word_count_threshold": self.word_count_threshold,
|
||||||
"extraction_strategy": self.extraction_strategy,
|
"extraction_strategy": self.extraction_strategy,
|
||||||
"chunking_strategy": self.chunking_strategy,
|
"chunking_strategy": self.chunking_strategy,
|
||||||
"markdown_generator": self.markdown_generator,
|
"markdown_generator": self.markdown_generator,
|
||||||
"content_filter": self.content_filter,
|
|
||||||
"only_text": self.only_text,
|
"only_text": self.only_text,
|
||||||
"css_selector": self.css_selector,
|
"css_selector": self.css_selector,
|
||||||
"excluded_tags": self.excluded_tags,
|
"excluded_tags": self.excluded_tags,
|
||||||
"excluded_selector": self.excluded_selector,
|
"excluded_selector": self.excluded_selector,
|
||||||
"keep_data_attributes": self.keep_data_attributes,
|
"keep_data_attributes": self.keep_data_attributes,
|
||||||
|
"keep_attrs": self.keep_attrs,
|
||||||
"remove_forms": self.remove_forms,
|
"remove_forms": self.remove_forms,
|
||||||
"prettiify": self.prettiify,
|
"prettiify": self.prettiify,
|
||||||
"parser_type": self.parser_type,
|
"parser_type": self.parser_type,
|
||||||
|
"scraping_strategy": self.scraping_strategy,
|
||||||
|
"proxy_config": self.proxy_config,
|
||||||
|
"proxy_rotation_strategy": self.proxy_rotation_strategy,
|
||||||
"fetch_ssl_certificate": self.fetch_ssl_certificate,
|
"fetch_ssl_certificate": self.fetch_ssl_certificate,
|
||||||
"cache_mode": self.cache_mode,
|
"cache_mode": self.cache_mode,
|
||||||
"session_id": self.session_id,
|
"session_id": self.session_id,
|
||||||
@@ -567,6 +971,7 @@ class CrawlerRunConfig:
|
|||||||
"disable_cache": self.disable_cache,
|
"disable_cache": self.disable_cache,
|
||||||
"no_cache_read": self.no_cache_read,
|
"no_cache_read": self.no_cache_read,
|
||||||
"no_cache_write": self.no_cache_write,
|
"no_cache_write": self.no_cache_write,
|
||||||
|
"shared_data": self.shared_data,
|
||||||
"wait_until": self.wait_until,
|
"wait_until": self.wait_until,
|
||||||
"page_timeout": self.page_timeout,
|
"page_timeout": self.page_timeout,
|
||||||
"wait_for": self.wait_for,
|
"wait_for": self.wait_for,
|
||||||
@@ -597,7 +1002,90 @@ class CrawlerRunConfig:
|
|||||||
"exclude_external_links": self.exclude_external_links,
|
"exclude_external_links": self.exclude_external_links,
|
||||||
"exclude_social_media_links": self.exclude_social_media_links,
|
"exclude_social_media_links": self.exclude_social_media_links,
|
||||||
"exclude_domains": self.exclude_domains,
|
"exclude_domains": self.exclude_domains,
|
||||||
|
"exclude_internal_links": self.exclude_internal_links,
|
||||||
"verbose": self.verbose,
|
"verbose": self.verbose,
|
||||||
"log_console": self.log_console,
|
"log_console": self.log_console,
|
||||||
|
"method": self.method,
|
||||||
|
"stream": self.stream,
|
||||||
|
"check_robots_txt": self.check_robots_txt,
|
||||||
|
"user_agent": self.user_agent,
|
||||||
|
"user_agent_mode": self.user_agent_mode,
|
||||||
|
"user_agent_generator_config": self.user_agent_generator_config,
|
||||||
|
"deep_crawl_strategy": self.deep_crawl_strategy,
|
||||||
"url": self.url,
|
"url": self.url,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
def clone(self, **kwargs):
|
||||||
|
"""Create a copy of this configuration with updated values.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
**kwargs: Key-value pairs of configuration options to update
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
CrawlerRunConfig: A new instance with the specified updates
|
||||||
|
|
||||||
|
Example:
|
||||||
|
```python
|
||||||
|
# Create a new config with streaming enabled
|
||||||
|
stream_config = config.clone(stream=True)
|
||||||
|
|
||||||
|
# Create a new config with multiple updates
|
||||||
|
new_config = config.clone(
|
||||||
|
stream=True,
|
||||||
|
cache_mode=CacheMode.BYPASS,
|
||||||
|
verbose=True
|
||||||
|
)
|
||||||
|
```
|
||||||
|
"""
|
||||||
|
config_dict = self.to_dict()
|
||||||
|
config_dict.update(kwargs)
|
||||||
|
return CrawlerRunConfig.from_kwargs(config_dict)
|
||||||
|
|
||||||
|
|
||||||
|
class LlmConfig:
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
provider: str = DEFAULT_PROVIDER,
|
||||||
|
api_token: Optional[str] = None,
|
||||||
|
base_url: Optional[str] = None,
|
||||||
|
):
|
||||||
|
"""Configuaration class for LLM provider and API token."""
|
||||||
|
self.provider = provider
|
||||||
|
if api_token and not api_token.startswith("env:"):
|
||||||
|
self.api_token = api_token
|
||||||
|
elif api_token and api_token.startswith("env:"):
|
||||||
|
self.api_token = os.getenv(api_token[4:])
|
||||||
|
else:
|
||||||
|
self.api_token = PROVIDER_MODELS.get(provider, "no-token") or os.getenv(
|
||||||
|
"OPENAI_API_KEY"
|
||||||
|
)
|
||||||
|
self.base_url = base_url
|
||||||
|
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def from_kwargs(kwargs: dict) -> "LlmConfig":
|
||||||
|
return LlmConfig(
|
||||||
|
provider=kwargs.get("provider", DEFAULT_PROVIDER),
|
||||||
|
api_token=kwargs.get("api_token"),
|
||||||
|
base_url=kwargs.get("base_url"),
|
||||||
|
)
|
||||||
|
|
||||||
|
def to_dict(self):
|
||||||
|
return {
|
||||||
|
"provider": self.provider,
|
||||||
|
"api_token": self.api_token,
|
||||||
|
"base_url": self.base_url
|
||||||
|
}
|
||||||
|
|
||||||
|
def clone(self, **kwargs):
|
||||||
|
"""Create a copy of this configuration with updated values.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
**kwargs: Key-value pairs of configuration options to update
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
LLMConfig: A new instance with the specified updates
|
||||||
|
"""
|
||||||
|
config_dict = self.to_dict()
|
||||||
|
config_dict.update(kwargs)
|
||||||
|
return LlmConfig.from_kwargs(config_dict)
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -1,27 +1,30 @@
|
|||||||
import os, sys
|
import os
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
import aiosqlite
|
import aiosqlite
|
||||||
import asyncio
|
import asyncio
|
||||||
from typing import Optional, Tuple, Dict
|
from typing import Optional, Dict
|
||||||
from contextlib import asynccontextmanager
|
from contextlib import asynccontextmanager
|
||||||
import logging
|
import logging
|
||||||
import json # Added for serialization/deserialization
|
import json # Added for serialization/deserialization
|
||||||
from .utils import ensure_content_dirs, generate_content_hash
|
from .utils import ensure_content_dirs, generate_content_hash
|
||||||
from .models import CrawlResult, MarkdownGenerationResult
|
from .models import CrawlResult, MarkdownGenerationResult, StringCompatibleMarkdown
|
||||||
import xxhash
|
|
||||||
import aiofiles
|
import aiofiles
|
||||||
from .config import NEED_MIGRATION
|
from .utils import VersionManager
|
||||||
from .version_manager import VersionManager
|
|
||||||
from .async_logger import AsyncLogger
|
from .async_logger import AsyncLogger
|
||||||
from .utils import get_error_context, create_box_message
|
from .utils import get_error_context, create_box_message
|
||||||
# Set up logging
|
|
||||||
logging.basicConfig(level=logging.INFO)
|
|
||||||
logger = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
base_directory = DB_PATH = os.path.join(os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".crawl4ai")
|
# Set up logging
|
||||||
|
# logging.basicConfig(level=logging.INFO)
|
||||||
|
# logger = logging.getLogger(__name__)
|
||||||
|
# logger.setLevel(logging.INFO)
|
||||||
|
|
||||||
|
base_directory = DB_PATH = os.path.join(
|
||||||
|
os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".crawl4ai"
|
||||||
|
)
|
||||||
os.makedirs(DB_PATH, exist_ok=True)
|
os.makedirs(DB_PATH, exist_ok=True)
|
||||||
DB_PATH = os.path.join(base_directory, "crawl4ai.db")
|
DB_PATH = os.path.join(base_directory, "crawl4ai.db")
|
||||||
|
|
||||||
|
|
||||||
class AsyncDatabaseManager:
|
class AsyncDatabaseManager:
|
||||||
def __init__(self, pool_size: int = 10, max_retries: int = 3):
|
def __init__(self, pool_size: int = 10, max_retries: int = 3):
|
||||||
self.db_path = DB_PATH
|
self.db_path = DB_PATH
|
||||||
@@ -37,10 +40,9 @@ class AsyncDatabaseManager:
|
|||||||
self.logger = AsyncLogger(
|
self.logger = AsyncLogger(
|
||||||
log_file=os.path.join(base_directory, ".crawl4ai", "crawler_db.log"),
|
log_file=os.path.join(base_directory, ".crawl4ai", "crawler_db.log"),
|
||||||
verbose=False,
|
verbose=False,
|
||||||
tag_width=10
|
tag_width=10,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
async def initialize(self):
|
async def initialize(self):
|
||||||
"""Initialize the database and connection pool"""
|
"""Initialize the database and connection pool"""
|
||||||
try:
|
try:
|
||||||
@@ -67,28 +69,32 @@ class AsyncDatabaseManager:
|
|||||||
if needs_update:
|
if needs_update:
|
||||||
self.logger.info("New version detected, running updates", tag="INIT")
|
self.logger.info("New version detected, running updates", tag="INIT")
|
||||||
await self.update_db_schema()
|
await self.update_db_schema()
|
||||||
from .migrations import run_migration # Import here to avoid circular imports
|
from .migrations import (
|
||||||
|
run_migration,
|
||||||
|
) # Import here to avoid circular imports
|
||||||
|
|
||||||
await run_migration()
|
await run_migration()
|
||||||
self.version_manager.update_version() # Update stored version after successful migration
|
self.version_manager.update_version() # Update stored version after successful migration
|
||||||
self.logger.success("Version update completed successfully", tag="COMPLETE")
|
self.logger.success(
|
||||||
|
"Version update completed successfully", tag="COMPLETE"
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
self.logger.success("Database initialization completed successfully", tag="COMPLETE")
|
self.logger.success(
|
||||||
|
"Database initialization completed successfully", tag="COMPLETE"
|
||||||
|
)
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
self.logger.error(
|
self.logger.error(
|
||||||
message="Database initialization error: {error}",
|
message="Database initialization error: {error}",
|
||||||
tag="ERROR",
|
tag="ERROR",
|
||||||
params={"error": str(e)}
|
params={"error": str(e)},
|
||||||
)
|
)
|
||||||
self.logger.info(
|
self.logger.info(
|
||||||
message="Database will be initialized on first use",
|
message="Database will be initialized on first use", tag="INIT"
|
||||||
tag="INIT"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
raise
|
raise
|
||||||
|
|
||||||
|
|
||||||
async def cleanup(self):
|
async def cleanup(self):
|
||||||
"""Cleanup connections when shutting down"""
|
"""Cleanup connections when shutting down"""
|
||||||
async with self.pool_lock:
|
async with self.pool_lock:
|
||||||
@@ -107,6 +113,7 @@ class AsyncDatabaseManager:
|
|||||||
self._initialized = True
|
self._initialized = True
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
error_context = get_error_context(sys.exc_info())
|
error_context = get_error_context(sys.exc_info())
|
||||||
self.logger.error(
|
self.logger.error(
|
||||||
message="Database initialization failed:\n{error}\n\nContext:\n{context}\n\nTraceback:\n{traceback}",
|
message="Database initialization failed:\n{error}\n\nContext:\n{context}\n\nTraceback:\n{traceback}",
|
||||||
@@ -115,8 +122,8 @@ class AsyncDatabaseManager:
|
|||||||
params={
|
params={
|
||||||
"error": str(e),
|
"error": str(e),
|
||||||
"context": error_context["code_context"],
|
"context": error_context["code_context"],
|
||||||
"traceback": error_context["full_traceback"]
|
"traceback": error_context["full_traceback"],
|
||||||
}
|
},
|
||||||
)
|
)
|
||||||
raise
|
raise
|
||||||
|
|
||||||
@@ -127,29 +134,40 @@ class AsyncDatabaseManager:
|
|||||||
async with self.pool_lock:
|
async with self.pool_lock:
|
||||||
if task_id not in self.connection_pool:
|
if task_id not in self.connection_pool:
|
||||||
try:
|
try:
|
||||||
conn = await aiosqlite.connect(
|
conn = await aiosqlite.connect(self.db_path, timeout=30.0)
|
||||||
self.db_path,
|
await conn.execute("PRAGMA journal_mode = WAL")
|
||||||
timeout=30.0
|
await conn.execute("PRAGMA busy_timeout = 5000")
|
||||||
)
|
|
||||||
await conn.execute('PRAGMA journal_mode = WAL')
|
|
||||||
await conn.execute('PRAGMA busy_timeout = 5000')
|
|
||||||
|
|
||||||
# Verify database structure
|
# Verify database structure
|
||||||
async with conn.execute("PRAGMA table_info(crawled_data)") as cursor:
|
async with conn.execute(
|
||||||
|
"PRAGMA table_info(crawled_data)"
|
||||||
|
) as cursor:
|
||||||
columns = await cursor.fetchall()
|
columns = await cursor.fetchall()
|
||||||
column_names = [col[1] for col in columns]
|
column_names = [col[1] for col in columns]
|
||||||
expected_columns = {
|
expected_columns = {
|
||||||
'url', 'html', 'cleaned_html', 'markdown', 'extracted_content',
|
"url",
|
||||||
'success', 'media', 'links', 'metadata', 'screenshot',
|
"html",
|
||||||
'response_headers', 'downloaded_files'
|
"cleaned_html",
|
||||||
|
"markdown",
|
||||||
|
"extracted_content",
|
||||||
|
"success",
|
||||||
|
"media",
|
||||||
|
"links",
|
||||||
|
"metadata",
|
||||||
|
"screenshot",
|
||||||
|
"response_headers",
|
||||||
|
"downloaded_files",
|
||||||
}
|
}
|
||||||
missing_columns = expected_columns - set(column_names)
|
missing_columns = expected_columns - set(column_names)
|
||||||
if missing_columns:
|
if missing_columns:
|
||||||
raise ValueError(f"Database missing columns: {missing_columns}")
|
raise ValueError(
|
||||||
|
f"Database missing columns: {missing_columns}"
|
||||||
|
)
|
||||||
|
|
||||||
self.connection_pool[task_id] = conn
|
self.connection_pool[task_id] = conn
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
error_context = get_error_context(sys.exc_info())
|
error_context = get_error_context(sys.exc_info())
|
||||||
error_message = (
|
error_message = (
|
||||||
f"Unexpected error in db get_connection at line {error_context['line_no']} "
|
f"Unexpected error in db get_connection at line {error_context['line_no']} "
|
||||||
@@ -158,7 +176,7 @@ class AsyncDatabaseManager:
|
|||||||
f"Code context:\n{error_context['code_context']}"
|
f"Code context:\n{error_context['code_context']}"
|
||||||
)
|
)
|
||||||
self.logger.error(
|
self.logger.error(
|
||||||
message=create_box_message(error_message, type= "error"),
|
message=create_box_message(error_message, type="error"),
|
||||||
)
|
)
|
||||||
|
|
||||||
raise
|
raise
|
||||||
@@ -167,6 +185,7 @@ class AsyncDatabaseManager:
|
|||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
error_context = get_error_context(sys.exc_info())
|
error_context = get_error_context(sys.exc_info())
|
||||||
error_message = (
|
error_message = (
|
||||||
f"Unexpected error in db get_connection at line {error_context['line_no']} "
|
f"Unexpected error in db get_connection at line {error_context['line_no']} "
|
||||||
@@ -175,7 +194,7 @@ class AsyncDatabaseManager:
|
|||||||
f"Code context:\n{error_context['code_context']}"
|
f"Code context:\n{error_context['code_context']}"
|
||||||
)
|
)
|
||||||
self.logger.error(
|
self.logger.error(
|
||||||
message=create_box_message(error_message, type= "error"),
|
message=create_box_message(error_message, type="error"),
|
||||||
)
|
)
|
||||||
raise
|
raise
|
||||||
finally:
|
finally:
|
||||||
@@ -185,7 +204,6 @@ class AsyncDatabaseManager:
|
|||||||
del self.connection_pool[task_id]
|
del self.connection_pool[task_id]
|
||||||
self.connection_semaphore.release()
|
self.connection_semaphore.release()
|
||||||
|
|
||||||
|
|
||||||
async def execute_with_retry(self, operation, *args):
|
async def execute_with_retry(self, operation, *args):
|
||||||
"""Execute database operations with retry logic"""
|
"""Execute database operations with retry logic"""
|
||||||
for attempt in range(self.max_retries):
|
for attempt in range(self.max_retries):
|
||||||
@@ -200,10 +218,7 @@ class AsyncDatabaseManager:
|
|||||||
message="Operation failed after {retries} attempts: {error}",
|
message="Operation failed after {retries} attempts: {error}",
|
||||||
tag="ERROR",
|
tag="ERROR",
|
||||||
force_verbose=True,
|
force_verbose=True,
|
||||||
params={
|
params={"retries": self.max_retries, "error": str(e)},
|
||||||
"retries": self.max_retries,
|
|
||||||
"error": str(e)
|
|
||||||
}
|
|
||||||
)
|
)
|
||||||
raise
|
raise
|
||||||
await asyncio.sleep(1 * (attempt + 1)) # Exponential backoff
|
await asyncio.sleep(1 * (attempt + 1)) # Exponential backoff
|
||||||
@@ -211,7 +226,8 @@ class AsyncDatabaseManager:
|
|||||||
async def ainit_db(self):
|
async def ainit_db(self):
|
||||||
"""Initialize database schema"""
|
"""Initialize database schema"""
|
||||||
async with aiosqlite.connect(self.db_path, timeout=30.0) as db:
|
async with aiosqlite.connect(self.db_path, timeout=30.0) as db:
|
||||||
await db.execute('''
|
await db.execute(
|
||||||
|
"""
|
||||||
CREATE TABLE IF NOT EXISTS crawled_data (
|
CREATE TABLE IF NOT EXISTS crawled_data (
|
||||||
url TEXT PRIMARY KEY,
|
url TEXT PRIMARY KEY,
|
||||||
html TEXT,
|
html TEXT,
|
||||||
@@ -226,11 +242,10 @@ class AsyncDatabaseManager:
|
|||||||
response_headers TEXT DEFAULT "{}",
|
response_headers TEXT DEFAULT "{}",
|
||||||
downloaded_files TEXT DEFAULT "{}" -- New column added
|
downloaded_files TEXT DEFAULT "{}" -- New column added
|
||||||
)
|
)
|
||||||
''')
|
"""
|
||||||
|
)
|
||||||
await db.commit()
|
await db.commit()
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
async def update_db_schema(self):
|
async def update_db_schema(self):
|
||||||
"""Update database schema if needed"""
|
"""Update database schema if needed"""
|
||||||
async with aiosqlite.connect(self.db_path, timeout=30.0) as db:
|
async with aiosqlite.connect(self.db_path, timeout=30.0) as db:
|
||||||
@@ -239,7 +254,14 @@ class AsyncDatabaseManager:
|
|||||||
column_names = [column[1] for column in columns]
|
column_names = [column[1] for column in columns]
|
||||||
|
|
||||||
# List of new columns to add
|
# List of new columns to add
|
||||||
new_columns = ['media', 'links', 'metadata', 'screenshot', 'response_headers', 'downloaded_files']
|
new_columns = [
|
||||||
|
"media",
|
||||||
|
"links",
|
||||||
|
"metadata",
|
||||||
|
"screenshot",
|
||||||
|
"response_headers",
|
||||||
|
"downloaded_files",
|
||||||
|
]
|
||||||
|
|
||||||
for column in new_columns:
|
for column in new_columns:
|
||||||
if column not in column_names:
|
if column not in column_names:
|
||||||
@@ -248,22 +270,26 @@ class AsyncDatabaseManager:
|
|||||||
|
|
||||||
async def aalter_db_add_column(self, new_column: str, db):
|
async def aalter_db_add_column(self, new_column: str, db):
|
||||||
"""Add new column to the database"""
|
"""Add new column to the database"""
|
||||||
if new_column == 'response_headers':
|
if new_column == "response_headers":
|
||||||
await db.execute(f'ALTER TABLE crawled_data ADD COLUMN {new_column} TEXT DEFAULT "{{}}"')
|
await db.execute(
|
||||||
|
f'ALTER TABLE crawled_data ADD COLUMN {new_column} TEXT DEFAULT "{{}}"'
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
await db.execute(f'ALTER TABLE crawled_data ADD COLUMN {new_column} TEXT DEFAULT ""')
|
await db.execute(
|
||||||
|
f'ALTER TABLE crawled_data ADD COLUMN {new_column} TEXT DEFAULT ""'
|
||||||
|
)
|
||||||
self.logger.info(
|
self.logger.info(
|
||||||
message="Added column '{column}' to the database",
|
message="Added column '{column}' to the database",
|
||||||
tag="INIT",
|
tag="INIT",
|
||||||
params={"column": new_column}
|
params={"column": new_column},
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
async def aget_cached_url(self, url: str) -> Optional[CrawlResult]:
|
async def aget_cached_url(self, url: str) -> Optional[CrawlResult]:
|
||||||
"""Retrieve cached URL data as CrawlResult"""
|
"""Retrieve cached URL data as CrawlResult"""
|
||||||
|
|
||||||
async def _get(db):
|
async def _get(db):
|
||||||
async with db.execute(
|
async with db.execute(
|
||||||
'SELECT * FROM crawled_data WHERE url = ?', (url,)
|
"SELECT * FROM crawled_data WHERE url = ?", (url,)
|
||||||
) as cursor:
|
) as cursor:
|
||||||
row = await cursor.fetchone()
|
row = await cursor.fetchone()
|
||||||
if not row:
|
if not row:
|
||||||
@@ -276,47 +302,68 @@ class AsyncDatabaseManager:
|
|||||||
|
|
||||||
# Load content from files using stored hashes
|
# Load content from files using stored hashes
|
||||||
content_fields = {
|
content_fields = {
|
||||||
'html': row_dict['html'],
|
"html": row_dict["html"],
|
||||||
'cleaned_html': row_dict['cleaned_html'],
|
"cleaned_html": row_dict["cleaned_html"],
|
||||||
'markdown': row_dict['markdown'],
|
"markdown": row_dict["markdown"],
|
||||||
'extracted_content': row_dict['extracted_content'],
|
"extracted_content": row_dict["extracted_content"],
|
||||||
'screenshot': row_dict['screenshot'],
|
"screenshot": row_dict["screenshot"],
|
||||||
'screenshots': row_dict['screenshot'],
|
"screenshots": row_dict["screenshot"],
|
||||||
}
|
}
|
||||||
|
|
||||||
for field, hash_value in content_fields.items():
|
for field, hash_value in content_fields.items():
|
||||||
if hash_value:
|
if hash_value:
|
||||||
content = await self._load_content(
|
content = await self._load_content(
|
||||||
hash_value,
|
hash_value,
|
||||||
field.split('_')[0] # Get content type from field name
|
field.split("_")[0], # Get content type from field name
|
||||||
)
|
)
|
||||||
row_dict[field] = content or ""
|
row_dict[field] = content or ""
|
||||||
else:
|
else:
|
||||||
row_dict[field] = ""
|
row_dict[field] = ""
|
||||||
|
|
||||||
# Parse JSON fields
|
# Parse JSON fields
|
||||||
json_fields = ['media', 'links', 'metadata', 'response_headers', 'markdown']
|
json_fields = [
|
||||||
|
"media",
|
||||||
|
"links",
|
||||||
|
"metadata",
|
||||||
|
"response_headers",
|
||||||
|
"markdown",
|
||||||
|
]
|
||||||
for field in json_fields:
|
for field in json_fields:
|
||||||
try:
|
try:
|
||||||
row_dict[field] = json.loads(row_dict[field]) if row_dict[field] else {}
|
row_dict[field] = (
|
||||||
|
json.loads(row_dict[field]) if row_dict[field] else {}
|
||||||
|
)
|
||||||
except json.JSONDecodeError:
|
except json.JSONDecodeError:
|
||||||
row_dict[field] = {}
|
# Very UGLY, never mention it to me please
|
||||||
|
if field == "markdown" and isinstance(row_dict[field], str):
|
||||||
|
row_dict[field] = MarkdownGenerationResult(
|
||||||
|
raw_markdown=row_dict[field] or "",
|
||||||
|
markdown_with_citations="",
|
||||||
|
references_markdown="",
|
||||||
|
fit_markdown="",
|
||||||
|
fit_html="",
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
row_dict[field] = {}
|
||||||
|
|
||||||
if isinstance(row_dict['markdown'], Dict):
|
if isinstance(row_dict["markdown"], Dict):
|
||||||
row_dict['markdown_v2'] = row_dict['markdown']
|
if row_dict["markdown"].get("raw_markdown"):
|
||||||
if row_dict['markdown'].get('raw_markdown'):
|
row_dict["markdown"] = row_dict["markdown"]["raw_markdown"]
|
||||||
row_dict['markdown'] = row_dict['markdown']['raw_markdown']
|
|
||||||
|
|
||||||
# Parse downloaded_files
|
# Parse downloaded_files
|
||||||
try:
|
try:
|
||||||
row_dict['downloaded_files'] = json.loads(row_dict['downloaded_files']) if row_dict['downloaded_files'] else []
|
row_dict["downloaded_files"] = (
|
||||||
|
json.loads(row_dict["downloaded_files"])
|
||||||
|
if row_dict["downloaded_files"]
|
||||||
|
else []
|
||||||
|
)
|
||||||
except json.JSONDecodeError:
|
except json.JSONDecodeError:
|
||||||
row_dict['downloaded_files'] = []
|
row_dict["downloaded_files"] = []
|
||||||
|
|
||||||
# Remove any fields not in CrawlResult model
|
# Remove any fields not in CrawlResult model
|
||||||
valid_fields = CrawlResult.__annotations__.keys()
|
valid_fields = CrawlResult.__annotations__.keys()
|
||||||
filtered_dict = {k: v for k, v in row_dict.items() if k in valid_fields}
|
filtered_dict = {k: v for k, v in row_dict.items() if k in valid_fields}
|
||||||
|
filtered_dict["markdown"] = row_dict["markdown"]
|
||||||
return CrawlResult(**filtered_dict)
|
return CrawlResult(**filtered_dict)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
@@ -326,7 +373,7 @@ class AsyncDatabaseManager:
|
|||||||
message="Error retrieving cached URL: {error}",
|
message="Error retrieving cached URL: {error}",
|
||||||
tag="ERROR",
|
tag="ERROR",
|
||||||
force_verbose=True,
|
force_verbose=True,
|
||||||
params={"error": str(e)}
|
params={"error": str(e)},
|
||||||
)
|
)
|
||||||
return None
|
return None
|
||||||
|
|
||||||
@@ -334,37 +381,52 @@ class AsyncDatabaseManager:
|
|||||||
"""Cache CrawlResult data"""
|
"""Cache CrawlResult data"""
|
||||||
# Store content files and get hashes
|
# Store content files and get hashes
|
||||||
content_map = {
|
content_map = {
|
||||||
'html': (result.html, 'html'),
|
"html": (result.html, "html"),
|
||||||
'cleaned_html': (result.cleaned_html or "", 'cleaned'),
|
"cleaned_html": (result.cleaned_html or "", "cleaned"),
|
||||||
'markdown': None,
|
"markdown": None,
|
||||||
'extracted_content': (result.extracted_content or "", 'extracted'),
|
"extracted_content": (result.extracted_content or "", "extracted"),
|
||||||
'screenshot': (result.screenshot or "", 'screenshots')
|
"screenshot": (result.screenshot or "", "screenshots"),
|
||||||
}
|
}
|
||||||
|
|
||||||
try:
|
try:
|
||||||
if isinstance(result.markdown, MarkdownGenerationResult):
|
if isinstance(result.markdown, StringCompatibleMarkdown):
|
||||||
content_map['markdown'] = (result.markdown.model_dump_json(), 'markdown')
|
content_map["markdown"] = (
|
||||||
elif hasattr(result, 'markdown_v2'):
|
result.markdown,
|
||||||
content_map['markdown'] = (result.markdown_v2.model_dump_json(), 'markdown')
|
"markdown",
|
||||||
|
)
|
||||||
|
elif isinstance(result.markdown, MarkdownGenerationResult):
|
||||||
|
content_map["markdown"] = (
|
||||||
|
result.markdown.model_dump_json(),
|
||||||
|
"markdown",
|
||||||
|
)
|
||||||
elif isinstance(result.markdown, str):
|
elif isinstance(result.markdown, str):
|
||||||
markdown_result = MarkdownGenerationResult(raw_markdown=result.markdown)
|
markdown_result = MarkdownGenerationResult(raw_markdown=result.markdown)
|
||||||
content_map['markdown'] = (markdown_result.model_dump_json(), 'markdown')
|
content_map["markdown"] = (
|
||||||
|
markdown_result.model_dump_json(),
|
||||||
|
"markdown",
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
content_map['markdown'] = (MarkdownGenerationResult().model_dump_json(), 'markdown')
|
content_map["markdown"] = (
|
||||||
|
MarkdownGenerationResult().model_dump_json(),
|
||||||
|
"markdown",
|
||||||
|
)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
self.logger.warning(
|
self.logger.warning(
|
||||||
message=f"Error processing markdown content: {str(e)}",
|
message=f"Error processing markdown content: {str(e)}", tag="WARNING"
|
||||||
tag="WARNING"
|
|
||||||
)
|
)
|
||||||
# Fallback to empty markdown result
|
# Fallback to empty markdown result
|
||||||
content_map['markdown'] = (MarkdownGenerationResult().model_dump_json(), 'markdown')
|
content_map["markdown"] = (
|
||||||
|
MarkdownGenerationResult().model_dump_json(),
|
||||||
|
"markdown",
|
||||||
|
)
|
||||||
|
|
||||||
content_hashes = {}
|
content_hashes = {}
|
||||||
for field, (content, content_type) in content_map.items():
|
for field, (content, content_type) in content_map.items():
|
||||||
content_hashes[field] = await self._store_content(content, content_type)
|
content_hashes[field] = await self._store_content(content, content_type)
|
||||||
|
|
||||||
async def _cache(db):
|
async def _cache(db):
|
||||||
await db.execute('''
|
await db.execute(
|
||||||
|
"""
|
||||||
INSERT INTO crawled_data (
|
INSERT INTO crawled_data (
|
||||||
url, html, cleaned_html, markdown,
|
url, html, cleaned_html, markdown,
|
||||||
extracted_content, success, media, links, metadata,
|
extracted_content, success, media, links, metadata,
|
||||||
@@ -383,20 +445,22 @@ class AsyncDatabaseManager:
|
|||||||
screenshot = excluded.screenshot,
|
screenshot = excluded.screenshot,
|
||||||
response_headers = excluded.response_headers,
|
response_headers = excluded.response_headers,
|
||||||
downloaded_files = excluded.downloaded_files
|
downloaded_files = excluded.downloaded_files
|
||||||
''', (
|
""",
|
||||||
result.url,
|
(
|
||||||
content_hashes['html'],
|
result.url,
|
||||||
content_hashes['cleaned_html'],
|
content_hashes["html"],
|
||||||
content_hashes['markdown'],
|
content_hashes["cleaned_html"],
|
||||||
content_hashes['extracted_content'],
|
content_hashes["markdown"],
|
||||||
result.success,
|
content_hashes["extracted_content"],
|
||||||
json.dumps(result.media),
|
result.success,
|
||||||
json.dumps(result.links),
|
json.dumps(result.media),
|
||||||
json.dumps(result.metadata or {}),
|
json.dumps(result.links),
|
||||||
content_hashes['screenshot'],
|
json.dumps(result.metadata or {}),
|
||||||
json.dumps(result.response_headers or {}),
|
content_hashes["screenshot"],
|
||||||
json.dumps(result.downloaded_files or [])
|
json.dumps(result.response_headers or {}),
|
||||||
))
|
json.dumps(result.downloaded_files or []),
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
await self.execute_with_retry(_cache)
|
await self.execute_with_retry(_cache)
|
||||||
@@ -405,14 +469,14 @@ class AsyncDatabaseManager:
|
|||||||
message="Error caching URL: {error}",
|
message="Error caching URL: {error}",
|
||||||
tag="ERROR",
|
tag="ERROR",
|
||||||
force_verbose=True,
|
force_verbose=True,
|
||||||
params={"error": str(e)}
|
params={"error": str(e)},
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
async def aget_total_count(self) -> int:
|
async def aget_total_count(self) -> int:
|
||||||
"""Get total number of cached URLs"""
|
"""Get total number of cached URLs"""
|
||||||
|
|
||||||
async def _count(db):
|
async def _count(db):
|
||||||
async with db.execute('SELECT COUNT(*) FROM crawled_data') as cursor:
|
async with db.execute("SELECT COUNT(*) FROM crawled_data") as cursor:
|
||||||
result = await cursor.fetchone()
|
result = await cursor.fetchone()
|
||||||
return result[0] if result else 0
|
return result[0] if result else 0
|
||||||
|
|
||||||
@@ -423,14 +487,15 @@ class AsyncDatabaseManager:
|
|||||||
message="Error getting total count: {error}",
|
message="Error getting total count: {error}",
|
||||||
tag="ERROR",
|
tag="ERROR",
|
||||||
force_verbose=True,
|
force_verbose=True,
|
||||||
params={"error": str(e)}
|
params={"error": str(e)},
|
||||||
)
|
)
|
||||||
return 0
|
return 0
|
||||||
|
|
||||||
async def aclear_db(self):
|
async def aclear_db(self):
|
||||||
"""Clear all data from the database"""
|
"""Clear all data from the database"""
|
||||||
|
|
||||||
async def _clear(db):
|
async def _clear(db):
|
||||||
await db.execute('DELETE FROM crawled_data')
|
await db.execute("DELETE FROM crawled_data")
|
||||||
|
|
||||||
try:
|
try:
|
||||||
await self.execute_with_retry(_clear)
|
await self.execute_with_retry(_clear)
|
||||||
@@ -439,13 +504,14 @@ class AsyncDatabaseManager:
|
|||||||
message="Error clearing database: {error}",
|
message="Error clearing database: {error}",
|
||||||
tag="ERROR",
|
tag="ERROR",
|
||||||
force_verbose=True,
|
force_verbose=True,
|
||||||
params={"error": str(e)}
|
params={"error": str(e)},
|
||||||
)
|
)
|
||||||
|
|
||||||
async def aflush_db(self):
|
async def aflush_db(self):
|
||||||
"""Drop the entire table"""
|
"""Drop the entire table"""
|
||||||
|
|
||||||
async def _flush(db):
|
async def _flush(db):
|
||||||
await db.execute('DROP TABLE IF EXISTS crawled_data')
|
await db.execute("DROP TABLE IF EXISTS crawled_data")
|
||||||
|
|
||||||
try:
|
try:
|
||||||
await self.execute_with_retry(_flush)
|
await self.execute_with_retry(_flush)
|
||||||
@@ -454,10 +520,9 @@ class AsyncDatabaseManager:
|
|||||||
message="Error flushing database: {error}",
|
message="Error flushing database: {error}",
|
||||||
tag="ERROR",
|
tag="ERROR",
|
||||||
force_verbose=True,
|
force_verbose=True,
|
||||||
params={"error": str(e)}
|
params={"error": str(e)},
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
async def _store_content(self, content: str, content_type: str) -> str:
|
async def _store_content(self, content: str, content_type: str) -> str:
|
||||||
"""Store content in filesystem and return hash"""
|
"""Store content in filesystem and return hash"""
|
||||||
if not content:
|
if not content:
|
||||||
@@ -468,28 +533,31 @@ class AsyncDatabaseManager:
|
|||||||
|
|
||||||
# Only write if file doesn't exist
|
# Only write if file doesn't exist
|
||||||
if not os.path.exists(file_path):
|
if not os.path.exists(file_path):
|
||||||
async with aiofiles.open(file_path, 'w', encoding='utf-8') as f:
|
async with aiofiles.open(file_path, "w", encoding="utf-8") as f:
|
||||||
await f.write(content)
|
await f.write(content)
|
||||||
|
|
||||||
return content_hash
|
return content_hash
|
||||||
|
|
||||||
async def _load_content(self, content_hash: str, content_type: str) -> Optional[str]:
|
async def _load_content(
|
||||||
|
self, content_hash: str, content_type: str
|
||||||
|
) -> Optional[str]:
|
||||||
"""Load content from filesystem by hash"""
|
"""Load content from filesystem by hash"""
|
||||||
if not content_hash:
|
if not content_hash:
|
||||||
return None
|
return None
|
||||||
|
|
||||||
file_path = os.path.join(self.content_paths[content_type], content_hash)
|
file_path = os.path.join(self.content_paths[content_type], content_hash)
|
||||||
try:
|
try:
|
||||||
async with aiofiles.open(file_path, 'r', encoding='utf-8') as f:
|
async with aiofiles.open(file_path, "r", encoding="utf-8") as f:
|
||||||
return await f.read()
|
return await f.read()
|
||||||
except:
|
except:
|
||||||
self.logger.error(
|
self.logger.error(
|
||||||
message="Failed to load content: {file_path}",
|
message="Failed to load content: {file_path}",
|
||||||
tag="ERROR",
|
tag="ERROR",
|
||||||
force_verbose=True,
|
force_verbose=True,
|
||||||
params={"file_path": file_path}
|
params={"file_path": file_path},
|
||||||
)
|
)
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
||||||
# Create a singleton instance
|
# Create a singleton instance
|
||||||
async_db_manager = AsyncDatabaseManager()
|
async_db_manager = AsyncDatabaseManager()
|
||||||
|
|||||||
647
crawl4ai/async_dispatcher.py
Normal file
647
crawl4ai/async_dispatcher.py
Normal file
@@ -0,0 +1,647 @@
|
|||||||
|
from typing import Dict, Optional, List, Tuple
|
||||||
|
from .async_configs import CrawlerRunConfig
|
||||||
|
from .models import (
|
||||||
|
CrawlResult,
|
||||||
|
CrawlerTaskResult,
|
||||||
|
CrawlStatus,
|
||||||
|
DisplayMode,
|
||||||
|
CrawlStats,
|
||||||
|
DomainState,
|
||||||
|
)
|
||||||
|
|
||||||
|
from rich.live import Live
|
||||||
|
from rich.table import Table
|
||||||
|
from rich.console import Console
|
||||||
|
from rich import box
|
||||||
|
from datetime import timedelta
|
||||||
|
from collections.abc import AsyncGenerator
|
||||||
|
import time
|
||||||
|
import psutil
|
||||||
|
import asyncio
|
||||||
|
import uuid
|
||||||
|
|
||||||
|
from urllib.parse import urlparse
|
||||||
|
import random
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
|
||||||
|
from math import inf as infinity
|
||||||
|
|
||||||
|
|
||||||
|
class RateLimiter:
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
base_delay: Tuple[float, float] = (1.0, 3.0),
|
||||||
|
max_delay: float = 60.0,
|
||||||
|
max_retries: int = 3,
|
||||||
|
rate_limit_codes: List[int] = None,
|
||||||
|
):
|
||||||
|
self.base_delay = base_delay
|
||||||
|
self.max_delay = max_delay
|
||||||
|
self.max_retries = max_retries
|
||||||
|
self.rate_limit_codes = rate_limit_codes or [429, 503]
|
||||||
|
self.domains: Dict[str, DomainState] = {}
|
||||||
|
|
||||||
|
def get_domain(self, url: str) -> str:
|
||||||
|
return urlparse(url).netloc
|
||||||
|
|
||||||
|
async def wait_if_needed(self, url: str) -> None:
|
||||||
|
domain = self.get_domain(url)
|
||||||
|
state = self.domains.get(domain)
|
||||||
|
|
||||||
|
if not state:
|
||||||
|
self.domains[domain] = DomainState()
|
||||||
|
state = self.domains[domain]
|
||||||
|
|
||||||
|
now = time.time()
|
||||||
|
if state.last_request_time:
|
||||||
|
wait_time = max(0, state.current_delay - (now - state.last_request_time))
|
||||||
|
if wait_time > 0:
|
||||||
|
await asyncio.sleep(wait_time)
|
||||||
|
|
||||||
|
# Random delay within base range if no current delay
|
||||||
|
if state.current_delay == 0:
|
||||||
|
state.current_delay = random.uniform(*self.base_delay)
|
||||||
|
|
||||||
|
state.last_request_time = time.time()
|
||||||
|
|
||||||
|
def update_delay(self, url: str, status_code: int) -> bool:
|
||||||
|
domain = self.get_domain(url)
|
||||||
|
state = self.domains[domain]
|
||||||
|
|
||||||
|
if status_code in self.rate_limit_codes:
|
||||||
|
state.fail_count += 1
|
||||||
|
if state.fail_count > self.max_retries:
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Exponential backoff with random jitter
|
||||||
|
state.current_delay = min(
|
||||||
|
state.current_delay * 2 * random.uniform(0.75, 1.25), self.max_delay
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
# Gradually reduce delay on success
|
||||||
|
state.current_delay = max(
|
||||||
|
random.uniform(*self.base_delay), state.current_delay * 0.75
|
||||||
|
)
|
||||||
|
state.fail_count = 0
|
||||||
|
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
class CrawlerMonitor:
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
max_visible_rows: int = 15,
|
||||||
|
display_mode: DisplayMode = DisplayMode.DETAILED,
|
||||||
|
):
|
||||||
|
self.console = Console()
|
||||||
|
self.max_visible_rows = max_visible_rows
|
||||||
|
self.display_mode = display_mode
|
||||||
|
self.stats: Dict[str, CrawlStats] = {}
|
||||||
|
self.process = psutil.Process()
|
||||||
|
self.start_time = time.time()
|
||||||
|
self.live = Live(self._create_table(), refresh_per_second=2)
|
||||||
|
|
||||||
|
def start(self):
|
||||||
|
self.live.start()
|
||||||
|
|
||||||
|
def stop(self):
|
||||||
|
self.live.stop()
|
||||||
|
|
||||||
|
def add_task(self, task_id: str, url: str):
|
||||||
|
self.stats[task_id] = CrawlStats(
|
||||||
|
task_id=task_id, url=url, status=CrawlStatus.QUEUED
|
||||||
|
)
|
||||||
|
self.live.update(self._create_table())
|
||||||
|
|
||||||
|
def update_task(self, task_id: str, **kwargs):
|
||||||
|
if task_id in self.stats:
|
||||||
|
for key, value in kwargs.items():
|
||||||
|
setattr(self.stats[task_id], key, value)
|
||||||
|
self.live.update(self._create_table())
|
||||||
|
|
||||||
|
def _create_aggregated_table(self) -> Table:
|
||||||
|
"""Creates a compact table showing only aggregated statistics"""
|
||||||
|
table = Table(
|
||||||
|
box=box.ROUNDED,
|
||||||
|
title="Crawler Status Overview",
|
||||||
|
title_style="bold magenta",
|
||||||
|
header_style="bold blue",
|
||||||
|
show_lines=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Calculate statistics
|
||||||
|
total_tasks = len(self.stats)
|
||||||
|
queued = sum(
|
||||||
|
1 for stat in self.stats.values() if stat.status == CrawlStatus.QUEUED
|
||||||
|
)
|
||||||
|
in_progress = sum(
|
||||||
|
1 for stat in self.stats.values() if stat.status == CrawlStatus.IN_PROGRESS
|
||||||
|
)
|
||||||
|
completed = sum(
|
||||||
|
1 for stat in self.stats.values() if stat.status == CrawlStatus.COMPLETED
|
||||||
|
)
|
||||||
|
failed = sum(
|
||||||
|
1 for stat in self.stats.values() if stat.status == CrawlStatus.FAILED
|
||||||
|
)
|
||||||
|
|
||||||
|
# Memory statistics
|
||||||
|
current_memory = self.process.memory_info().rss / (1024 * 1024)
|
||||||
|
total_task_memory = sum(stat.memory_usage for stat in self.stats.values())
|
||||||
|
peak_memory = max(
|
||||||
|
(stat.peak_memory for stat in self.stats.values()), default=0.0
|
||||||
|
)
|
||||||
|
|
||||||
|
# Duration
|
||||||
|
duration = time.time() - self.start_time
|
||||||
|
|
||||||
|
# Create status row
|
||||||
|
table.add_column("Status", style="bold cyan")
|
||||||
|
table.add_column("Count", justify="right")
|
||||||
|
table.add_column("Percentage", justify="right")
|
||||||
|
|
||||||
|
table.add_row("Total Tasks", str(total_tasks), "100%")
|
||||||
|
table.add_row(
|
||||||
|
"[yellow]In Queue[/yellow]",
|
||||||
|
str(queued),
|
||||||
|
f"{(queued / total_tasks * 100):.1f}%" if total_tasks > 0 else "0%",
|
||||||
|
)
|
||||||
|
table.add_row(
|
||||||
|
"[blue]In Progress[/blue]",
|
||||||
|
str(in_progress),
|
||||||
|
f"{(in_progress / total_tasks * 100):.1f}%" if total_tasks > 0 else "0%",
|
||||||
|
)
|
||||||
|
table.add_row(
|
||||||
|
"[green]Completed[/green]",
|
||||||
|
str(completed),
|
||||||
|
f"{(completed / total_tasks * 100):.1f}%" if total_tasks > 0 else "0%",
|
||||||
|
)
|
||||||
|
table.add_row(
|
||||||
|
"[red]Failed[/red]",
|
||||||
|
str(failed),
|
||||||
|
f"{(failed / total_tasks * 100):.1f}%" if total_tasks > 0 else "0%",
|
||||||
|
)
|
||||||
|
|
||||||
|
# Add memory information
|
||||||
|
table.add_section()
|
||||||
|
table.add_row(
|
||||||
|
"[magenta]Current Memory[/magenta]", f"{current_memory:.1f} MB", ""
|
||||||
|
)
|
||||||
|
table.add_row(
|
||||||
|
"[magenta]Total Task Memory[/magenta]", f"{total_task_memory:.1f} MB", ""
|
||||||
|
)
|
||||||
|
table.add_row(
|
||||||
|
"[magenta]Peak Task Memory[/magenta]", f"{peak_memory:.1f} MB", ""
|
||||||
|
)
|
||||||
|
table.add_row(
|
||||||
|
"[yellow]Runtime[/yellow]",
|
||||||
|
str(timedelta(seconds=int(duration))),
|
||||||
|
"",
|
||||||
|
)
|
||||||
|
|
||||||
|
return table
|
||||||
|
|
||||||
|
def _create_detailed_table(self) -> Table:
|
||||||
|
table = Table(
|
||||||
|
box=box.ROUNDED,
|
||||||
|
title="Crawler Performance Monitor",
|
||||||
|
title_style="bold magenta",
|
||||||
|
header_style="bold blue",
|
||||||
|
)
|
||||||
|
|
||||||
|
# Add columns
|
||||||
|
table.add_column("Task ID", style="cyan", no_wrap=True)
|
||||||
|
table.add_column("URL", style="cyan", no_wrap=True)
|
||||||
|
table.add_column("Status", style="bold")
|
||||||
|
table.add_column("Memory (MB)", justify="right")
|
||||||
|
table.add_column("Peak (MB)", justify="right")
|
||||||
|
table.add_column("Duration", justify="right")
|
||||||
|
table.add_column("Info", style="italic")
|
||||||
|
|
||||||
|
# Add summary row
|
||||||
|
total_memory = sum(stat.memory_usage for stat in self.stats.values())
|
||||||
|
active_count = sum(
|
||||||
|
1 for stat in self.stats.values() if stat.status == CrawlStatus.IN_PROGRESS
|
||||||
|
)
|
||||||
|
completed_count = sum(
|
||||||
|
1 for stat in self.stats.values() if stat.status == CrawlStatus.COMPLETED
|
||||||
|
)
|
||||||
|
failed_count = sum(
|
||||||
|
1 for stat in self.stats.values() if stat.status == CrawlStatus.FAILED
|
||||||
|
)
|
||||||
|
|
||||||
|
table.add_row(
|
||||||
|
"[bold yellow]SUMMARY",
|
||||||
|
f"Total: {len(self.stats)}",
|
||||||
|
f"Active: {active_count}",
|
||||||
|
f"{total_memory:.1f}",
|
||||||
|
f"{self.process.memory_info().rss / (1024 * 1024):.1f}",
|
||||||
|
str(
|
||||||
|
timedelta(
|
||||||
|
seconds=int(time.time() - self.start_time)
|
||||||
|
)
|
||||||
|
),
|
||||||
|
f"✓{completed_count} ✗{failed_count}",
|
||||||
|
style="bold",
|
||||||
|
)
|
||||||
|
|
||||||
|
table.add_section()
|
||||||
|
|
||||||
|
# Add rows for each task
|
||||||
|
visible_stats = sorted(
|
||||||
|
self.stats.values(),
|
||||||
|
key=lambda x: (
|
||||||
|
x.status != CrawlStatus.IN_PROGRESS,
|
||||||
|
x.status != CrawlStatus.QUEUED,
|
||||||
|
x.end_time or infinity,
|
||||||
|
),
|
||||||
|
)[: self.max_visible_rows]
|
||||||
|
|
||||||
|
for stat in visible_stats:
|
||||||
|
status_style = {
|
||||||
|
CrawlStatus.QUEUED: "white",
|
||||||
|
CrawlStatus.IN_PROGRESS: "yellow",
|
||||||
|
CrawlStatus.COMPLETED: "green",
|
||||||
|
CrawlStatus.FAILED: "red",
|
||||||
|
}[stat.status]
|
||||||
|
|
||||||
|
table.add_row(
|
||||||
|
stat.task_id[:8], # Show first 8 chars of task ID
|
||||||
|
stat.url[:40] + "..." if len(stat.url) > 40 else stat.url,
|
||||||
|
f"[{status_style}]{stat.status.value}[/{status_style}]",
|
||||||
|
f"{stat.memory_usage:.1f}",
|
||||||
|
f"{stat.peak_memory:.1f}",
|
||||||
|
stat.duration,
|
||||||
|
stat.error_message[:40] if stat.error_message else "",
|
||||||
|
)
|
||||||
|
|
||||||
|
return table
|
||||||
|
|
||||||
|
def _create_table(self) -> Table:
|
||||||
|
"""Creates the appropriate table based on display mode"""
|
||||||
|
if self.display_mode == DisplayMode.AGGREGATED:
|
||||||
|
return self._create_aggregated_table()
|
||||||
|
return self._create_detailed_table()
|
||||||
|
|
||||||
|
|
||||||
|
class BaseDispatcher(ABC):
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
rate_limiter: Optional[RateLimiter] = None,
|
||||||
|
monitor: Optional[CrawlerMonitor] = None,
|
||||||
|
):
|
||||||
|
self.crawler = None
|
||||||
|
self._domain_last_hit: Dict[str, float] = {}
|
||||||
|
self.concurrent_sessions = 0
|
||||||
|
self.rate_limiter = rate_limiter
|
||||||
|
self.monitor = monitor
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def crawl_url(
|
||||||
|
self,
|
||||||
|
url: str,
|
||||||
|
config: CrawlerRunConfig,
|
||||||
|
task_id: str,
|
||||||
|
monitor: Optional[CrawlerMonitor] = None,
|
||||||
|
) -> CrawlerTaskResult:
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def run_urls(
|
||||||
|
self,
|
||||||
|
urls: List[str],
|
||||||
|
crawler: "AsyncWebCrawler", # noqa: F821
|
||||||
|
config: CrawlerRunConfig,
|
||||||
|
monitor: Optional[CrawlerMonitor] = None,
|
||||||
|
) -> List[CrawlerTaskResult]:
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class MemoryAdaptiveDispatcher(BaseDispatcher):
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
memory_threshold_percent: float = 90.0,
|
||||||
|
check_interval: float = 1.0,
|
||||||
|
max_session_permit: int = 20,
|
||||||
|
memory_wait_timeout: float = 300.0, # 5 minutes default timeout
|
||||||
|
rate_limiter: Optional[RateLimiter] = None,
|
||||||
|
monitor: Optional[CrawlerMonitor] = None,
|
||||||
|
):
|
||||||
|
super().__init__(rate_limiter, monitor)
|
||||||
|
self.memory_threshold_percent = memory_threshold_percent
|
||||||
|
self.check_interval = check_interval
|
||||||
|
self.max_session_permit = max_session_permit
|
||||||
|
self.memory_wait_timeout = memory_wait_timeout
|
||||||
|
self.result_queue = asyncio.Queue() # Queue for storing results
|
||||||
|
|
||||||
|
async def crawl_url(
|
||||||
|
self,
|
||||||
|
url: str,
|
||||||
|
config: CrawlerRunConfig,
|
||||||
|
task_id: str,
|
||||||
|
) -> CrawlerTaskResult:
|
||||||
|
start_time = time.time()
|
||||||
|
error_message = ""
|
||||||
|
memory_usage = peak_memory = 0.0
|
||||||
|
|
||||||
|
try:
|
||||||
|
if self.monitor:
|
||||||
|
self.monitor.update_task(
|
||||||
|
task_id, status=CrawlStatus.IN_PROGRESS, start_time=start_time
|
||||||
|
)
|
||||||
|
self.concurrent_sessions += 1
|
||||||
|
|
||||||
|
if self.rate_limiter:
|
||||||
|
await self.rate_limiter.wait_if_needed(url)
|
||||||
|
|
||||||
|
process = psutil.Process()
|
||||||
|
start_memory = process.memory_info().rss / (1024 * 1024)
|
||||||
|
result = await self.crawler.arun(url, config=config, session_id=task_id)
|
||||||
|
end_memory = process.memory_info().rss / (1024 * 1024)
|
||||||
|
|
||||||
|
memory_usage = peak_memory = end_memory - start_memory
|
||||||
|
|
||||||
|
if self.rate_limiter and result.status_code:
|
||||||
|
if not self.rate_limiter.update_delay(url, result.status_code):
|
||||||
|
error_message = f"Rate limit retry count exceeded for domain {urlparse(url).netloc}"
|
||||||
|
if self.monitor:
|
||||||
|
self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
|
||||||
|
result = CrawlerTaskResult(
|
||||||
|
task_id=task_id,
|
||||||
|
url=url,
|
||||||
|
result=result,
|
||||||
|
memory_usage=memory_usage,
|
||||||
|
peak_memory=peak_memory,
|
||||||
|
start_time=start_time,
|
||||||
|
end_time=time.time(),
|
||||||
|
error_message=error_message,
|
||||||
|
)
|
||||||
|
await self.result_queue.put(result)
|
||||||
|
return result
|
||||||
|
|
||||||
|
if not result.success:
|
||||||
|
error_message = result.error_message
|
||||||
|
if self.monitor:
|
||||||
|
self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
|
||||||
|
elif self.monitor:
|
||||||
|
self.monitor.update_task(task_id, status=CrawlStatus.COMPLETED)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
error_message = str(e)
|
||||||
|
if self.monitor:
|
||||||
|
self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
|
||||||
|
result = CrawlResult(
|
||||||
|
url=url, html="", metadata={}, success=False, error_message=str(e)
|
||||||
|
)
|
||||||
|
|
||||||
|
finally:
|
||||||
|
end_time = time.time()
|
||||||
|
if self.monitor:
|
||||||
|
self.monitor.update_task(
|
||||||
|
task_id,
|
||||||
|
end_time=end_time,
|
||||||
|
memory_usage=memory_usage,
|
||||||
|
peak_memory=peak_memory,
|
||||||
|
error_message=error_message,
|
||||||
|
)
|
||||||
|
self.concurrent_sessions -= 1
|
||||||
|
|
||||||
|
return CrawlerTaskResult(
|
||||||
|
task_id=task_id,
|
||||||
|
url=url,
|
||||||
|
result=result,
|
||||||
|
memory_usage=memory_usage,
|
||||||
|
peak_memory=peak_memory,
|
||||||
|
start_time=start_time,
|
||||||
|
end_time=end_time,
|
||||||
|
error_message=error_message,
|
||||||
|
)
|
||||||
|
|
||||||
|
async def run_urls(
|
||||||
|
self,
|
||||||
|
urls: List[str],
|
||||||
|
crawler: "AsyncWebCrawler", # noqa: F821
|
||||||
|
config: CrawlerRunConfig,
|
||||||
|
) -> List[CrawlerTaskResult]:
|
||||||
|
self.crawler = crawler
|
||||||
|
|
||||||
|
if self.monitor:
|
||||||
|
self.monitor.start()
|
||||||
|
|
||||||
|
try:
|
||||||
|
pending_tasks = []
|
||||||
|
active_tasks = []
|
||||||
|
task_queue = []
|
||||||
|
|
||||||
|
for url in urls:
|
||||||
|
task_id = str(uuid.uuid4())
|
||||||
|
if self.monitor:
|
||||||
|
self.monitor.add_task(task_id, url)
|
||||||
|
task_queue.append((url, task_id))
|
||||||
|
|
||||||
|
while task_queue or active_tasks:
|
||||||
|
wait_start_time = time.time()
|
||||||
|
while len(active_tasks) < self.max_session_permit and task_queue:
|
||||||
|
if psutil.virtual_memory().percent >= self.memory_threshold_percent:
|
||||||
|
# Check if we've exceeded the timeout
|
||||||
|
if time.time() - wait_start_time > self.memory_wait_timeout:
|
||||||
|
raise MemoryError(
|
||||||
|
f"Memory usage above threshold ({self.memory_threshold_percent}%) for more than {self.memory_wait_timeout} seconds"
|
||||||
|
)
|
||||||
|
await asyncio.sleep(self.check_interval)
|
||||||
|
continue
|
||||||
|
|
||||||
|
url, task_id = task_queue.pop(0)
|
||||||
|
task = asyncio.create_task(self.crawl_url(url, config, task_id))
|
||||||
|
active_tasks.append(task)
|
||||||
|
|
||||||
|
if not active_tasks:
|
||||||
|
await asyncio.sleep(self.check_interval)
|
||||||
|
continue
|
||||||
|
|
||||||
|
done, pending = await asyncio.wait(
|
||||||
|
active_tasks, return_when=asyncio.FIRST_COMPLETED
|
||||||
|
)
|
||||||
|
|
||||||
|
pending_tasks.extend(done)
|
||||||
|
active_tasks = list(pending)
|
||||||
|
|
||||||
|
return await asyncio.gather(*pending_tasks)
|
||||||
|
finally:
|
||||||
|
if self.monitor:
|
||||||
|
self.monitor.stop()
|
||||||
|
|
||||||
|
async def run_urls_stream(
|
||||||
|
self,
|
||||||
|
urls: List[str],
|
||||||
|
crawler: "AsyncWebCrawler", # noqa: F821
|
||||||
|
config: CrawlerRunConfig,
|
||||||
|
) -> AsyncGenerator[CrawlerTaskResult, None]:
|
||||||
|
self.crawler = crawler
|
||||||
|
if self.monitor:
|
||||||
|
self.monitor.start()
|
||||||
|
|
||||||
|
try:
|
||||||
|
active_tasks = []
|
||||||
|
task_queue = []
|
||||||
|
completed_count = 0
|
||||||
|
total_urls = len(urls)
|
||||||
|
|
||||||
|
# Initialize task queue
|
||||||
|
for url in urls:
|
||||||
|
task_id = str(uuid.uuid4())
|
||||||
|
if self.monitor:
|
||||||
|
self.monitor.add_task(task_id, url)
|
||||||
|
task_queue.append((url, task_id))
|
||||||
|
|
||||||
|
while completed_count < total_urls:
|
||||||
|
# Start new tasks if memory permits
|
||||||
|
while len(active_tasks) < self.max_session_permit and task_queue:
|
||||||
|
if psutil.virtual_memory().percent >= self.memory_threshold_percent:
|
||||||
|
await asyncio.sleep(self.check_interval)
|
||||||
|
continue
|
||||||
|
|
||||||
|
url, task_id = task_queue.pop(0)
|
||||||
|
task = asyncio.create_task(self.crawl_url(url, config, task_id))
|
||||||
|
active_tasks.append(task)
|
||||||
|
|
||||||
|
if not active_tasks and not task_queue:
|
||||||
|
break
|
||||||
|
|
||||||
|
# Wait for any task to complete and yield results
|
||||||
|
if active_tasks:
|
||||||
|
done, pending = await asyncio.wait(
|
||||||
|
active_tasks, timeout=0.1, return_when=asyncio.FIRST_COMPLETED
|
||||||
|
)
|
||||||
|
for completed_task in done:
|
||||||
|
result = await completed_task
|
||||||
|
completed_count += 1
|
||||||
|
yield result
|
||||||
|
active_tasks = list(pending)
|
||||||
|
else:
|
||||||
|
await asyncio.sleep(self.check_interval)
|
||||||
|
|
||||||
|
finally:
|
||||||
|
if self.monitor:
|
||||||
|
self.monitor.stop()
|
||||||
|
|
||||||
|
|
||||||
|
class SemaphoreDispatcher(BaseDispatcher):
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
semaphore_count: int = 5,
|
||||||
|
max_session_permit: int = 20,
|
||||||
|
rate_limiter: Optional[RateLimiter] = None,
|
||||||
|
monitor: Optional[CrawlerMonitor] = None,
|
||||||
|
):
|
||||||
|
super().__init__(rate_limiter, monitor)
|
||||||
|
self.semaphore_count = semaphore_count
|
||||||
|
self.max_session_permit = max_session_permit
|
||||||
|
|
||||||
|
async def crawl_url(
|
||||||
|
self,
|
||||||
|
url: str,
|
||||||
|
config: CrawlerRunConfig,
|
||||||
|
task_id: str,
|
||||||
|
semaphore: asyncio.Semaphore = None,
|
||||||
|
) -> CrawlerTaskResult:
|
||||||
|
start_time = time.time()
|
||||||
|
error_message = ""
|
||||||
|
memory_usage = peak_memory = 0.0
|
||||||
|
|
||||||
|
try:
|
||||||
|
if self.monitor:
|
||||||
|
self.monitor.update_task(
|
||||||
|
task_id, status=CrawlStatus.IN_PROGRESS, start_time=start_time
|
||||||
|
)
|
||||||
|
|
||||||
|
if self.rate_limiter:
|
||||||
|
await self.rate_limiter.wait_if_needed(url)
|
||||||
|
|
||||||
|
async with semaphore:
|
||||||
|
process = psutil.Process()
|
||||||
|
start_memory = process.memory_info().rss / (1024 * 1024)
|
||||||
|
result = await self.crawler.arun(url, config=config, session_id=task_id)
|
||||||
|
end_memory = process.memory_info().rss / (1024 * 1024)
|
||||||
|
|
||||||
|
memory_usage = peak_memory = end_memory - start_memory
|
||||||
|
|
||||||
|
if self.rate_limiter and result.status_code:
|
||||||
|
if not self.rate_limiter.update_delay(url, result.status_code):
|
||||||
|
error_message = f"Rate limit retry count exceeded for domain {urlparse(url).netloc}"
|
||||||
|
if self.monitor:
|
||||||
|
self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
|
||||||
|
return CrawlerTaskResult(
|
||||||
|
task_id=task_id,
|
||||||
|
url=url,
|
||||||
|
result=result,
|
||||||
|
memory_usage=memory_usage,
|
||||||
|
peak_memory=peak_memory,
|
||||||
|
start_time=start_time,
|
||||||
|
end_time=time.time(),
|
||||||
|
error_message=error_message,
|
||||||
|
)
|
||||||
|
|
||||||
|
if not result.success:
|
||||||
|
error_message = result.error_message
|
||||||
|
if self.monitor:
|
||||||
|
self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
|
||||||
|
elif self.monitor:
|
||||||
|
self.monitor.update_task(task_id, status=CrawlStatus.COMPLETED)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
error_message = str(e)
|
||||||
|
if self.monitor:
|
||||||
|
self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
|
||||||
|
result = CrawlResult(
|
||||||
|
url=url, html="", metadata={}, success=False, error_message=str(e)
|
||||||
|
)
|
||||||
|
|
||||||
|
finally:
|
||||||
|
end_time = time.time()
|
||||||
|
if self.monitor:
|
||||||
|
self.monitor.update_task(
|
||||||
|
task_id,
|
||||||
|
end_time=end_time,
|
||||||
|
memory_usage=memory_usage,
|
||||||
|
peak_memory=peak_memory,
|
||||||
|
error_message=error_message,
|
||||||
|
)
|
||||||
|
|
||||||
|
return CrawlerTaskResult(
|
||||||
|
task_id=task_id,
|
||||||
|
url=url,
|
||||||
|
result=result,
|
||||||
|
memory_usage=memory_usage,
|
||||||
|
peak_memory=peak_memory,
|
||||||
|
start_time=start_time,
|
||||||
|
end_time=end_time,
|
||||||
|
error_message=error_message,
|
||||||
|
)
|
||||||
|
|
||||||
|
async def run_urls(
    self,
    crawler: "AsyncWebCrawler",  # noqa: F821
    urls: List[str],
    config: CrawlerRunConfig,
) -> List[CrawlerTaskResult]:
    """Crawl every URL in *urls* concurrently and collect the results.

    One asyncio task is spawned per URL; the semaphore created here
    (sized by ``self.semaphore_count``) bounds how many crawls run at once.

    Args:
        crawler: The crawler instance each spawned task will use.
        urls: URLs to crawl.
        config: Run configuration passed through to every crawl task.

    Returns:
        Results gathered from all tasks. NOTE(review): because
        ``return_exceptions=True`` is used, the returned list may also
        contain exception objects rather than only CrawlerTaskResult —
        confirm callers filter for that.
    """
    self.crawler = crawler
    # Monitor lifecycle brackets the whole batch (stopped in finally below).
    if self.monitor:
        self.monitor.start()

    try:
        semaphore = asyncio.Semaphore(self.semaphore_count)
        tasks = []

        for url in urls:
            # Each crawl gets a unique id so the monitor can track it.
            task_id = str(uuid.uuid4())
            if self.monitor:
                self.monitor.add_task(task_id, url)
            task = asyncio.create_task(
                self.crawl_url(url, config, task_id, semaphore)
            )
            tasks.append(task)

        # return_exceptions=True keeps one failing URL from cancelling the rest.
        return await asyncio.gather(*tasks, return_exceptions=True)
    finally:
        if self.monitor:
            self.monitor.stop()
|
||||||
@@ -1,10 +1,11 @@
|
|||||||
|
from abc import ABC, abstractmethod
|
||||||
from enum import Enum
|
from enum import Enum
|
||||||
from typing import Optional, Dict, Any, Union
|
from typing import Optional, Dict, Any
|
||||||
from colorama import Fore, Back, Style, init
|
from colorama import Fore, Style, init
|
||||||
import time
|
|
||||||
import os
|
import os
|
||||||
from datetime import datetime
|
from datetime import datetime
|
||||||
|
|
||||||
|
|
||||||
class LogLevel(Enum):
|
class LogLevel(Enum):
|
||||||
DEBUG = 1
|
DEBUG = 1
|
||||||
INFO = 2
|
INFO = 2
|
||||||
@@ -12,23 +13,54 @@ class LogLevel(Enum):
|
|||||||
WARNING = 4
|
WARNING = 4
|
||||||
ERROR = 5
|
ERROR = 5
|
||||||
|
|
||||||
class AsyncLogger:
|
|
||||||
|
|
||||||
|
class AsyncLoggerBase(ABC):
    """Abstract interface shared by crawl loggers.

    Concrete implementations (e.g. console- or file-backed loggers) must
    provide the per-level methods plus the two URL-oriented convenience
    methods below.
    """

    @abstractmethod
    def debug(self, message: str, tag: str = "DEBUG", **kwargs):
        """Log a debug-level message."""
        pass

    @abstractmethod
    def info(self, message: str, tag: str = "INFO", **kwargs):
        """Log an info-level message."""
        pass

    @abstractmethod
    def success(self, message: str, tag: str = "SUCCESS", **kwargs):
        """Log a success message."""
        pass

    @abstractmethod
    def warning(self, message: str, tag: str = "WARNING", **kwargs):
        """Log a warning message."""
        pass

    @abstractmethod
    def error(self, message: str, tag: str = "ERROR", **kwargs):
        """Log an error message."""
        pass

    @abstractmethod
    def url_status(self, url: str, success: bool, timing: float, tag: str = "FETCH", url_length: int = 50):
        """Log the outcome and timing of fetching *url*."""
        pass

    @abstractmethod
    def error_status(self, url: str, error: str, tag: str = "ERROR", url_length: int = 50):
        """Log an error associated with fetching *url*."""
        pass
|
||||||
|
|
||||||
|
class AsyncLogger(AsyncLoggerBase):
|
||||||
"""
|
"""
|
||||||
Asynchronous logger with support for colored console output and file logging.
|
Asynchronous logger with support for colored console output and file logging.
|
||||||
Supports templated messages with colored components.
|
Supports templated messages with colored components.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
DEFAULT_ICONS = {
|
DEFAULT_ICONS = {
|
||||||
'INIT': '→',
|
"INIT": "→",
|
||||||
'READY': '✓',
|
"READY": "✓",
|
||||||
'FETCH': '↓',
|
"FETCH": "↓",
|
||||||
'SCRAPE': '◆',
|
"SCRAPE": "◆",
|
||||||
'EXTRACT': '■',
|
"EXTRACT": "■",
|
||||||
'COMPLETE': '●',
|
"COMPLETE": "●",
|
||||||
'ERROR': '×',
|
"ERROR": "×",
|
||||||
'DEBUG': '⋯',
|
"DEBUG": "⋯",
|
||||||
'INFO': 'ℹ',
|
"INFO": "ℹ",
|
||||||
'WARNING': '⚠',
|
"WARNING": "⚠",
|
||||||
}
|
}
|
||||||
|
|
||||||
DEFAULT_COLORS = {
|
DEFAULT_COLORS = {
|
||||||
@@ -46,7 +78,7 @@ class AsyncLogger:
|
|||||||
tag_width: int = 10,
|
tag_width: int = 10,
|
||||||
icons: Optional[Dict[str, str]] = None,
|
icons: Optional[Dict[str, str]] = None,
|
||||||
colors: Optional[Dict[LogLevel, str]] = None,
|
colors: Optional[Dict[LogLevel, str]] = None,
|
||||||
verbose: bool = True
|
verbose: bool = True,
|
||||||
):
|
):
|
||||||
"""
|
"""
|
||||||
Initialize the logger.
|
Initialize the logger.
|
||||||
@@ -77,18 +109,20 @@ class AsyncLogger:
|
|||||||
|
|
||||||
def _get_icon(self, tag: str) -> str:
|
def _get_icon(self, tag: str) -> str:
|
||||||
"""Get the icon for a tag, defaulting to info icon if not found."""
|
"""Get the icon for a tag, defaulting to info icon if not found."""
|
||||||
return self.icons.get(tag, self.icons['INFO'])
|
return self.icons.get(tag, self.icons["INFO"])
|
||||||
|
|
||||||
def _write_to_file(self, message: str):
|
def _write_to_file(self, message: str):
|
||||||
"""Write a message to the log file if configured."""
|
"""Write a message to the log file if configured."""
|
||||||
if self.log_file:
|
if self.log_file:
|
||||||
timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
|
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
|
||||||
with open(self.log_file, 'a', encoding='utf-8') as f:
|
with open(self.log_file, "a", encoding="utf-8") as f:
|
||||||
# Strip ANSI color codes for file output
|
# Strip ANSI color codes for file output
|
||||||
clean_message = message.replace(Fore.RESET, '').replace(Style.RESET_ALL, '')
|
clean_message = message.replace(Fore.RESET, "").replace(
|
||||||
|
Style.RESET_ALL, ""
|
||||||
|
)
|
||||||
for color in vars(Fore).values():
|
for color in vars(Fore).values():
|
||||||
if isinstance(color, str):
|
if isinstance(color, str):
|
||||||
clean_message = clean_message.replace(color, '')
|
clean_message = clean_message.replace(color, "")
|
||||||
f.write(f"[{timestamp}] {clean_message}\n")
|
f.write(f"[{timestamp}] {clean_message}\n")
|
||||||
|
|
||||||
def _log(
|
def _log(
|
||||||
@@ -99,7 +133,7 @@ class AsyncLogger:
|
|||||||
params: Optional[Dict[str, Any]] = None,
|
params: Optional[Dict[str, Any]] = None,
|
||||||
colors: Optional[Dict[str, str]] = None,
|
colors: Optional[Dict[str, str]] = None,
|
||||||
base_color: Optional[str] = None,
|
base_color: Optional[str] = None,
|
||||||
**kwargs
|
**kwargs,
|
||||||
):
|
):
|
||||||
"""
|
"""
|
||||||
Core logging method that handles message formatting and output.
|
Core logging method that handles message formatting and output.
|
||||||
@@ -128,12 +162,13 @@ class AsyncLogger:
|
|||||||
if key in params:
|
if key in params:
|
||||||
value_str = str(params[key])
|
value_str = str(params[key])
|
||||||
formatted_message = formatted_message.replace(
|
formatted_message = formatted_message.replace(
|
||||||
value_str,
|
value_str, f"{color}{value_str}{Style.RESET_ALL}"
|
||||||
f"{color}{value_str}{Style.RESET_ALL}"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
except KeyError as e:
|
except KeyError as e:
|
||||||
formatted_message = f"LOGGING ERROR: Missing parameter {e} in message template"
|
formatted_message = (
|
||||||
|
f"LOGGING ERROR: Missing parameter {e} in message template"
|
||||||
|
)
|
||||||
level = LogLevel.ERROR
|
level = LogLevel.ERROR
|
||||||
else:
|
else:
|
||||||
formatted_message = message
|
formatted_message = message
|
||||||
@@ -175,7 +210,7 @@ class AsyncLogger:
|
|||||||
success: bool,
|
success: bool,
|
||||||
timing: float,
|
timing: float,
|
||||||
tag: str = "FETCH",
|
tag: str = "FETCH",
|
||||||
url_length: int = 50
|
url_length: int = 50,
|
||||||
):
|
):
|
||||||
"""
|
"""
|
||||||
Convenience method for logging URL fetch status.
|
Convenience method for logging URL fetch status.
|
||||||
@@ -195,20 +230,16 @@ class AsyncLogger:
|
|||||||
"url": url,
|
"url": url,
|
||||||
"url_length": url_length,
|
"url_length": url_length,
|
||||||
"status": success,
|
"status": success,
|
||||||
"timing": timing
|
"timing": timing,
|
||||||
},
|
},
|
||||||
colors={
|
colors={
|
||||||
"status": Fore.GREEN if success else Fore.RED,
|
"status": Fore.GREEN if success else Fore.RED,
|
||||||
"timing": Fore.YELLOW
|
"timing": Fore.YELLOW,
|
||||||
}
|
},
|
||||||
)
|
)
|
||||||
|
|
||||||
def error_status(
|
def error_status(
|
||||||
self,
|
self, url: str, error: str, tag: str = "ERROR", url_length: int = 50
|
||||||
url: str,
|
|
||||||
error: str,
|
|
||||||
tag: str = "ERROR",
|
|
||||||
url_length: int = 50
|
|
||||||
):
|
):
|
||||||
"""
|
"""
|
||||||
Convenience method for logging error status.
|
Convenience method for logging error status.
|
||||||
@@ -223,9 +254,57 @@ class AsyncLogger:
|
|||||||
level=LogLevel.ERROR,
|
level=LogLevel.ERROR,
|
||||||
message="{url:.{url_length}}... | Error: {error}",
|
message="{url:.{url_length}}... | Error: {error}",
|
||||||
tag=tag,
|
tag=tag,
|
||||||
params={
|
params={"url": url, "url_length": url_length, "error": error},
|
||||||
"url": url,
|
|
||||||
"url_length": url_length,
|
|
||||||
"error": error
|
|
||||||
}
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
class AsyncFileLogger(AsyncLoggerBase):
    """
    File-only asynchronous logger that writes logs to a specified file.
    """

    def __init__(self, log_file: str):
        """
        Initialize the file logger.

        Args:
            log_file: File path for logging
        """
        self.log_file = log_file
        # Make sure the parent directory exists before the first write.
        parent_dir = os.path.dirname(os.path.abspath(log_file))
        os.makedirs(parent_dir, exist_ok=True)

    def _write_to_file(self, level: str, message: str, tag: str):
        """Write a message to the log file."""
        stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
        line = "[{}] [{}] [{}] {}\n".format(stamp, level, tag, message)
        with open(self.log_file, "a", encoding="utf-8") as handle:
            handle.write(line)

    def debug(self, message: str, tag: str = "DEBUG", **kwargs):
        """Record a debug-level entry in the log file."""
        self._write_to_file("DEBUG", message, tag)

    def info(self, message: str, tag: str = "INFO", **kwargs):
        """Record an info-level entry in the log file."""
        self._write_to_file("INFO", message, tag)

    def success(self, message: str, tag: str = "SUCCESS", **kwargs):
        """Record a success entry in the log file."""
        self._write_to_file("SUCCESS", message, tag)

    def warning(self, message: str, tag: str = "WARNING", **kwargs):
        """Record a warning entry in the log file."""
        self._write_to_file("WARNING", message, tag)

    def error(self, message: str, tag: str = "ERROR", **kwargs):
        """Record an error entry in the log file."""
        self._write_to_file("ERROR", message, tag)

    def url_status(self, url: str, success: bool, timing: float, tag: str = "FETCH", url_length: int = 50):
        """Record the outcome and timing of fetching *url* in the log file."""
        outcome = "SUCCESS" if success else "FAILED"
        truncated = url[:url_length]
        self._write_to_file(
            "URL_STATUS",
            f"{truncated}... | Status: {outcome} | Time: {timing:.2f}s",
            tag,
        )

    def error_status(self, url: str, error: str, tag: str = "ERROR", url_length: int = 50):
        """Record a fetch error for *url* in the log file."""
        truncated = url[:url_length]
        self._write_to_file("ERROR", f"{truncated}... | Error: {error}", tag)
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
873
crawl4ai/browser_manager.py
Normal file
873
crawl4ai/browser_manager.py
Normal file
@@ -0,0 +1,873 @@
|
|||||||
|
import asyncio
|
||||||
|
import time
|
||||||
|
from typing import List, Optional
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
import shutil
|
||||||
|
import tempfile
|
||||||
|
import subprocess
|
||||||
|
from playwright.async_api import BrowserContext
|
||||||
|
import hashlib
|
||||||
|
from .js_snippet import load_js_script
|
||||||
|
from .config import DOWNLOAD_PAGE_TIMEOUT
|
||||||
|
from .async_configs import BrowserConfig, CrawlerRunConfig
|
||||||
|
from playwright_stealth import StealthConfig
|
||||||
|
from .utils import get_chromium_path
|
||||||
|
|
||||||
|
# Stealth configuration applied to pages to reduce headless-browser
# fingerprinting; each flag enables one evasion patch in playwright_stealth.
stealth_config = StealthConfig(
    webdriver=True,
    chrome_app=True,
    chrome_csi=True,
    chrome_load_times=True,
    chrome_runtime=True,
    navigator_languages=True,
    navigator_plugins=True,
    navigator_permissions=True,
    webgl_vendor=True,
    outerdimensions=True,
    navigator_hardware_concurrency=True,
    media_codecs=True,
)

# Chromium flags that switch off background/maintenance features that are
# unnecessary (or noisy) for automated crawling; appended to the launch
# arguments when light mode is requested.
BROWSER_DISABLE_OPTIONS = [
    "--disable-background-networking",
    "--disable-background-timer-throttling",
    "--disable-backgrounding-occluded-windows",
    "--disable-breakpad",
    "--disable-client-side-phishing-detection",
    "--disable-component-extensions-with-background-pages",
    "--disable-default-apps",
    "--disable-extensions",
    "--disable-features=TranslateUI",
    "--disable-hang-monitor",
    "--disable-ipc-flooding-protection",
    "--disable-popup-blocking",
    "--disable-prompt-on-repost",
    "--disable-sync",
    "--force-color-profile=srgb",
    "--metrics-recording-only",
    "--no-first-run",
    "--password-store=basic",
    "--use-mock-keychain",
]
|
||||||
|
|
||||||
|
|
||||||
|
class ManagedBrowser:
    """
    Manages the browser process and context. This class allows to connect to the browser using CDP protocol.

    Attributes:
        browser_type (str): The type of browser to launch. Supported values: "chromium", "firefox", "webkit".
                            Default: "chromium".
        user_data_dir (str or None): Path to a user data directory for persistent sessions. If None, a
                                     temporary directory may be used. Default: None.
        headless (bool): Whether to run the browser in headless mode (no visible GUI).
        browser_process (subprocess.Popen): The process object for the browser.
        temp_dir (str): Temporary directory for user data if not provided.
        debugging_port (int): Port for debugging the browser.
        host (str): Host for debugging the browser.

    Methods:
        start(): Starts the browser process and returns the CDP endpoint URL.
        _get_browser_path(): Returns the browser executable path based on OS and browser type.
        _get_browser_args(): Returns browser-specific command line arguments.
        cleanup(): Terminates the browser process and removes the temporary directory.
        create_profile(): Deprecated delegator to BrowserProfiler.create_profile().
    """

    browser_type: str
    user_data_dir: str
    headless: bool
    browser_process: subprocess.Popen
    temp_dir: str
    debugging_port: int
    host: str

    def __init__(
        self,
        browser_type: str = "chromium",
        user_data_dir: Optional[str] = None,
        headless: bool = False,
        logger=None,
        host: str = "localhost",
        debugging_port: int = 9222,
        cdp_url: Optional[str] = None,
    ):
        """
        Initialize the ManagedBrowser instance.

        Args:
            browser_type (str): The type of browser to launch. Supported values: "chromium", "firefox", "webkit".
                                Default: "chromium".
            user_data_dir (str or None): Path to a user data directory for persistent sessions. If None, a
                                         temporary directory may be used. Default: None.
            headless (bool): Whether to run the browser in headless mode (no visible GUI). Default: False.
            logger (logging.Logger): Logger instance for logging messages. May be None, in which
                                     case logging is skipped. Default: None.
            host (str): Host for debugging the browser. Default: "localhost".
            debugging_port (int): Port for debugging the browser. Default: 9222.
            cdp_url (str or None): CDP URL to connect to the browser. Default: None.
        """
        self.browser_type = browser_type
        self.user_data_dir = user_data_dir
        self.headless = headless
        self.browser_process = None
        self.temp_dir = None
        self.debugging_port = debugging_port
        self.host = host
        self.logger = logger
        # Set to True by cleanup() so the process monitor can distinguish a
        # deliberate shutdown from an unexpected crash.
        self.shutting_down = False
        self.cdp_url = cdp_url

    async def start(self) -> str:
        """
        Starts the browser process or returns CDP endpoint URL.

        If cdp_url is provided, returns it directly (no local process is spawned).
        If user_data_dir is not provided for a local browser, creates a temporary directory.

        Returns:
            str: CDP endpoint URL

        Raises:
            Exception: If the browser process cannot be started.
        """
        # If CDP URL provided, just return it — we attach to an existing browser.
        if self.cdp_url:
            return self.cdp_url

        # Create temp dir if needed; remembered in temp_dir so cleanup() removes it.
        if not self.user_data_dir:
            self.temp_dir = tempfile.mkdtemp(prefix="browser-profile-")
            self.user_data_dir = self.temp_dir

        # Get browser path and args based on OS and browser type
        args = await self._get_browser_args()

        # Start browser process
        try:
            self.browser_process = subprocess.Popen(
                args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
            )
            # Monitor browser process output for errors
            asyncio.create_task(self._monitor_browser_process())
            await asyncio.sleep(2)  # Give browser time to start
            return f"http://{self.host}:{self.debugging_port}"
        except Exception as e:
            await self.cleanup()
            # Chain the original cause so the real failure is not lost.
            raise Exception(f"Failed to start browser: {e}") from e

    async def _monitor_browser_process(self):
        """
        Monitor the browser process for unexpected termination.

        How it works:
        1. Read stdout and stderr from the browser process.
        2. If the process has terminated, log the error message and terminate the browser.
        3. If the shutting_down flag is set, log the normal termination message.
        4. If any other error occurs, log the error message.

        Note: This method should be called in a separate task to avoid blocking the main event loop.
        """
        if self.browser_process:
            try:
                stdout, stderr = await asyncio.gather(
                    asyncio.to_thread(self.browser_process.stdout.read),
                    asyncio.to_thread(self.browser_process.stderr.read),
                )

                # Check shutting_down flag BEFORE logging anything
                if self.browser_process.poll() is not None:
                    if not self.shutting_down:
                        # Guard: logger defaults to None; calling it unguarded
                        # would raise AttributeError on this error path.
                        if self.logger:
                            self.logger.error(
                                message="Browser process terminated unexpectedly | Code: {code} | STDOUT: {stdout} | STDERR: {stderr}",
                                tag="ERROR",
                                params={
                                    "code": self.browser_process.returncode,
                                    "stdout": stdout.decode(),
                                    "stderr": stderr.decode(),
                                },
                            )
                        await self.cleanup()
                    else:
                        if self.logger:
                            self.logger.info(
                                message="Browser process terminated normally | Code: {code}",
                                tag="INFO",
                                params={"code": self.browser_process.returncode},
                            )
            except Exception as e:
                if not self.shutting_down and self.logger:
                    self.logger.error(
                        message="Error monitoring browser process: {error}",
                        tag="ERROR",
                        params={"error": str(e)},
                    )

    def _get_browser_path_WIP(self) -> str:
        """Returns the browser executable path based on OS and browser type"""
        if sys.platform == "darwin":  # macOS
            paths = {
                "chromium": "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome",
                "firefox": "/Applications/Firefox.app/Contents/MacOS/firefox",
                "webkit": "/Applications/Safari.app/Contents/MacOS/Safari",
            }
        elif sys.platform == "win32":  # Windows
            paths = {
                "chromium": "C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe",
                "firefox": "C:\\Program Files\\Mozilla Firefox\\firefox.exe",
                "webkit": None,  # WebKit not supported on Windows
            }
        else:  # Linux
            paths = {
                "chromium": "google-chrome",
                "firefox": "firefox",
                "webkit": None,  # WebKit not supported on Linux
            }

        return paths.get(self.browser_type)

    async def _get_browser_path(self) -> str:
        """Resolve the browser executable path via the shared helper."""
        browser_path = await get_chromium_path(self.browser_type)
        return browser_path

    async def _get_browser_args(self) -> List[str]:
        """Returns browser-specific command line arguments"""
        base_args = [await self._get_browser_path()]

        if self.browser_type == "chromium":
            args = [
                f"--remote-debugging-port={self.debugging_port}",
                f"--user-data-dir={self.user_data_dir}",
            ]
            if self.headless:
                args.append("--headless=new")
        elif self.browser_type == "firefox":
            args = [
                "--remote-debugging-port",
                str(self.debugging_port),
                "--profile",
                self.user_data_dir,
            ]
            if self.headless:
                args.append("--headless")
        else:
            raise NotImplementedError(f"Browser type {self.browser_type} not supported")

        return base_args + args

    async def cleanup(self):
        """Cleanup browser process and temporary directory"""
        # Set shutting_down flag BEFORE any termination actions so the
        # monitor task reports a normal shutdown.
        self.shutting_down = True

        if self.browser_process:
            try:
                self.browser_process.terminate()
                # Wait for process to end gracefully
                for _ in range(10):  # 10 attempts, 100ms each
                    if self.browser_process.poll() is not None:
                        break
                    await asyncio.sleep(0.1)

                # Force kill if still running
                if self.browser_process.poll() is None:
                    self.browser_process.kill()
                    await asyncio.sleep(0.1)  # Brief wait for kill to take effect

            except Exception as e:
                # Guard: logger may be None (constructor default).
                if self.logger:
                    self.logger.error(
                        message="Error terminating browser: {error}",
                        tag="ERROR",
                        params={"error": str(e)},
                    )

        if self.temp_dir and os.path.exists(self.temp_dir):
            try:
                shutil.rmtree(self.temp_dir)
            except Exception as e:
                if self.logger:
                    self.logger.error(
                        message="Error removing temporary directory: {error}",
                        tag="ERROR",
                        params={"error": str(e)},
                    )

    # These methods have been moved to BrowserProfiler class
    @staticmethod
    async def create_profile(browser_config=None, profile_name=None, logger=None):
        """
        This method has been moved to the BrowserProfiler class.

        Creates a browser profile by launching a browser for interactive user setup
        and waits until the user closes it. The profile is stored in a directory that
        can be used later with BrowserConfig.user_data_dir.

        Please use BrowserProfiler.create_profile() instead.

        Example:
            ```python
            from crawl4ai.browser_profiler import BrowserProfiler

            profiler = BrowserProfiler()
            profile_path = await profiler.create_profile(profile_name="my-login-profile")
            ```
        """
        from .browser_profiler import BrowserProfiler

        # Create a BrowserProfiler instance and delegate to it
        profiler = BrowserProfiler(logger=logger)
        return await profiler.create_profile(profile_name=profile_name, browser_config=browser_config)

    @staticmethod
    def list_profiles():
        """
        This method has been moved to the BrowserProfiler class.

        Lists all available browser profiles in the Crawl4AI profiles directory.

        Please use BrowserProfiler.list_profiles() instead.

        Example:
            ```python
            from crawl4ai.browser_profiler import BrowserProfiler

            profiler = BrowserProfiler()
            profiles = profiler.list_profiles()
            ```
        """
        from .browser_profiler import BrowserProfiler

        # Create a BrowserProfiler instance and delegate to it
        profiler = BrowserProfiler()
        return profiler.list_profiles()

    @staticmethod
    def delete_profile(profile_name_or_path):
        """
        This method has been moved to the BrowserProfiler class.

        Delete a browser profile by name or path.

        Please use BrowserProfiler.delete_profile() instead.

        Example:
            ```python
            from crawl4ai.browser_profiler import BrowserProfiler

            profiler = BrowserProfiler()
            success = profiler.delete_profile("my-profile")
            ```
        """
        from .browser_profiler import BrowserProfiler

        # Create a BrowserProfiler instance and delegate to it
        profiler = BrowserProfiler()
        return profiler.delete_profile(profile_name_or_path)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class BrowserManager:
|
||||||
|
"""
|
||||||
|
Manages the browser instance and context.
|
||||||
|
|
||||||
|
Attributes:
|
||||||
|
config (BrowserConfig): Configuration object containing all browser settings
|
||||||
|
logger: Logger instance for recording events and errors
|
||||||
|
browser (Browser): The browser instance
|
||||||
|
default_context (BrowserContext): The default browser context
|
||||||
|
managed_browser (ManagedBrowser): The managed browser instance
|
||||||
|
playwright (Playwright): The Playwright instance
|
||||||
|
sessions (dict): Dictionary to store session information
|
||||||
|
session_ttl (int): Session timeout in seconds
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
def __init__(self, browser_config: BrowserConfig, logger=None):
    """
    Initialize the BrowserManager with a browser configuration.

    Args:
        browser_config (BrowserConfig): Configuration object containing all browser settings
        logger: Logger instance for recording events and errors
    """
    self.config: BrowserConfig = browser_config
    self.logger = logger

    # Browser state — populated lazily by start().
    self.browser = None
    self.default_context = None
    self.managed_browser = None
    self.playwright = None

    # Session management
    self.sessions = {}
    self.session_ttl = 1800  # 30 minutes

    # Keep track of contexts by a "config signature," so each unique config reuses a single context
    self.contexts_by_config = {}
    self._contexts_lock = asyncio.Lock()

    # Initialize ManagedBrowser if needed: in managed mode the browser runs
    # as an external process (or remote endpoint) reached over CDP.
    if self.config.use_managed_browser:
        self.managed_browser = ManagedBrowser(
            browser_type=self.config.browser_type,
            user_data_dir=self.config.user_data_dir,
            headless=self.config.headless,
            logger=self.logger,
            debugging_port=self.config.debugging_port,
            cdp_url=self.config.cdp_url,
        )
|
||||||
|
|
||||||
|
async def start(self):
    """
    Start the browser instance and set up the default context.

    How it works:
    1. Check if Playwright is already initialized.
    2. If not, initialize Playwright.
    3. If managed browser is used, start it and connect to the CDP endpoint.
    4. If managed browser is not used, launch the browser and set up the default context.

    Note: This method should be called in a separate task to avoid blocking the main event loop.
    """
    if self.playwright is None:
        # Imported lazily so module import does not require playwright.
        from playwright.async_api import async_playwright

        self.playwright = await async_playwright().start()

    if self.config.use_managed_browser:
        # Managed mode: attach to the externally running browser over CDP.
        cdp_url = await self.managed_browser.start()
        self.browser = await self.playwright.chromium.connect_over_cdp(cdp_url)
        contexts = self.browser.contexts
        if contexts:
            # Reuse the first context already present in the browser.
            self.default_context = contexts[0]
        else:
            self.default_context = await self.create_browser_context()
        await self.setup_context(self.default_context)
    else:
        browser_args = self._build_browser_args()

        # Launch appropriate browser type
        if self.config.browser_type == "firefox":
            self.browser = await self.playwright.firefox.launch(**browser_args)
        elif self.config.browser_type == "webkit":
            self.browser = await self.playwright.webkit.launch(**browser_args)
        else:
            self.browser = await self.playwright.chromium.launch(**browser_args)

        # NOTE(review): in non-managed mode the browser object itself is
        # stored as the "default context" — confirm downstream callers
        # expect a Browser here rather than a BrowserContext.
        self.default_context = self.browser
|
||||||
|
|
||||||
|
def _build_browser_args(self) -> dict:
|
||||||
|
"""Build browser launch arguments from config."""
|
||||||
|
args = [
|
||||||
|
"--disable-gpu",
|
||||||
|
"--disable-gpu-compositing",
|
||||||
|
"--disable-software-rasterizer",
|
||||||
|
"--no-sandbox",
|
||||||
|
"--disable-dev-shm-usage",
|
||||||
|
"--no-first-run",
|
||||||
|
"--no-default-browser-check",
|
||||||
|
"--disable-infobars",
|
||||||
|
"--window-position=0,0",
|
||||||
|
"--ignore-certificate-errors",
|
||||||
|
"--ignore-certificate-errors-spki-list",
|
||||||
|
"--disable-blink-features=AutomationControlled",
|
||||||
|
"--window-position=400,0",
|
||||||
|
"--disable-renderer-backgrounding",
|
||||||
|
"--disable-ipc-flooding-protection",
|
||||||
|
"--force-color-profile=srgb",
|
||||||
|
"--mute-audio",
|
||||||
|
"--disable-background-timer-throttling",
|
||||||
|
# "--single-process",
|
||||||
|
f"--window-size={self.config.viewport_width},{self.config.viewport_height}",
|
||||||
|
]
|
||||||
|
|
||||||
|
if self.config.light_mode:
|
||||||
|
args.extend(BROWSER_DISABLE_OPTIONS)
|
||||||
|
|
||||||
|
if self.config.text_mode:
|
||||||
|
args.extend(
|
||||||
|
[
|
||||||
|
"--blink-settings=imagesEnabled=false",
|
||||||
|
"--disable-remote-fonts",
|
||||||
|
"--disable-images",
|
||||||
|
"--disable-javascript",
|
||||||
|
"--disable-software-rasterizer",
|
||||||
|
"--disable-dev-shm-usage",
|
||||||
|
]
|
||||||
|
)
|
||||||
|
|
||||||
|
if self.config.extra_args:
|
||||||
|
args.extend(self.config.extra_args)
|
||||||
|
|
||||||
|
browser_args = {"headless": self.config.headless, "args": args}
|
||||||
|
|
||||||
|
if self.config.chrome_channel:
|
||||||
|
browser_args["channel"] = self.config.chrome_channel
|
||||||
|
|
||||||
|
if self.config.accept_downloads:
|
||||||
|
browser_args["downloads_path"] = self.config.downloads_path or os.path.join(
|
||||||
|
os.getcwd(), "downloads"
|
||||||
|
)
|
||||||
|
os.makedirs(browser_args["downloads_path"], exist_ok=True)
|
||||||
|
|
||||||
|
if self.config.proxy or self.config.proxy_config:
|
||||||
|
from playwright.async_api import ProxySettings
|
||||||
|
|
||||||
|
proxy_settings = (
|
||||||
|
ProxySettings(server=self.config.proxy)
|
||||||
|
if self.config.proxy
|
||||||
|
else ProxySettings(
|
||||||
|
server=self.config.proxy_config.get("server"),
|
||||||
|
username=self.config.proxy_config.get("username"),
|
||||||
|
password=self.config.proxy_config.get("password"),
|
||||||
|
)
|
||||||
|
)
|
||||||
|
browser_args["proxy"] = proxy_settings
|
||||||
|
|
||||||
|
return browser_args
|
||||||
|
|
||||||
|
async def setup_context(
    self,
    context: BrowserContext,
    crawlerRunConfig: CrawlerRunConfig = None,
    is_default=False,
):
    """
    Apply the browser-level configuration (``self.config``) to a newly
    created context.

    Steps performed:
    1. Set extra HTTP headers if provided.
    2. Add cookies if provided.
    3. Read storage state if configured.
    4. Widen timeouts and wire the downloads path when downloads are accepted.
    5. Combine user agent and client-hint headers if a user agent is set.
    6. Add a default "cookiesEnabled" cookie.
    7. Install the navigator-override init script when requested by the run config.

    Args:
        context (BrowserContext): The browser context to set up
        crawlerRunConfig (CrawlerRunConfig): Per-run configuration; used for the
            default-cookie URL and the navigator-override flags. May be None.
        is_default (bool): Flag indicating if this is the default context

    Returns:
        None
    """
    if self.config.headers:
        await context.set_extra_http_headers(self.config.headers)

    if self.config.cookies:
        await context.add_cookies(self.config.cookies)

    if self.config.storage_state:
        # NOTE(review): context.storage_state(path=None) only *reads* the current
        # storage state; persisted state is actually applied via the
        # storage_state option at context-creation time. Confirm intent.
        await context.storage_state(path=None)

    if self.config.accept_downloads:
        # Downloads can be slow; widen both the action and navigation timeouts.
        context.set_default_timeout(DOWNLOAD_PAGE_TIMEOUT)
        context.set_default_navigation_timeout(DOWNLOAD_PAGE_TIMEOUT)
        if self.config.downloads_path:
            # Playwright exposes no public API to change these after creation,
            # so reach into the private options dict of the impl object.
            context._impl_obj._options["accept_downloads"] = True
            context._impl_obj._options[
                "downloads_path"
            ] = self.config.downloads_path

    # Handle user agent and browser hints
    if self.config.user_agent:
        combined_headers = {
            "User-Agent": self.config.user_agent,
            "sec-ch-ua": self.config.browser_hint,
        }
        # Fix: guard against an unset headers mapping — dict.update(None)
        # raised TypeError when only user_agent was configured.
        combined_headers.update(self.config.headers or {})
        await context.set_extra_http_headers(combined_headers)

    # Add default cookie so target sites see cookies as enabled.
    await context.add_cookies(
        [
            {
                "name": "cookiesEnabled",
                "value": "true",
                "url": crawlerRunConfig.url
                if crawlerRunConfig
                else "https://crawl4ai.com/",
            }
        ]
    )

    # Handle navigator overrides
    if crawlerRunConfig:
        if (
            crawlerRunConfig.override_navigator
            or crawlerRunConfig.simulate_user
            or crawlerRunConfig.magic
        ):
            await context.add_init_script(load_js_script("navigator_overrider"))
|
||||||
|
|
||||||
|
async def create_browser_context(self, crawlerRunConfig: CrawlerRunConfig = None):
    """
    Creates and returns a new browser context with configured settings.
    Applies text-only mode settings if text_mode is enabled in config.

    Returns:
        Context: Browser context object with the specified configurations
    """
    # A "User-Agent" header, when present, wins over the configured user agent.
    user_agent = self.config.headers.get("User-Agent", self.config.user_agent)
    viewport_settings = {
        "width": self.config.viewport_width,
        "height": self.config.viewport_height,
    }
    proxy_settings = {"server": self.config.proxy} if self.config.proxy else None

    # File extensions whose requests are aborted in text-only mode.
    blocked_extensions = (
        # Images
        "jpg jpeg png gif webp svg ico bmp tiff psd "
        # Fonts
        "woff woff2 ttf otf eot "
        # Styles intentionally NOT blocked: css, less, scss, sass
        # Media
        "mp4 webm ogg avi mov wmv flv m4v mp3 wav aac m4a opus flac "
        # Documents
        "pdf doc docx xls xlsx ppt pptx "
        # Archives
        "zip rar 7z tar gz "
        # Scripts and data
        "xml swf wasm"
    ).split()

    # Common context settings
    context_settings = {
        "user_agent": user_agent,
        "viewport": viewport_settings,
        "proxy": proxy_settings,
        "accept_downloads": self.config.accept_downloads,
        "storage_state": self.config.storage_state,
        "ignore_https_errors": self.config.ignore_https_errors,
        "device_scale_factor": 1.0,
        "java_script_enabled": self.config.java_script_enabled,
    }

    # A per-run proxy configuration overrides the browser-level proxy.
    if crawlerRunConfig and crawlerRunConfig.proxy_config:
        run_proxy = {"server": crawlerRunConfig.proxy_config.server}
        if crawlerRunConfig.proxy_config.username:
            run_proxy["username"] = crawlerRunConfig.proxy_config.username
            run_proxy["password"] = crawlerRunConfig.proxy_config.password
        context_settings["proxy"] = run_proxy

    if self.config.text_mode:
        # Text mode disables touch and mobile emulation.
        context_settings.update({"has_touch": False, "is_mobile": False})

    # Create the context with all accumulated settings.
    context = await self.browser.new_context(**context_settings)

    if self.config.text_mode:
        # Register one abort route per blocked extension.
        for ext in blocked_extensions:
            await context.route(f"**/*.{ext}", lambda route: route.abort())

    return context
|
||||||
|
|
||||||
|
def _make_config_signature(self, crawlerRunConfig: CrawlerRunConfig) -> str:
    """
    Converts the crawlerRunConfig into a dict, excludes ephemeral fields,
    then returns a hash of the sorted JSON. This yields a stable signature
    that identifies configurations requiring a unique browser context.
    """
    import json

    # Fields that do not affect browser-level setup (e.g. chunking_strategy
    # is purely for data extraction). Expand or adjust as needed.
    ephemeral_keys = frozenset(
        (
            "session_id",
            "js_code",
            "scraping_strategy",
            "extraction_strategy",
            "chunking_strategy",
            "cache_mode",
            "content_filter",
            "semaphore_count",
            "url",
        )
    )
    relevant = {
        key: value
        for key, value in crawlerRunConfig.__dict__.items()
        if key not in ephemeral_keys
    }

    # Canonical JSON string, then a compact hex digest.
    canonical = json.dumps(relevant, sort_keys=True, default=str)
    return hashlib.sha256(canonical.encode("utf-8")).hexdigest()
|
||||||
|
|
||||||
|
async def get_page(self, crawlerRunConfig: CrawlerRunConfig):
    """
    Get a page for the given session ID, creating a new one if needed.

    Args:
        crawlerRunConfig (CrawlerRunConfig): Configuration object containing all browser settings

    Returns:
        (page, context): The Page and its BrowserContext
    """
    self._cleanup_expired_sessions()

    # Reuse an existing session's page + context when one is registered.
    session_id = crawlerRunConfig.session_id
    if session_id and session_id in self.sessions:
        ctx, page, _ = self.sessions[session_id]
        self.sessions[session_id] = (ctx, page, time.time())  # refresh last-used
        return page, ctx

    if self.config.use_managed_browser:
        # Managed browsers share one default context.
        ctx = self.default_context
    else:
        # Look up (or lazily create) a context keyed by the config signature.
        signature = self._make_config_signature(crawlerRunConfig)
        async with self._contexts_lock:
            ctx = self.contexts_by_config.get(signature)
            if ctx is None:
                ctx = await self.create_browser_context(crawlerRunConfig)
                await self.setup_context(ctx, crawlerRunConfig)
                self.contexts_by_config[signature] = ctx

    # Fresh page from whichever context was selected above.
    page = await ctx.new_page()

    # Register the session for later reuse when a session_id is given.
    if session_id:
        self.sessions[session_id] = (ctx, page, time.time())

    return page, ctx
|
||||||
|
|
||||||
|
async def kill_session(self, session_id: str):
    """
    Terminate a stored session: close its page, close its context unless the
    context is the shared managed-browser one, and forget the session entry.

    Args:
        session_id (str): The session ID to kill. Unknown IDs are a no-op.
    """
    if session_id not in self.sessions:
        return

    context, page, _ = self.sessions[session_id]
    await page.close()
    # Managed browsers share one context; never close it per-session.
    if not self.config.use_managed_browser:
        await context.close()
    del self.sessions[session_id]
|
||||||
|
|
||||||
|
def _cleanup_expired_sessions(self):
|
||||||
|
"""Clean up expired sessions based on TTL."""
|
||||||
|
current_time = time.time()
|
||||||
|
expired_sessions = [
|
||||||
|
sid
|
||||||
|
for sid, (_, _, last_used) in self.sessions.items()
|
||||||
|
if current_time - last_used > self.session_ttl
|
||||||
|
]
|
||||||
|
for sid in expired_sessions:
|
||||||
|
asyncio.create_task(self.kill_session(sid))
|
||||||
|
|
||||||
|
async def close(self):
    """Close all browser resources and clean up.

    Teardown proceeds in dependency order: session pages first, then the
    pooled per-config contexts, the Playwright browser, the managed browser
    process, and finally the Playwright driver itself.
    """
    # Optional grace period before teardown begins.
    if self.config.sleep_on_close:
        await asyncio.sleep(0.5)

    # Iterate over a snapshot of keys: kill_session mutates self.sessions.
    session_ids = list(self.sessions.keys())
    for session_id in session_ids:
        await self.kill_session(session_id)

    # Now close all contexts we created. This reclaims memory from ephemeral contexts.
    for ctx in self.contexts_by_config.values():
        try:
            await ctx.close()
        except Exception as e:
            # Best-effort: log the failure and keep closing the rest.
            self.logger.error(
                message="Error closing context: {error}",
                tag="ERROR",
                params={"error": str(e)}
            )
    self.contexts_by_config.clear()

    if self.browser:
        await self.browser.close()
        self.browser = None

    if self.managed_browser:
        # Brief pause before killing the external browser process.
        await asyncio.sleep(0.5)
        await self.managed_browser.cleanup()
        self.managed_browser = None

    if self.playwright:
        await self.playwright.stop()
        self.playwright = None
|
||||||
544
crawl4ai/browser_profiler.py
Normal file
544
crawl4ai/browser_profiler.py
Normal file
@@ -0,0 +1,544 @@
|
|||||||
|
"""
|
||||||
|
Browser Profiler Module
|
||||||
|
|
||||||
|
This module provides a dedicated class for managing browser profiles
|
||||||
|
that can be used for identity-based crawling with Crawl4AI.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import os
|
||||||
|
import asyncio
|
||||||
|
import signal
|
||||||
|
import sys
|
||||||
|
import datetime
|
||||||
|
import uuid
|
||||||
|
import shutil
|
||||||
|
from typing import List, Dict, Optional, Any
|
||||||
|
from colorama import Fore, Style, init
|
||||||
|
|
||||||
|
from .async_configs import BrowserConfig
|
||||||
|
from .browser_manager import ManagedBrowser
|
||||||
|
from .async_logger import AsyncLogger, AsyncLoggerBase
|
||||||
|
from .utils import get_home_folder
|
||||||
|
|
||||||
|
|
||||||
|
class BrowserProfiler:
|
||||||
|
"""
|
||||||
|
A dedicated class for managing browser profiles for Crawl4AI.
|
||||||
|
|
||||||
|
The BrowserProfiler allows you to:
|
||||||
|
- Create browser profiles interactively
|
||||||
|
- List available profiles
|
||||||
|
- Delete profiles when no longer needed
|
||||||
|
- Get profile paths for use in BrowserConfig
|
||||||
|
|
||||||
|
Profiles are stored by default in ~/.crawl4ai/profiles/
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, logger: Optional[AsyncLoggerBase] = None):
    """
    Initialize the BrowserProfiler.

    Args:
        logger (AsyncLoggerBase, optional): Logger for outputting messages.
            If None (or not an AsyncLoggerBase), a default AsyncLogger is created.
    """
    # Colorama must be initialized before any colored terminal output.
    init()

    # Keep the supplied logger only when it is actually usable; anything
    # else (None included) falls back to a verbose default logger.
    if isinstance(logger, AsyncLoggerBase):
        self.logger = logger
    else:
        self.logger = AsyncLogger(verbose=True)

    # All profiles live under <home>/profiles; create it on first use.
    self.profiles_dir = os.path.join(get_home_folder(), "profiles")
    os.makedirs(self.profiles_dir, exist_ok=True)
|
||||||
|
|
||||||
|
async def create_profile(self,
                         profile_name: Optional[str] = None,
                         browser_config: Optional[BrowserConfig] = None) -> Optional[str]:
    """
    Creates a browser profile by launching a browser for interactive user setup
    and waits until the user closes it. The profile is stored in a directory that
    can be used later with BrowserConfig.user_data_dir.

    Args:
        profile_name (str, optional): Name for the profile directory.
            If None, a name is generated based on timestamp.
        browser_config (BrowserConfig, optional): Configuration for the browser.
            If None, a default configuration is used with headless=False.

    Returns:
        str: Path to the created profile directory, or None if creation failed

    Example:
        ```python
        profiler = BrowserProfiler()

        # Create a profile interactively
        profile_path = await profiler.create_profile(
            profile_name="my-login-profile"
        )

        # Use the profile in a crawler
        browser_config = BrowserConfig(
            headless=True,
            use_managed_browser=True,
            user_data_dir=profile_path
        )

        async with AsyncWebCrawler(config=browser_config) as crawler:
            # The crawler will now use your profile with all your cookies and login state
            result = await crawler.arun("https://example.com/dashboard")
        ```
    """
    # Create default browser config if none provided
    if browser_config is None:
        from .async_configs import BrowserConfig
        browser_config = BrowserConfig(
            browser_type="chromium",
            headless=False,  # Must be visible for user interaction
            verbose=True
        )
    else:
        # Ensure headless is False for user interaction
        browser_config.headless = False

    # Generate profile name if not provided
    if not profile_name:
        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        profile_name = f"profile_{timestamp}_{uuid.uuid4().hex[:6]}"

    # Sanitize profile name (replace spaces and special chars)
    profile_name = "".join(c if c.isalnum() or c in "-_" else "_" for c in profile_name)

    # Set user data directory
    profile_path = os.path.join(self.profiles_dir, profile_name)
    os.makedirs(profile_path, exist_ok=True)

    # Print instructions for the user with colorama formatting
    border = f"{Fore.CYAN}{'='*80}{Style.RESET_ALL}"
    self.logger.info(f"\n{border}", tag="PROFILE")
    self.logger.info(f"Creating browser profile: {Fore.GREEN}{profile_name}{Style.RESET_ALL}", tag="PROFILE")
    self.logger.info(f"Profile directory: {Fore.YELLOW}{profile_path}{Style.RESET_ALL}", tag="PROFILE")

    self.logger.info("\nInstructions:", tag="PROFILE")
    self.logger.info("1. A browser window will open for you to set up your profile.", tag="PROFILE")
    self.logger.info(f"2. {Fore.CYAN}Log in to websites{Style.RESET_ALL}, configure settings, etc. as needed.", tag="PROFILE")
    self.logger.info(f"3. When you're done, {Fore.YELLOW}press 'q' in this terminal{Style.RESET_ALL} to close the browser.", tag="PROFILE")
    self.logger.info("4. The profile will be saved and ready to use with Crawl4AI.", tag="PROFILE")
    self.logger.info(f"{border}\n", tag="PROFILE")

    # Create managed browser instance pointed at the new profile directory.
    managed_browser = ManagedBrowser(
        browser_type=browser_config.browser_type,
        user_data_dir=profile_path,
        headless=False,  # Must be visible
        logger=self.logger,
        debugging_port=browser_config.debugging_port
    )

    # Set up signal handlers to ensure cleanup on interrupt.
    # Originals are saved so they can be restored in the finally block.
    original_sigint = signal.getsignal(signal.SIGINT)
    original_sigterm = signal.getsignal(signal.SIGTERM)

    # Define cleanup handler for signals
    async def cleanup_handler(sig, frame):
        self.logger.warning("\nCleaning up browser process...", tag="PROFILE")
        await managed_browser.cleanup()
        # Restore original signal handlers
        signal.signal(signal.SIGINT, original_sigint)
        signal.signal(signal.SIGTERM, original_sigterm)
        if sig == signal.SIGINT:
            self.logger.error("Profile creation interrupted. Profile may be incomplete.", tag="PROFILE")
            sys.exit(1)

    # Set signal handlers
    # NOTE(review): the sync handler fires outside the event loop's normal flow;
    # asyncio.create_task here assumes a loop is running on this thread — confirm.
    def sigint_handler(sig, frame):
        asyncio.create_task(cleanup_handler(sig, frame))

    signal.signal(signal.SIGINT, sigint_handler)
    signal.signal(signal.SIGTERM, sigint_handler)

    # Event to signal when user is done with the browser
    user_done_event = asyncio.Event()

    # Run keyboard input loop in a separate task.
    # NOTE(review): termios/tty are POSIX-only; the 'q' listener will raise on
    # Windows — verify the supported-platform expectations.
    async def listen_for_quit_command():
        import termios
        import tty
        import select

        # First output the prompt
        self.logger.info(f"{Fore.CYAN}Press '{Fore.WHITE}q{Fore.CYAN}' when you've finished using the browser...{Style.RESET_ALL}", tag="PROFILE")

        # Save original terminal settings
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)

        try:
            # Switch to non-canonical mode (no line buffering)
            tty.setcbreak(fd)

            while True:
                # Check if input is available (non-blocking)
                readable, _, _ = select.select([sys.stdin], [], [], 0.5)
                if readable:
                    key = sys.stdin.read(1)
                    if key.lower() == 'q':
                        self.logger.info(f"{Fore.GREEN}Closing browser and saving profile...{Style.RESET_ALL}", tag="PROFILE")
                        user_done_event.set()
                        return

                # Check if the browser process has already exited
                if managed_browser.browser_process and managed_browser.browser_process.poll() is not None:
                    self.logger.info("Browser already closed. Ending input listener.", tag="PROFILE")
                    user_done_event.set()
                    return

                await asyncio.sleep(0.1)

        finally:
            # Restore terminal settings
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)

    try:
        # Start the browser
        await managed_browser.start()

        # Check if browser started successfully
        browser_process = managed_browser.browser_process
        if not browser_process:
            self.logger.error("Failed to start browser process.", tag="PROFILE")
            return None

        self.logger.info(f"Browser launched. {Fore.CYAN}Waiting for you to finish...{Style.RESET_ALL}", tag="PROFILE")

        # Start listening for keyboard input
        listener_task = asyncio.create_task(listen_for_quit_command())

        # Wait for either the user to press 'q' or for the browser process to exit naturally
        while not user_done_event.is_set() and browser_process.poll() is None:
            await asyncio.sleep(0.5)

        # Cancel the listener task if it's still running
        if not listener_task.done():
            listener_task.cancel()
            try:
                await listener_task
            except asyncio.CancelledError:
                pass

        # If the browser is still running and the user pressed 'q', terminate it
        if browser_process.poll() is None and user_done_event.is_set():
            self.logger.info("Terminating browser process...", tag="PROFILE")
            await managed_browser.cleanup()

        self.logger.success(f"Browser closed. Profile saved at: {Fore.GREEN}{profile_path}{Style.RESET_ALL}", tag="PROFILE")

    except Exception as e:
        self.logger.error(f"Error creating profile: {str(e)}", tag="PROFILE")
        await managed_browser.cleanup()
        return None
    finally:
        # Restore original signal handlers
        signal.signal(signal.SIGINT, original_sigint)
        signal.signal(signal.SIGTERM, original_sigterm)

        # Make sure browser is fully cleaned up
        await managed_browser.cleanup()

    # Return the profile path
    return profile_path
|
||||||
|
|
||||||
|
def list_profiles(self) -> List[Dict[str, Any]]:
    """
    Lists all available browser profiles in the Crawl4AI profiles directory.

    A directory counts as a profile when it carries a Chromium marker
    ("Preferences", possibly under "Default/") or a Firefox marker ("prefs.js").

    Returns:
        list: A list of dictionaries containing profile information:
            [{"name": "profile_name", "path": "/path/to/profile", "created": datetime, "type": "chromium|firefox"}]

    Example:
        ```python
        profiler = BrowserProfiler()

        # List all available profiles
        profiles = profiler.list_profiles()

        for profile in profiles:
            print(f"Profile: {profile['name']}")
            print(f"  Path: {profile['path']}")
            print(f"  Created: {profile['created']}")
            print(f"  Browser type: {profile['type']}")
        ```
    """
    if not os.path.exists(self.profiles_dir):
        return []

    profiles = []
    for name in os.listdir(self.profiles_dir):
        profile_path = os.path.join(self.profiles_dir, name)

        # Skip anything that is not a directory.
        if not os.path.isdir(profile_path):
            continue

        # Classify the directory by its browser-specific marker files.
        if os.path.exists(os.path.join(profile_path, "Preferences")) or os.path.exists(
            os.path.join(profile_path, "Default", "Preferences")
        ):
            browser_type = "chromium"
        elif os.path.exists(os.path.join(profile_path, "prefs.js")):
            browser_type = "firefox"
        else:
            continue  # not a recognizable browser profile

        profiles.append({
            "name": name,
            "path": profile_path,
            "created": datetime.datetime.fromtimestamp(os.path.getctime(profile_path)),
            "type": browser_type,
        })

    # Newest profiles first.
    profiles.sort(key=lambda entry: entry["created"], reverse=True)

    return profiles
|
||||||
|
|
||||||
|
def get_profile_path(self, profile_name: str) -> Optional[str]:
    """
    Get the full path to a profile by name.

    Args:
        profile_name (str): Name of the profile (not the full path)

    Returns:
        str: Full path to the profile directory, or None if not found

    Example:
        ```python
        profiler = BrowserProfiler()

        path = profiler.get_profile_path("my-profile")
        if path:
            print(f"Profile path: {path}")
        else:
            print("Profile not found")
        ```
    """
    candidate = os.path.join(self.profiles_dir, profile_name)

    # The directory must exist before we inspect it.
    if not os.path.isdir(candidate):
        return None

    # Accept only directories carrying a known browser-profile marker.
    marker_files = (
        os.path.join(candidate, "Preferences"),
        os.path.join(candidate, "Default", "Preferences"),
        os.path.join(candidate, "prefs.js"),
    )
    if not any(os.path.exists(marker) for marker in marker_files):
        return None  # Not a valid browser profile

    return candidate
|
||||||
|
|
||||||
|
def delete_profile(self, profile_name_or_path: str) -> bool:
    """
    Delete a browser profile by name or path.

    Args:
        profile_name_or_path (str): Name of the profile or full path to profile directory

    Returns:
        bool: True if the profile was deleted successfully, False otherwise

    Example:
        ```python
        profiler = BrowserProfiler()

        # Delete by name
        success = profiler.delete_profile("my-profile")

        # Delete by path
        success = profiler.delete_profile("/path/to/.crawl4ai/profiles/my-profile")
        ```
    """
    # A bare name is resolved relative to the profiles directory;
    # an absolute path is used as given.
    if os.path.isabs(profile_name_or_path):
        profile_path = profile_name_or_path
    else:
        profile_path = os.path.join(self.profiles_dir, profile_name_or_path)

    if not os.path.isdir(profile_path):
        return False

    # Refuse to delete directories that don't look like browser profiles.
    markers = (
        os.path.join(profile_path, "Preferences"),
        os.path.join(profile_path, "Default", "Preferences"),
        os.path.join(profile_path, "prefs.js"),
    )
    if not any(os.path.exists(marker) for marker in markers):
        return False  # Not a valid browser profile

    # Best-effort removal; any failure is reported as False.
    try:
        shutil.rmtree(profile_path)
    except Exception:
        return False
    return True
|
||||||
|
|
||||||
|
async def interactive_manager(self, crawl_callback=None):
|
||||||
|
"""
|
||||||
|
Launch an interactive profile management console.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
crawl_callback (callable, optional): Function to call when selecting option to use
|
||||||
|
a profile for crawling. It will be called with (profile_path, url).
|
||||||
|
|
||||||
|
Example:
|
||||||
|
```python
|
||||||
|
profiler = BrowserProfiler()
|
||||||
|
|
||||||
|
# Define a custom crawl function
|
||||||
|
async def my_crawl_function(profile_path, url):
|
||||||
|
print(f"Crawling {url} with profile {profile_path}")
|
||||||
|
# Implement your crawling logic here
|
||||||
|
|
||||||
|
# Start interactive manager
|
||||||
|
await profiler.interactive_manager(crawl_callback=my_crawl_function)
|
||||||
|
```
|
||||||
|
"""
|
||||||
|
while True:
|
||||||
|
self.logger.info(f"\n{Fore.CYAN}Profile Management Options:{Style.RESET_ALL}", tag="MENU")
|
||||||
|
self.logger.info(f"1. {Fore.GREEN}Create a new profile{Style.RESET_ALL}", tag="MENU")
|
||||||
|
self.logger.info(f"2. {Fore.YELLOW}List available profiles{Style.RESET_ALL}", tag="MENU")
|
||||||
|
self.logger.info(f"3. {Fore.RED}Delete a profile{Style.RESET_ALL}", tag="MENU")
|
||||||
|
|
||||||
|
# Only show crawl option if callback provided
|
||||||
|
if crawl_callback:
|
||||||
|
self.logger.info(f"4. {Fore.CYAN}Use a profile to crawl a website{Style.RESET_ALL}", tag="MENU")
|
||||||
|
self.logger.info(f"5. {Fore.MAGENTA}Exit{Style.RESET_ALL}", tag="MENU")
|
||||||
|
exit_option = "5"
|
||||||
|
else:
|
||||||
|
self.logger.info(f"4. {Fore.MAGENTA}Exit{Style.RESET_ALL}", tag="MENU")
|
||||||
|
exit_option = "4"
|
||||||
|
|
||||||
|
choice = input(f"\n{Fore.CYAN}Enter your choice (1-{exit_option}): {Style.RESET_ALL}")
|
||||||
|
|
||||||
|
if choice == "1":
|
||||||
|
# Create new profile
|
||||||
|
name = input(f"{Fore.GREEN}Enter a name for the new profile (or press Enter for auto-generated name): {Style.RESET_ALL}")
|
||||||
|
await self.create_profile(name or None)
|
||||||
|
|
||||||
|
elif choice == "2":
|
||||||
|
# List profiles
|
||||||
|
profiles = self.list_profiles()
|
||||||
|
|
||||||
|
if not profiles:
|
||||||
|
self.logger.warning(" No profiles found. Create one first with option 1.", tag="PROFILES")
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Print profile information with colorama formatting
|
||||||
|
self.logger.info("\nAvailable profiles:", tag="PROFILES")
|
||||||
|
for i, profile in enumerate(profiles):
|
||||||
|
self.logger.info(f"[{i+1}] {Fore.CYAN}{profile['name']}{Style.RESET_ALL}", tag="PROFILES")
|
||||||
|
self.logger.info(f" Path: {Fore.YELLOW}{profile['path']}{Style.RESET_ALL}", tag="PROFILES")
|
||||||
|
self.logger.info(f" Created: {profile['created'].strftime('%Y-%m-%d %H:%M:%S')}", tag="PROFILES")
|
||||||
|
self.logger.info(f" Browser type: {profile['type']}", tag="PROFILES")
|
||||||
|
self.logger.info("", tag="PROFILES") # Empty line for spacing
|
||||||
|
|
||||||
|
elif choice == "3":
|
||||||
|
# Delete profile
|
||||||
|
profiles = self.list_profiles()
|
||||||
|
if not profiles:
|
||||||
|
self.logger.warning("No profiles found to delete", tag="PROFILES")
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Display numbered list
|
||||||
|
self.logger.info(f"\n{Fore.YELLOW}Available profiles:{Style.RESET_ALL}", tag="PROFILES")
|
||||||
|
for i, profile in enumerate(profiles):
|
||||||
|
self.logger.info(f"[{i+1}] {profile['name']}", tag="PROFILES")
|
||||||
|
|
||||||
|
# Get profile to delete
|
||||||
|
profile_idx = input(f"{Fore.RED}Enter the number of the profile to delete (or 'c' to cancel): {Style.RESET_ALL}")
|
||||||
|
if profile_idx.lower() == 'c':
|
||||||
|
continue
|
||||||
|
|
||||||
|
try:
|
||||||
|
idx = int(profile_idx) - 1
|
||||||
|
if 0 <= idx < len(profiles):
|
||||||
|
profile_name = profiles[idx]["name"]
|
||||||
|
self.logger.info(f"Deleting profile: {Fore.YELLOW}{profile_name}{Style.RESET_ALL}", tag="PROFILES")
|
||||||
|
|
||||||
|
# Confirm deletion
|
||||||
|
confirm = input(f"{Fore.RED}Are you sure you want to delete this profile? (y/n): {Style.RESET_ALL}")
|
||||||
|
if confirm.lower() == 'y':
|
||||||
|
success = self.delete_profile(profiles[idx]["path"])
|
||||||
|
|
||||||
|
if success:
|
||||||
|
self.logger.success(f"Profile {Fore.GREEN}{profile_name}{Style.RESET_ALL} deleted successfully", tag="PROFILES")
|
||||||
|
else:
|
||||||
|
self.logger.error(f"Failed to delete profile {Fore.RED}{profile_name}{Style.RESET_ALL}", tag="PROFILES")
|
||||||
|
else:
|
||||||
|
self.logger.error("Invalid profile number", tag="PROFILES")
|
||||||
|
except ValueError:
|
||||||
|
self.logger.error("Please enter a valid number", tag="PROFILES")
|
||||||
|
|
||||||
|
elif choice == "4" and crawl_callback:
|
||||||
|
# Use profile to crawl a site
|
||||||
|
profiles = self.list_profiles()
|
||||||
|
if not profiles:
|
||||||
|
self.logger.warning("No profiles found. Create one first.", tag="PROFILES")
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Display numbered list
|
||||||
|
self.logger.info(f"\n{Fore.YELLOW}Available profiles:{Style.RESET_ALL}", tag="PROFILES")
|
||||||
|
for i, profile in enumerate(profiles):
|
||||||
|
self.logger.info(f"[{i+1}] {profile['name']}", tag="PROFILES")
|
||||||
|
|
||||||
|
# Get profile to use
|
||||||
|
profile_idx = input(f"{Fore.CYAN}Enter the number of the profile to use (or 'c' to cancel): {Style.RESET_ALL}")
|
||||||
|
if profile_idx.lower() == 'c':
|
||||||
|
continue
|
||||||
|
|
||||||
|
try:
|
||||||
|
idx = int(profile_idx) - 1
|
||||||
|
if 0 <= idx < len(profiles):
|
||||||
|
profile_path = profiles[idx]["path"]
|
||||||
|
url = input(f"{Fore.CYAN}Enter the URL to crawl: {Style.RESET_ALL}")
|
||||||
|
if url:
|
||||||
|
# Call the provided crawl callback
|
||||||
|
await crawl_callback(profile_path, url)
|
||||||
|
else:
|
||||||
|
self.logger.error("No URL provided", tag="CRAWL")
|
||||||
|
else:
|
||||||
|
self.logger.error("Invalid profile number", tag="PROFILES")
|
||||||
|
except ValueError:
|
||||||
|
self.logger.error("Please enter a valid number", tag="PROFILES")
|
||||||
|
|
||||||
|
elif choice == exit_option:
|
||||||
|
# Exit
|
||||||
|
self.logger.info("Exiting profile management", tag="MENU")
|
||||||
|
break
|
||||||
|
|
||||||
|
else:
|
||||||
|
self.logger.error(f"Invalid choice. Please enter a number between 1 and {exit_option}.", tag="MENU")
|
||||||
@@ -12,6 +12,7 @@ class CacheMode(Enum):
|
|||||||
- WRITE_ONLY: Only write to cache, don't read
|
- WRITE_ONLY: Only write to cache, don't read
|
||||||
- BYPASS: Bypass cache for this operation
|
- BYPASS: Bypass cache for this operation
|
||||||
"""
|
"""
|
||||||
|
|
||||||
ENABLED = "enabled"
|
ENABLED = "enabled"
|
||||||
DISABLED = "disabled"
|
DISABLED = "disabled"
|
||||||
READ_ONLY = "read_only"
|
READ_ONLY = "read_only"
|
||||||
@@ -36,6 +37,7 @@ class CacheContext:
|
|||||||
is_raw_html (bool): True if the URL is raw HTML, False otherwise.
|
is_raw_html (bool): True if the URL is raw HTML, False otherwise.
|
||||||
_url_display (str): The display name for the URL (web, local file, or raw HTML).
|
_url_display (str): The display name for the URL (web, local file, or raw HTML).
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, url: str, cache_mode: CacheMode, always_bypass: bool = False):
|
def __init__(self, url: str, cache_mode: CacheMode, always_bypass: bool = False):
|
||||||
"""
|
"""
|
||||||
Initializes the CacheContext with the provided URL and cache mode.
|
Initializes the CacheContext with the provided URL and cache mode.
|
||||||
@@ -48,8 +50,8 @@ class CacheContext:
|
|||||||
self.url = url
|
self.url = url
|
||||||
self.cache_mode = cache_mode
|
self.cache_mode = cache_mode
|
||||||
self.always_bypass = always_bypass
|
self.always_bypass = always_bypass
|
||||||
self.is_cacheable = url.startswith(('http://', 'https://', 'file://'))
|
self.is_cacheable = url.startswith(("http://", "https://", "file://"))
|
||||||
self.is_web_url = url.startswith(('http://', 'https://'))
|
self.is_web_url = url.startswith(("http://", "https://"))
|
||||||
self.is_local_file = url.startswith("file://")
|
self.is_local_file = url.startswith("file://")
|
||||||
self.is_raw_html = url.startswith("raw:")
|
self.is_raw_html = url.startswith("raw:")
|
||||||
self._url_display = url if not self.is_raw_html else "Raw HTML"
|
self._url_display = url if not self.is_raw_html else "Raw HTML"
|
||||||
@@ -94,7 +96,7 @@ def _legacy_to_cache_mode(
|
|||||||
disable_cache: bool = False,
|
disable_cache: bool = False,
|
||||||
bypass_cache: bool = False,
|
bypass_cache: bool = False,
|
||||||
no_cache_read: bool = False,
|
no_cache_read: bool = False,
|
||||||
no_cache_write: bool = False
|
no_cache_write: bool = False,
|
||||||
) -> CacheMode:
|
) -> CacheMode:
|
||||||
"""
|
"""
|
||||||
Converts legacy cache parameters to the new CacheMode enum.
|
Converts legacy cache parameters to the new CacheMode enum.
|
||||||
|
|||||||
@@ -3,7 +3,6 @@ import re
|
|||||||
from collections import Counter
|
from collections import Counter
|
||||||
import string
|
import string
|
||||||
from .model_loader import load_nltk_punkt
|
from .model_loader import load_nltk_punkt
|
||||||
from .utils import *
|
|
||||||
|
|
||||||
# Define the abstract base class for chunking strategies
|
# Define the abstract base class for chunking strategies
|
||||||
class ChunkingStrategy(ABC):
|
class ChunkingStrategy(ABC):
|
||||||
@@ -24,19 +23,23 @@ class ChunkingStrategy(ABC):
|
|||||||
"""
|
"""
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
# Create an identity chunking strategy f(x) = [x]
|
# Create an identity chunking strategy f(x) = [x]
|
||||||
class IdentityChunking(ChunkingStrategy):
|
class IdentityChunking(ChunkingStrategy):
|
||||||
"""
|
"""
|
||||||
Chunking strategy that returns the input text as a single chunk.
|
Chunking strategy that returns the input text as a single chunk.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def chunk(self, text: str) -> list:
|
def chunk(self, text: str) -> list:
|
||||||
return [text]
|
return [text]
|
||||||
|
|
||||||
|
|
||||||
# Regex-based chunking
|
# Regex-based chunking
|
||||||
class RegexChunking(ChunkingStrategy):
|
class RegexChunking(ChunkingStrategy):
|
||||||
"""
|
"""
|
||||||
Chunking strategy that splits text based on regular expression patterns.
|
Chunking strategy that splits text based on regular expression patterns.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, patterns=None, **kwargs):
|
def __init__(self, patterns=None, **kwargs):
|
||||||
"""
|
"""
|
||||||
Initialize the RegexChunking object.
|
Initialize the RegexChunking object.
|
||||||
@@ -45,7 +48,7 @@ class RegexChunking(ChunkingStrategy):
|
|||||||
patterns (list): A list of regular expression patterns to split text.
|
patterns (list): A list of regular expression patterns to split text.
|
||||||
"""
|
"""
|
||||||
if patterns is None:
|
if patterns is None:
|
||||||
patterns = [r'\n\n'] # Default split pattern
|
patterns = [r"\n\n"] # Default split pattern
|
||||||
self.patterns = patterns
|
self.patterns = patterns
|
||||||
|
|
||||||
def chunk(self, text: str) -> list:
|
def chunk(self, text: str) -> list:
|
||||||
@@ -57,18 +60,20 @@ class RegexChunking(ChunkingStrategy):
|
|||||||
paragraphs = new_paragraphs
|
paragraphs = new_paragraphs
|
||||||
return paragraphs
|
return paragraphs
|
||||||
|
|
||||||
|
|
||||||
# NLP-based sentence chunking
|
# NLP-based sentence chunking
|
||||||
class NlpSentenceChunking(ChunkingStrategy):
|
class NlpSentenceChunking(ChunkingStrategy):
|
||||||
"""
|
"""
|
||||||
Chunking strategy that splits text into sentences using NLTK's sentence tokenizer.
|
Chunking strategy that splits text into sentences using NLTK's sentence tokenizer.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, **kwargs):
|
def __init__(self, **kwargs):
|
||||||
"""
|
"""
|
||||||
Initialize the NlpSentenceChunking object.
|
Initialize the NlpSentenceChunking object.
|
||||||
"""
|
"""
|
||||||
|
from crawl4ai.le.legacy.model_loader import load_nltk_punkt
|
||||||
load_nltk_punkt()
|
load_nltk_punkt()
|
||||||
|
|
||||||
|
|
||||||
def chunk(self, text: str) -> list:
|
def chunk(self, text: str) -> list:
|
||||||
# Improved regex for sentence splitting
|
# Improved regex for sentence splitting
|
||||||
# sentence_endings = re.compile(
|
# sentence_endings = re.compile(
|
||||||
@@ -77,11 +82,13 @@ class NlpSentenceChunking(ChunkingStrategy):
|
|||||||
# sentences = sentence_endings.split(text)
|
# sentences = sentence_endings.split(text)
|
||||||
# sens = [sent.strip() for sent in sentences if sent]
|
# sens = [sent.strip() for sent in sentences if sent]
|
||||||
from nltk.tokenize import sent_tokenize
|
from nltk.tokenize import sent_tokenize
|
||||||
|
|
||||||
sentences = sent_tokenize(text)
|
sentences = sent_tokenize(text)
|
||||||
sens = [sent.strip() for sent in sentences]
|
sens = [sent.strip() for sent in sentences]
|
||||||
|
|
||||||
return list(set(sens))
|
return list(set(sens))
|
||||||
|
|
||||||
|
|
||||||
# Topic-based segmentation using TextTiling
|
# Topic-based segmentation using TextTiling
|
||||||
class TopicSegmentationChunking(ChunkingStrategy):
|
class TopicSegmentationChunking(ChunkingStrategy):
|
||||||
"""
|
"""
|
||||||
@@ -100,6 +107,7 @@ class TopicSegmentationChunking(ChunkingStrategy):
|
|||||||
num_keywords (int): The number of keywords to extract for each topic segment.
|
num_keywords (int): The number of keywords to extract for each topic segment.
|
||||||
"""
|
"""
|
||||||
import nltk as nl
|
import nltk as nl
|
||||||
|
|
||||||
self.tokenizer = nl.tokenize.TextTilingTokenizer()
|
self.tokenizer = nl.tokenize.TextTilingTokenizer()
|
||||||
self.num_keywords = num_keywords
|
self.num_keywords = num_keywords
|
||||||
|
|
||||||
@@ -111,8 +119,14 @@ class TopicSegmentationChunking(ChunkingStrategy):
|
|||||||
def extract_keywords(self, text: str) -> list:
|
def extract_keywords(self, text: str) -> list:
|
||||||
# Tokenize and remove stopwords and punctuation
|
# Tokenize and remove stopwords and punctuation
|
||||||
import nltk as nl
|
import nltk as nl
|
||||||
|
|
||||||
tokens = nl.toknize.word_tokenize(text)
|
tokens = nl.toknize.word_tokenize(text)
|
||||||
tokens = [token.lower() for token in tokens if token not in nl.corpus.stopwords.words('english') and token not in string.punctuation]
|
tokens = [
|
||||||
|
token.lower()
|
||||||
|
for token in tokens
|
||||||
|
if token not in nl.corpus.stopwords.words("english")
|
||||||
|
and token not in string.punctuation
|
||||||
|
]
|
||||||
|
|
||||||
# Calculate frequency distribution
|
# Calculate frequency distribution
|
||||||
freq_dist = Counter(tokens)
|
freq_dist = Counter(tokens)
|
||||||
@@ -123,9 +137,12 @@ class TopicSegmentationChunking(ChunkingStrategy):
|
|||||||
# Segment the text into topics
|
# Segment the text into topics
|
||||||
segments = self.chunk(text)
|
segments = self.chunk(text)
|
||||||
# Extract keywords for each topic segment
|
# Extract keywords for each topic segment
|
||||||
segments_with_topics = [(segment, self.extract_keywords(segment)) for segment in segments]
|
segments_with_topics = [
|
||||||
|
(segment, self.extract_keywords(segment)) for segment in segments
|
||||||
|
]
|
||||||
return segments_with_topics
|
return segments_with_topics
|
||||||
|
|
||||||
|
|
||||||
# Fixed-length word chunks
|
# Fixed-length word chunks
|
||||||
class FixedLengthWordChunking(ChunkingStrategy):
|
class FixedLengthWordChunking(ChunkingStrategy):
|
||||||
"""
|
"""
|
||||||
@@ -136,6 +153,7 @@ class FixedLengthWordChunking(ChunkingStrategy):
|
|||||||
2. Create chunks of fixed length
|
2. Create chunks of fixed length
|
||||||
3. Return the list of chunks
|
3. Return the list of chunks
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, chunk_size=100, **kwargs):
|
def __init__(self, chunk_size=100, **kwargs):
|
||||||
"""
|
"""
|
||||||
Initialize the fixed-length word chunking strategy with the given chunk size.
|
Initialize the fixed-length word chunking strategy with the given chunk size.
|
||||||
@@ -147,7 +165,11 @@ class FixedLengthWordChunking(ChunkingStrategy):
|
|||||||
|
|
||||||
def chunk(self, text: str) -> list:
|
def chunk(self, text: str) -> list:
|
||||||
words = text.split()
|
words = text.split()
|
||||||
return [' '.join(words[i:i + self.chunk_size]) for i in range(0, len(words), self.chunk_size)]
|
return [
|
||||||
|
" ".join(words[i : i + self.chunk_size])
|
||||||
|
for i in range(0, len(words), self.chunk_size)
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
# Sliding window chunking
|
# Sliding window chunking
|
||||||
class SlidingWindowChunking(ChunkingStrategy):
|
class SlidingWindowChunking(ChunkingStrategy):
|
||||||
@@ -159,6 +181,7 @@ class SlidingWindowChunking(ChunkingStrategy):
|
|||||||
2. Create chunks of fixed length
|
2. Create chunks of fixed length
|
||||||
3. Return the list of chunks
|
3. Return the list of chunks
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, window_size=100, step=50, **kwargs):
|
def __init__(self, window_size=100, step=50, **kwargs):
|
||||||
"""
|
"""
|
||||||
Initialize the sliding window chunking strategy with the given window size and
|
Initialize the sliding window chunking strategy with the given window size and
|
||||||
@@ -179,15 +202,16 @@ class SlidingWindowChunking(ChunkingStrategy):
|
|||||||
return [text]
|
return [text]
|
||||||
|
|
||||||
for i in range(0, len(words) - self.window_size + 1, self.step):
|
for i in range(0, len(words) - self.window_size + 1, self.step):
|
||||||
chunk = ' '.join(words[i:i + self.window_size])
|
chunk = " ".join(words[i : i + self.window_size])
|
||||||
chunks.append(chunk)
|
chunks.append(chunk)
|
||||||
|
|
||||||
# Handle the last chunk if it doesn't align perfectly
|
# Handle the last chunk if it doesn't align perfectly
|
||||||
if i + self.window_size < len(words):
|
if i + self.window_size < len(words):
|
||||||
chunks.append(' '.join(words[-self.window_size:]))
|
chunks.append(" ".join(words[-self.window_size :]))
|
||||||
|
|
||||||
return chunks
|
return chunks
|
||||||
|
|
||||||
|
|
||||||
class OverlappingWindowChunking(ChunkingStrategy):
|
class OverlappingWindowChunking(ChunkingStrategy):
|
||||||
"""
|
"""
|
||||||
Chunking strategy that splits text into overlapping word chunks.
|
Chunking strategy that splits text into overlapping word chunks.
|
||||||
@@ -198,6 +222,7 @@ class OverlappingWindowChunking(ChunkingStrategy):
|
|||||||
3. Slide the window by the overlap size
|
3. Slide the window by the overlap size
|
||||||
4. Return the list of chunks
|
4. Return the list of chunks
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, window_size=1000, overlap=100, **kwargs):
|
def __init__(self, window_size=1000, overlap=100, **kwargs):
|
||||||
"""
|
"""
|
||||||
Initialize the overlapping window chunking strategy with the given window size and
|
Initialize the overlapping window chunking strategy with the given window size and
|
||||||
@@ -220,7 +245,7 @@ class OverlappingWindowChunking(ChunkingStrategy):
|
|||||||
start = 0
|
start = 0
|
||||||
while start < len(words):
|
while start < len(words):
|
||||||
end = start + self.window_size
|
end = start + self.window_size
|
||||||
chunk = ' '.join(words[start:end])
|
chunk = " ".join(words[start:end])
|
||||||
chunks.append(chunk)
|
chunks.append(chunk)
|
||||||
|
|
||||||
if end >= len(words):
|
if end >= len(words):
|
||||||
|
|||||||
841
crawl4ai/cli.py
841
crawl4ai/cli.py
@@ -1,105 +1,776 @@
|
|||||||
import click
|
import click
|
||||||
|
import os
|
||||||
|
import time
|
||||||
|
import datetime
|
||||||
import sys
|
import sys
|
||||||
import asyncio
|
import shutil
|
||||||
from typing import List
|
import humanize
|
||||||
from .docs_manager import DocsManager
|
from typing import Dict, Any, Optional, List
|
||||||
from .async_logger import AsyncLogger
|
import json
|
||||||
|
import yaml
|
||||||
|
import anyio
|
||||||
|
from rich.console import Console
|
||||||
|
from rich.table import Table
|
||||||
|
from rich.panel import Panel
|
||||||
|
from rich.prompt import Prompt, Confirm
|
||||||
|
from rich.style import Style
|
||||||
|
|
||||||
logger = AsyncLogger(verbose=True)
|
from crawl4ai import (
|
||||||
docs_manager = DocsManager(logger)
|
CacheMode,
|
||||||
|
AsyncWebCrawler,
|
||||||
|
CrawlResult,
|
||||||
|
BrowserConfig,
|
||||||
|
CrawlerRunConfig,
|
||||||
|
LLMExtractionStrategy,
|
||||||
|
JsonCssExtractionStrategy,
|
||||||
|
JsonXPathExtractionStrategy,
|
||||||
|
BM25ContentFilter,
|
||||||
|
PruningContentFilter,
|
||||||
|
BrowserProfiler
|
||||||
|
)
|
||||||
|
from litellm import completion
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
def print_table(headers: List[str], rows: List[List[str]], padding: int = 2):
|
from crawl4ai.async_configs import LlmConfig
|
||||||
"""Print formatted table with headers and rows"""
|
|
||||||
widths = [max(len(str(cell)) for cell in col) for col in zip(headers, *rows)]
|
|
||||||
border = '+' + '+'.join('-' * (w + 2 * padding) for w in widths) + '+'
|
|
||||||
|
|
||||||
def format_row(row):
|
# Initialize rich console
|
||||||
return '|' + '|'.join(f"{' ' * padding}{str(cell):<{w}}{' ' * padding}"
|
console = Console()
|
||||||
for cell, w in zip(row, widths)) + '|'
|
|
||||||
|
|
||||||
click.echo(border)
|
def get_global_config() -> dict:
|
||||||
click.echo(format_row(headers))
|
config_dir = Path.home() / ".crawl4ai"
|
||||||
click.echo(border)
|
config_file = config_dir / "global.yml"
|
||||||
for row in rows:
|
|
||||||
click.echo(format_row(row))
|
|
||||||
click.echo(border)
|
|
||||||
|
|
||||||
@click.group()
|
if not config_file.exists():
|
||||||
|
config_dir.mkdir(parents=True, exist_ok=True)
|
||||||
|
return {}
|
||||||
|
|
||||||
|
with open(config_file) as f:
|
||||||
|
return yaml.safe_load(f) or {}
|
||||||
|
|
||||||
|
def save_global_config(config: dict):
|
||||||
|
config_file = Path.home() / ".crawl4ai" / "global.yml"
|
||||||
|
with open(config_file, "w") as f:
|
||||||
|
yaml.dump(config, f)
|
||||||
|
|
||||||
|
def setup_llm_config() -> tuple[str, str]:
|
||||||
|
config = get_global_config()
|
||||||
|
provider = config.get("DEFAULT_LLM_PROVIDER")
|
||||||
|
token = config.get("DEFAULT_LLM_PROVIDER_TOKEN")
|
||||||
|
|
||||||
|
if not provider:
|
||||||
|
click.echo("\nNo default LLM provider configured.")
|
||||||
|
click.echo("Provider format: 'company/model' (e.g., 'openai/gpt-4o', 'anthropic/claude-3-sonnet')")
|
||||||
|
click.echo("See available providers at: https://docs.litellm.ai/docs/providers")
|
||||||
|
provider = click.prompt("Enter provider")
|
||||||
|
|
||||||
|
if not provider.startswith("ollama/"):
|
||||||
|
if not token:
|
||||||
|
token = click.prompt("Enter API token for " + provider, hide_input=True)
|
||||||
|
else:
|
||||||
|
token = "no-token"
|
||||||
|
|
||||||
|
if not config.get("DEFAULT_LLM_PROVIDER") or not config.get("DEFAULT_LLM_PROVIDER_TOKEN"):
|
||||||
|
config["DEFAULT_LLM_PROVIDER"] = provider
|
||||||
|
config["DEFAULT_LLM_PROVIDER_TOKEN"] = token
|
||||||
|
save_global_config(config)
|
||||||
|
click.echo("\nConfiguration saved to ~/.crawl4ai/global.yml")
|
||||||
|
|
||||||
|
return provider, token
|
||||||
|
|
||||||
|
async def stream_llm_response(url: str, markdown: str, query: str, provider: str, token: str):
|
||||||
|
response = completion(
|
||||||
|
model=provider,
|
||||||
|
api_key=token,
|
||||||
|
messages=[
|
||||||
|
{
|
||||||
|
"content": f"You are Crawl4ai assistant, answering user question based on the provided context which is crawled from {url}.",
|
||||||
|
"role": "system"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"content": f"<|start of context|>\n{markdown}\n<|end of context|>\n\n{query}",
|
||||||
|
"role": "user"
|
||||||
|
},
|
||||||
|
],
|
||||||
|
stream=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
for chunk in response:
|
||||||
|
if content := chunk["choices"][0]["delta"].get("content"):
|
||||||
|
print(content, end="", flush=True)
|
||||||
|
print() # New line at end
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def parse_key_values(ctx, param, value) -> Dict[str, Any]:
|
||||||
|
if not value:
|
||||||
|
return {}
|
||||||
|
result = {}
|
||||||
|
pairs = value.split(',')
|
||||||
|
for pair in pairs:
|
||||||
|
try:
|
||||||
|
k, v = pair.split('=', 1)
|
||||||
|
# Handle common value types
|
||||||
|
if v.lower() == 'true': v = True
|
||||||
|
elif v.lower() == 'false': v = False
|
||||||
|
elif v.isdigit(): v = int(v)
|
||||||
|
elif v.replace('.','',1).isdigit(): v = float(v)
|
||||||
|
elif v.startswith('[') and v.endswith(']'):
|
||||||
|
v = [x.strip() for x in v[1:-1].split(',') if x.strip()]
|
||||||
|
elif v.startswith('{') and v.endswith('}'):
|
||||||
|
try:
|
||||||
|
v = json.loads(v)
|
||||||
|
except json.JSONDecodeError:
|
||||||
|
raise click.BadParameter(f'Invalid JSON object: {v}')
|
||||||
|
result[k.strip()] = v
|
||||||
|
except ValueError:
|
||||||
|
raise click.BadParameter(f'Invalid key=value pair: {pair}')
|
||||||
|
return result
|
||||||
|
|
||||||
|
def load_config_file(path: Optional[str]) -> dict:
|
||||||
|
if not path:
|
||||||
|
return {}
|
||||||
|
|
||||||
|
try:
|
||||||
|
with open(path) as f:
|
||||||
|
if path.endswith((".yaml", ".yml")):
|
||||||
|
return yaml.safe_load(f)
|
||||||
|
return json.load(f)
|
||||||
|
except Exception as e:
|
||||||
|
raise click.BadParameter(f'Error loading config file {path}: {str(e)}')
|
||||||
|
|
||||||
|
def load_schema_file(path: Optional[str]) -> dict:
|
||||||
|
if not path:
|
||||||
|
return None
|
||||||
|
return load_config_file(path)
|
||||||
|
|
||||||
|
async def run_crawler(url: str, browser_cfg: BrowserConfig, crawler_cfg: CrawlerRunConfig, verbose: bool):
|
||||||
|
if verbose:
|
||||||
|
click.echo("Starting crawler with configurations:")
|
||||||
|
click.echo(f"Browser config: {browser_cfg.dump()}")
|
||||||
|
click.echo(f"Crawler config: {crawler_cfg.dump()}")
|
||||||
|
|
||||||
|
async with AsyncWebCrawler(config=browser_cfg) as crawler:
|
||||||
|
try:
|
||||||
|
result = await crawler.arun(url=url, config=crawler_cfg)
|
||||||
|
return result
|
||||||
|
except Exception as e:
|
||||||
|
raise click.ClickException(f"Crawling failed: {str(e)}")
|
||||||
|
|
||||||
|
def show_examples():
|
||||||
|
examples = """
|
||||||
|
🚀 Crawl4AI CLI Examples
|
||||||
|
|
||||||
|
1️⃣ Basic Usage:
|
||||||
|
# Simple crawl with default settings
|
||||||
|
crwl https://example.com
|
||||||
|
|
||||||
|
# Get markdown output
|
||||||
|
crwl https://example.com -o markdown
|
||||||
|
|
||||||
|
# Verbose JSON output with cache bypass
|
||||||
|
crwl https://example.com -o json -v --bypass-cache
|
||||||
|
|
||||||
|
2️⃣ Using Config Files:
|
||||||
|
# Using browser and crawler configs
|
||||||
|
crwl https://example.com -B browser.yml -C crawler.yml
|
||||||
|
|
||||||
|
# CSS-based extraction
|
||||||
|
crwl https://example.com -e extract_css.yml -s css_schema.json -o json
|
||||||
|
|
||||||
|
# LLM-based extraction
|
||||||
|
crwl https://example.com -e extract_llm.yml -s llm_schema.json -o json
|
||||||
|
|
||||||
|
3️⃣ Direct Parameters:
|
||||||
|
# Browser settings
|
||||||
|
crwl https://example.com -b "headless=true,viewport_width=1280,user_agent_mode=random"
|
||||||
|
|
||||||
|
# Crawler settings
|
||||||
|
crwl https://example.com -c "css_selector=#main,delay_before_return_html=2,scan_full_page=true"
|
||||||
|
|
||||||
|
4️⃣ Profile Management for Identity-Based Crawling:
|
||||||
|
# Launch interactive profile manager
|
||||||
|
crwl profiles
|
||||||
|
|
||||||
|
# Create, list, and delete browser profiles for identity-based crawling
|
||||||
|
# Use a profile for crawling (keeps you logged in)
|
||||||
|
crwl https://example.com -p my-profile-name
|
||||||
|
|
||||||
|
# Example: Crawl a site that requires login
|
||||||
|
# 1. First create a profile and log in:
|
||||||
|
crwl profiles
|
||||||
|
# 2. Then use that profile to crawl the authenticated site:
|
||||||
|
crwl https://site-requiring-login.com/dashboard -p my-profile-name
|
||||||
|
|
||||||
|
5️⃣ Sample Config Files:
|
||||||
|
|
||||||
|
browser.yml:
|
||||||
|
headless: true
|
||||||
|
viewport_width: 1280
|
||||||
|
user_agent_mode: "random"
|
||||||
|
verbose: true
|
||||||
|
ignore_https_errors: true
|
||||||
|
|
||||||
|
extract_css.yml:
|
||||||
|
type: "json-css"
|
||||||
|
params:
|
||||||
|
verbose: true
|
||||||
|
|
||||||
|
css_schema.json:
|
||||||
|
{
|
||||||
|
"name": "ArticleExtractor",
|
||||||
|
"baseSelector": ".article",
|
||||||
|
"fields": [
|
||||||
|
{
|
||||||
|
"name": "title",
|
||||||
|
"selector": "h1.title",
|
||||||
|
"type": "text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "link",
|
||||||
|
"selector": "a.read-more",
|
||||||
|
"type": "attribute",
|
||||||
|
"attribute": "href"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
extract_llm.yml:
|
||||||
|
type: "llm"
|
||||||
|
provider: "openai/gpt-4"
|
||||||
|
instruction: "Extract all articles with their titles and links"
|
||||||
|
api_token: "your-token"
|
||||||
|
params:
|
||||||
|
temperature: 0.3
|
||||||
|
max_tokens: 1000
|
||||||
|
|
||||||
|
llm_schema.json:
|
||||||
|
{
|
||||||
|
"title": "Article",
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"title": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "The title of the article"
|
||||||
|
},
|
||||||
|
"link": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "URL to the full article"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
6️⃣ Advanced Usage:
|
||||||
|
# Combine configs with direct parameters
|
||||||
|
crwl https://example.com -B browser.yml -b "headless=false,viewport_width=1920"
|
||||||
|
|
||||||
|
# Full extraction pipeline
|
||||||
|
crwl https://example.com \\
|
||||||
|
-B browser.yml \\
|
||||||
|
-C crawler.yml \\
|
||||||
|
-e extract_llm.yml \\
|
||||||
|
-s llm_schema.json \\
|
||||||
|
-o json \\
|
||||||
|
-v
|
||||||
|
|
||||||
|
# Content filtering with BM25
|
||||||
|
crwl https://example.com \\
|
||||||
|
-f filter_bm25.yml \\
|
||||||
|
-o markdown-fit
|
||||||
|
|
||||||
|
# Authenticated crawling with profile
|
||||||
|
crwl https://login-required-site.com \\
|
||||||
|
-p my-authenticated-profile \\
|
||||||
|
-c "css_selector=.dashboard-content" \\
|
||||||
|
-o markdown
|
||||||
|
|
||||||
|
For more documentation visit: https://github.com/unclecode/crawl4ai
|
||||||
|
|
||||||
|
7️⃣ Q&A with LLM:
|
||||||
|
# Ask a question about the content
|
||||||
|
crwl https://example.com -q "What is the main topic discussed?"
|
||||||
|
|
||||||
|
# First view content, then ask questions
|
||||||
|
crwl https://example.com -o markdown # See the crawled content first
|
||||||
|
crwl https://example.com -q "Summarize the key points"
|
||||||
|
crwl https://example.com -q "What are the conclusions?"
|
||||||
|
|
||||||
|
# Advanced crawling with Q&A
|
||||||
|
crwl https://example.com \\
|
||||||
|
-B browser.yml \\
|
||||||
|
-c "css_selector=article,scan_full_page=true" \\
|
||||||
|
-q "What are the pros and cons mentioned?"
|
||||||
|
|
||||||
|
Note: First time using -q will prompt for LLM provider and API token.
|
||||||
|
These will be saved in ~/.crawl4ai/global.yml for future use.
|
||||||
|
|
||||||
|
Supported provider format: 'company/model'
|
||||||
|
Examples:
|
||||||
|
- ollama/llama3.3
|
||||||
|
- openai/gpt-4
|
||||||
|
- anthropic/claude-3-sonnet
|
||||||
|
- cohere/command
|
||||||
|
- google/gemini-pro
|
||||||
|
|
||||||
|
See full list of providers: https://docs.litellm.ai/docs/providers
|
||||||
|
|
||||||
|
8️⃣ Profile Management:
|
||||||
|
# Launch interactive profile manager
|
||||||
|
crwl profiles
|
||||||
|
|
||||||
|
# Create a profile and use it for crawling
|
||||||
|
crwl profiles # Create and set up your profile interactively
|
||||||
|
crwl https://example.com -p my-profile-name # Use profile for crawling
|
||||||
|
|
||||||
|
# Example workflow for authenticated site
|
||||||
|
# 1. First create a profile and log in to the site:
|
||||||
|
crwl profiles # Select "Create new profile" option
|
||||||
|
# 2. Then use that profile to crawl authenticated content:
|
||||||
|
crwl https://site-requiring-login.com/dashboard -p my-profile-name
|
||||||
|
"""
|
||||||
|
click.echo(examples)
|
||||||
|
|
||||||
|
def get_directory_size(path: str) -> int:
    """Return the total size in bytes of all regular files under *path*.

    Symbolic links are skipped so linked targets are not counted (and broken
    links do not raise).
    """
    size = 0
    for root, _dirs, files in os.walk(path):
        for name in files:
            full = os.path.join(root, name)
            if os.path.islink(full):
                continue
            size += os.path.getsize(full)
    return size
|
||||||
|
|
||||||
|
def display_profiles_table(profiles: List[Dict[str, Any]]):
    """Render the given browser profiles as a rich table on the console.

    Each entry in *profiles* provides "name", "path", "created" (a datetime)
    and "type" keys.  Prints a friendly panel instead when the list is empty.
    """
    if not profiles:
        console.print(
            Panel(
                "[yellow]No profiles found. Create one with the 'create' command.[/yellow]",
                title="Browser Profiles",
                border_style="blue",
            )
        )
        return

    table = Table(
        title="Browser Profiles",
        show_header=True,
        header_style="bold cyan",
        border_style="blue",
    )
    # (header, column options) pairs, in display order.
    columns = [
        ("#", {"style": "dim", "width": 4}),
        ("Name", {"style": "cyan", "no_wrap": True}),
        ("Path", {"style": "green"}),
        ("Created", {"style": "yellow"}),
        ("Browser", {"style": "magenta"}),
        ("Size", {"style": "blue", "justify": "right"}),
    ]
    for header, opts in columns:
        table.add_column(header, **opts)

    for index, entry in enumerate(profiles, start=1):
        # Folder size on disk, rendered human-readable (e.g. "1.2 MB").
        readable_size = humanize.naturalsize(get_directory_size(entry["path"]))
        table.add_row(
            str(index),
            entry["name"],
            entry["path"],
            entry["created"].strftime("%Y-%m-%d %H:%M"),
            entry["type"].capitalize(),
            readable_size,
        )

    console.print(table)
|
||||||
|
|
||||||
|
async def create_profile_interactive(profiler: BrowserProfiler):
    """Walk the user through creating a browser profile.

    Opens a managed browser window via *profiler*; the user logs in to
    whatever sites they need, then presses 'q' in the terminal to save.
    """
    console.print(
        Panel(
            "[bold cyan]Create Browser Profile[/bold cyan]\n"
            "This will open a browser window for you to set up your identity.\n"
            "Log in to sites, adjust settings, then press 'q' to save.",
            border_style="cyan",
        )
    )

    # Default name is timestamped so repeated runs never collide.
    default_name = f"profile_{int(time.time())}"
    profile_name = Prompt.ask("[cyan]Enter profile name[/cyan]", default=default_name)

    console.print("[cyan]Creating profile...[/cyan]")
    console.print(
        "[yellow]A browser window will open. After logging in to sites, "
        "press 'q' in this terminal to save.[/yellow]"
    )

    # Create the profile; surface any failure rather than crashing the menu.
    try:
        profile_path = await profiler.create_profile(profile_name)
    except Exception as e:
        console.print(f"[red]Error creating profile: {str(e)}[/red]")
        return

    if profile_path:
        console.print(f"[green]Profile successfully created at:[/green] {profile_path}")
    else:
        console.print("[red]Failed to create profile.[/red]")
|
||||||
|
|
||||||
|
def delete_profile_interactive(profiler: BrowserProfiler):
    """Prompt the user to pick a profile and delete it after confirmation."""
    profiles = profiler.list_profiles()

    if not profiles:
        console.print("[yellow]No profiles found to delete.[/yellow]")
        return

    display_profiles_table(profiles)

    # Restrict input to the row numbers shown in the table.
    valid_choices = [str(n) for n in range(1, len(profiles) + 1)]
    selection = Prompt.ask(
        "[red]Enter number of profile to delete[/red]",
        console=console,
        choices=valid_choices,
        show_choices=False,
    )

    try:
        target = profiles[int(selection) - 1]

        # Require explicit confirmation before destroying profile data.
        if Confirm.ask(
            f"[red]Are you sure you want to delete profile '{target['name']}'?[/red]"
        ):
            if profiler.delete_profile(target["path"]):
                console.print(
                    f"[green]Profile '{target['name']}' deleted successfully.[/green]"
                )
            else:
                console.print(
                    f"[red]Failed to delete profile '{target['name']}'.[/red]"
                )
    except (ValueError, IndexError):
        console.print("[red]Invalid selection.[/red]")
|
||||||
|
|
||||||
|
async def crawl_with_profile_cli(profile_path, url):
    """Crawl *url* with a managed browser that reuses the profile at *profile_path*.

    Prompts for the desired output format, prints the result, and returns the
    crawl result object (or None on failure).
    """
    console.print(
        f"[cyan]Crawling [bold]{url}[/bold] using profile at [bold]{profile_path}[/bold][/cyan]"
    )

    # Managed, headful browser so the stored identity/cookies are reused.
    browser_cfg = BrowserConfig(
        headless=False,  # Set to False to see the browser in action
        use_managed_browser=True,
        user_data_dir=profile_path,
    )

    # Default crawler config
    crawler_cfg = CrawlerRunConfig()

    output_format = Prompt.ask(
        "[cyan]Output format[/cyan]",
        choices=["all", "json", "markdown", "md", "title"],
        default="markdown",
    )

    try:
        result = await run_crawler(url, browser_cfg, crawler_cfg, True)

        # Render the result in the format the user picked.
        if output_format == "all":
            console.print(json.dumps(result.model_dump(), indent=2))
        elif output_format == "json":
            console.print(json.dumps(json.loads(result.extracted_content), indent=2))
        elif output_format in ("markdown", "md"):
            console.print(result.markdown.raw_markdown)
        elif output_format == "title":
            console.print(result.metadata.get("title", "No title found"))

        console.print(f"[green]Successfully crawled[/green] {url}")
        return result
    except Exception as e:
        console.print(f"[red]Error crawling:[/red] {str(e)}")
        return None
|
||||||
|
|
||||||
|
async def use_profile_to_crawl():
    """Interactively pick an existing profile and crawl a URL with it."""
    profiler = BrowserProfiler()
    profiles = profiler.list_profiles()

    if not profiles:
        console.print("[yellow]No profiles found. Create one first.[/yellow]")
        return

    display_profiles_table(profiles)

    # Restrict input to the row numbers shown in the table.
    valid_choices = [str(n) for n in range(1, len(profiles) + 1)]
    selection = Prompt.ask(
        "[cyan]Enter number of profile to use[/cyan]",
        console=console,
        choices=valid_choices,
        show_choices=False,
    )

    try:
        chosen = profiles[int(selection) - 1]

        url = Prompt.ask("[cyan]Enter URL to crawl[/cyan]")
        if url:
            # Crawl using the selected profile's on-disk browser data.
            await crawl_with_profile_cli(chosen["path"], url)
        else:
            console.print("[red]No URL provided[/red]")
    except (ValueError, IndexError):
        console.print("[red]Invalid selection[/red]")
|
||||||
|
|
||||||
|
async def manage_profiles():
    """Top-level interactive menu: list, create, delete, or use browser profiles."""
    profiler = BrowserProfiler()

    menu = {
        "1": "List profiles",
        "2": "Create new profile",
        "3": "Delete profile",
        "4": "Use a profile to crawl a website",
        "5": "Exit",
    }
    # Menu-number -> colour used when printing the option line.
    option_colors = {"1": "green", "2": "yellow", "3": "red", "4": "blue", "5": "cyan"}

    while True:
        console.print(Panel("[bold cyan]Browser Profile Manager[/bold cyan]", border_style="cyan"))

        for key, label in menu.items():
            color = option_colors[key]
            console.print(f"[{color}]{key}[/{color}]. {label}")

        choice = Prompt.ask("Enter choice", choices=list(menu.keys()), default="1")

        if choice == "1":
            display_profiles_table(profiler.list_profiles())
        elif choice == "2":
            await create_profile_interactive(profiler)
        elif choice == "3":
            delete_profile_interactive(profiler)
        elif choice == "4":
            await use_profile_to_crawl()
        elif choice == "5":
            console.print("[cyan]Exiting profile manager.[/cyan]")
            break

        # Blank separator between menu iterations.
        console.print("\n")
|
||||||
|
|
||||||
|
@click.group(context_settings={"help_option_names": ["-h", "--help"]})
|
||||||
def cli():
|
def cli():
|
||||||
"""Crawl4AI Command Line Interface"""
|
"""Crawl4AI CLI - Web content extraction and browser profile management tool"""
|
||||||
pass
|
pass
|
||||||
|
|
||||||
@cli.group()
|
@cli.command("crawl")
|
||||||
def docs():
|
@click.argument("url", required=True)
|
||||||
"""Documentation operations"""
|
@click.option("--browser-config", "-B", type=click.Path(exists=True), help="Browser config file (YAML/JSON)")
|
||||||
pass
|
@click.option("--crawler-config", "-C", type=click.Path(exists=True), help="Crawler config file (YAML/JSON)")
|
||||||
|
@click.option("--filter-config", "-f", type=click.Path(exists=True), help="Content filter config file")
|
||||||
|
@click.option("--extraction-config", "-e", type=click.Path(exists=True), help="Extraction strategy config file")
|
||||||
|
@click.option("--schema", "-s", type=click.Path(exists=True), help="JSON schema for extraction")
|
||||||
|
@click.option("--browser", "-b", type=str, callback=parse_key_values, help="Browser parameters as key1=value1,key2=value2")
|
||||||
|
@click.option("--crawler", "-c", type=str, callback=parse_key_values, help="Crawler parameters as key1=value1,key2=value2")
|
||||||
|
@click.option("--output", "-o", type=click.Choice(["all", "json", "markdown", "md", "markdown-fit", "md-fit"]), default="all")
|
||||||
|
@click.option("--bypass-cache", is_flag=True, default=True, help="Bypass cache when crawling")
|
||||||
|
@click.option("--question", "-q", help="Ask a question about the crawled content")
|
||||||
|
@click.option("--verbose", "-v", is_flag=True)
|
||||||
|
@click.option("--profile", "-p", help="Use a specific browser profile (by name)")
|
||||||
|
def crawl_cmd(url: str, browser_config: str, crawler_config: str, filter_config: str,
|
||||||
|
extraction_config: str, schema: str, browser: Dict, crawler: Dict,
|
||||||
|
output: str, bypass_cache: bool, question: str, verbose: bool, profile: str):
|
||||||
|
"""Crawl a website and extract content
|
||||||
|
|
||||||
@docs.command()
|
Simple Usage:
|
||||||
@click.argument('sections', nargs=-1)
|
crwl crawl https://example.com
|
||||||
@click.option('--mode', type=click.Choice(['extended', 'condensed']), default='extended')
|
"""
|
||||||
def combine(sections: tuple, mode: str):
|
|
||||||
"""Combine documentation sections"""
|
|
||||||
try:
|
|
||||||
asyncio.run(docs_manager.ensure_docs_exist())
|
|
||||||
click.echo(docs_manager.generate(sections, mode))
|
|
||||||
except Exception as e:
|
|
||||||
logger.error(str(e), tag="ERROR")
|
|
||||||
sys.exit(1)
|
|
||||||
|
|
||||||
@docs.command()
|
# Handle profile option
|
||||||
@click.argument('query')
|
if profile:
|
||||||
@click.option('--top-k', '-k', default=5)
|
profiler = BrowserProfiler()
|
||||||
@click.option('--build-index', is_flag=True, help='Build index if missing')
|
profile_path = profiler.get_profile_path(profile)
|
||||||
def search(query: str, top_k: int, build_index: bool):
|
|
||||||
"""Search documentation"""
|
|
||||||
try:
|
|
||||||
result = docs_manager.search(query, top_k)
|
|
||||||
if result == "No search index available. Call build_search_index() first.":
|
|
||||||
if build_index or click.confirm('No search index found. Build it now?'):
|
|
||||||
asyncio.run(docs_manager.llm_text.generate_index_files())
|
|
||||||
result = docs_manager.search(query, top_k)
|
|
||||||
click.echo(result)
|
|
||||||
except Exception as e:
|
|
||||||
click.echo(f"Error: {str(e)}", err=True)
|
|
||||||
sys.exit(1)
|
|
||||||
|
|
||||||
@docs.command()
|
if not profile_path:
|
||||||
def update():
|
profiles = profiler.list_profiles()
|
||||||
"""Update docs from GitHub"""
|
|
||||||
try:
|
|
||||||
asyncio.run(docs_manager.fetch_docs())
|
|
||||||
click.echo("Documentation updated successfully")
|
|
||||||
except Exception as e:
|
|
||||||
click.echo(f"Error: {str(e)}", err=True)
|
|
||||||
sys.exit(1)
|
|
||||||
|
|
||||||
@docs.command()
|
if profiles:
|
||||||
@click.option('--force-facts', is_flag=True, help='Force regenerate fact files')
|
console.print(f"[red]Profile '{profile}' not found. Available profiles:[/red]")
|
||||||
@click.option('--clear-cache', is_flag=True, help='Clear BM25 cache')
|
display_profiles_table(profiles)
|
||||||
def index(force_facts: bool, clear_cache: bool):
|
else:
|
||||||
"""Build or rebuild search indexes"""
|
console.print("[red]No profiles found. Create one with 'crwl profiles'[/red]")
|
||||||
try:
|
|
||||||
asyncio.run(docs_manager.ensure_docs_exist())
|
return
|
||||||
asyncio.run(docs_manager.llm_text.generate_index_files(
|
|
||||||
force_generate_facts=force_facts,
|
# Include the profile in browser config
|
||||||
clear_bm25_cache=clear_cache
|
if not browser:
|
||||||
))
|
browser = {}
|
||||||
click.echo("Search indexes built successfully")
|
browser["user_data_dir"] = profile_path
|
||||||
except Exception as e:
|
browser["use_managed_browser"] = True
|
||||||
click.echo(f"Error: {str(e)}", err=True)
|
|
||||||
sys.exit(1)
|
if verbose:
|
||||||
|
console.print(f"[green]Using browser profile:[/green] {profile}")
|
||||||
|
|
||||||
# Add docs list command
|
|
||||||
@docs.command()
|
|
||||||
def list():
|
|
||||||
"""List available documentation sections"""
|
|
||||||
try:
|
try:
|
||||||
sections = docs_manager.list()
|
# Load base configurations
|
||||||
print_table(["Sections"], [[section] for section in sections])
|
browser_cfg = BrowserConfig.load(load_config_file(browser_config))
|
||||||
|
crawler_cfg = CrawlerRunConfig.load(load_config_file(crawler_config))
|
||||||
|
|
||||||
|
# Override with CLI params
|
||||||
|
if browser:
|
||||||
|
browser_cfg = browser_cfg.clone(**browser)
|
||||||
|
if crawler:
|
||||||
|
crawler_cfg = crawler_cfg.clone(**crawler)
|
||||||
|
|
||||||
|
# Handle content filter config
|
||||||
|
if filter_config:
|
||||||
|
filter_conf = load_config_file(filter_config)
|
||||||
|
if filter_conf["type"] == "bm25":
|
||||||
|
crawler_cfg.content_filter = BM25ContentFilter(
|
||||||
|
user_query=filter_conf.get("query"),
|
||||||
|
bm25_threshold=filter_conf.get("threshold", 1.0)
|
||||||
|
)
|
||||||
|
elif filter_conf["type"] == "pruning":
|
||||||
|
crawler_cfg.content_filter = PruningContentFilter(
|
||||||
|
user_query=filter_conf.get("query"),
|
||||||
|
threshold=filter_conf.get("threshold", 0.48)
|
||||||
|
)
|
||||||
|
|
||||||
|
# Handle extraction strategy
|
||||||
|
if extraction_config:
|
||||||
|
extract_conf = load_config_file(extraction_config)
|
||||||
|
schema_data = load_schema_file(schema)
|
||||||
|
|
||||||
|
# Check if type does not exist show proper message
|
||||||
|
if not extract_conf.get("type"):
|
||||||
|
raise click.ClickException("Extraction type not specified")
|
||||||
|
if extract_conf["type"] not in ["llm", "json-css", "json-xpath"]:
|
||||||
|
raise click.ClickException(f"Invalid extraction type: {extract_conf['type']}")
|
||||||
|
|
||||||
|
if extract_conf["type"] == "llm":
|
||||||
|
# if no provider show error emssage
|
||||||
|
if not extract_conf.get("provider") or not extract_conf.get("api_token"):
|
||||||
|
raise click.ClickException("LLM provider and API token are required for LLM extraction")
|
||||||
|
|
||||||
|
crawler_cfg.extraction_strategy = LLMExtractionStrategy(
|
||||||
|
llmConfig=LlmConfig(provider=extract_conf["provider"], api_token=extract_conf["api_token"]),
|
||||||
|
instruction=extract_conf["instruction"],
|
||||||
|
schema=schema_data,
|
||||||
|
**extract_conf.get("params", {})
|
||||||
|
)
|
||||||
|
elif extract_conf["type"] == "json-css":
|
||||||
|
crawler_cfg.extraction_strategy = JsonCssExtractionStrategy(
|
||||||
|
schema=schema_data
|
||||||
|
)
|
||||||
|
elif extract_conf["type"] == "json-xpath":
|
||||||
|
crawler_cfg.extraction_strategy = JsonXPathExtractionStrategy(
|
||||||
|
schema=schema_data
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# No cache
|
||||||
|
if bypass_cache:
|
||||||
|
crawler_cfg.cache_mode = CacheMode.BYPASS
|
||||||
|
|
||||||
|
# Run crawler
|
||||||
|
result : CrawlResult = anyio.run(
|
||||||
|
run_crawler,
|
||||||
|
url,
|
||||||
|
browser_cfg,
|
||||||
|
crawler_cfg,
|
||||||
|
verbose
|
||||||
|
)
|
||||||
|
|
||||||
|
# Handle question
|
||||||
|
if question:
|
||||||
|
provider, token = setup_llm_config()
|
||||||
|
markdown = result.markdown.raw_markdown
|
||||||
|
anyio.run(stream_llm_response, url, markdown, question, provider, token)
|
||||||
|
return
|
||||||
|
|
||||||
|
# Handle output
|
||||||
|
if output == "all":
|
||||||
|
click.echo(json.dumps(result.model_dump(), indent=2))
|
||||||
|
elif output == "json":
|
||||||
|
click.echo(json.dumps(json.loads(result.extracted_content), indent=2))
|
||||||
|
elif output in ["markdown", "md"]:
|
||||||
|
click.echo(result.markdown.raw_markdown)
|
||||||
|
elif output in ["markdown-fit", "md-fit"]:
|
||||||
|
click.echo(result.markdown.fit_markdown)
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
click.echo(f"Error: {str(e)}", err=True)
|
raise click.ClickException(str(e))
|
||||||
sys.exit(1)
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
@cli.command("examples")
|
||||||
|
def examples_cmd():
|
||||||
|
"""Show usage examples"""
|
||||||
|
show_examples()
|
||||||
|
|
||||||
|
@cli.command("profiles")
|
||||||
|
def profiles_cmd():
|
||||||
|
"""Manage browser profiles interactively
|
||||||
|
|
||||||
|
Launch an interactive browser profile manager where you can:
|
||||||
|
- List all existing profiles
|
||||||
|
- Create new profiles for authenticated browsing
|
||||||
|
- Delete unused profiles
|
||||||
|
"""
|
||||||
|
# Run interactive profile manager
|
||||||
|
anyio.run(manage_profiles)
|
||||||
|
|
||||||
|
@cli.command()
@click.argument("url", required=False)
@click.option("--example", is_flag=True, help="Show usage examples")
@click.option("--browser-config", "-B", type=click.Path(exists=True), help="Browser config file (YAML/JSON)")
@click.option("--crawler-config", "-C", type=click.Path(exists=True), help="Crawler config file (YAML/JSON)")
@click.option("--filter-config", "-f", type=click.Path(exists=True), help="Content filter config file")
@click.option("--extraction-config", "-e", type=click.Path(exists=True), help="Extraction strategy config file")
@click.option("--schema", "-s", type=click.Path(exists=True), help="JSON schema for extraction")
@click.option("--browser", "-b", type=str, callback=parse_key_values, help="Browser parameters as key1=value1,key2=value2")
@click.option("--crawler", "-c", type=str, callback=parse_key_values, help="Crawler parameters as key1=value1,key2=value2")
@click.option("--output", "-o", type=click.Choice(["all", "json", "markdown", "md", "markdown-fit", "md-fit"]), default="all")
@click.option("--bypass-cache", is_flag=True, default=True, help="Bypass cache when crawling")
@click.option("--question", "-q", help="Ask a question about the crawled content")
@click.option("--verbose", "-v", is_flag=True)
@click.option("--profile", "-p", help="Use a specific browser profile (by name)")
def default(url: str, example: bool, browser_config: str, crawler_config: str, filter_config: str,
            extraction_config: str, schema: str, browser: Dict, crawler: Dict,
            output: str, bypass_cache: bool, question: str, verbose: bool, profile: str):
    """Crawl4AI CLI - Web content extraction tool

    Simple Usage:
        crwl https://example.com

    Run with --example to see detailed usage examples.

    Other commands:
        crwl profiles - Manage browser profiles for identity-based crawling
        crwl crawl - Crawl a website with advanced options
        crwl examples - Show more usage examples
    """
    # --example short-circuits: print the examples text and exit.
    if example:
        show_examples()
        return

    if not url:
        # Show help without error message
        ctx = click.get_current_context()
        click.echo(ctx.get_help())
        return

    # Forward to crawl command
    # This top-level command is a convenience alias for `crwl crawl`:
    # every received option is forwarded unchanged via ctx.invoke.
    ctx = click.get_current_context()
    ctx.invoke(
        crawl_cmd,
        url=url,
        browser_config=browser_config,
        crawler_config=crawler_config,
        filter_config=filter_config,
        extraction_config=extraction_config,
        schema=schema,
        browser=browser,
        crawler=crawler,
        output=output,
        bypass_cache=bypass_cache,
        question=question,
        verbose=verbose,
        profile=profile
    )
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
cli()
|
cli()
|
||||||
@@ -8,21 +8,29 @@ DEFAULT_PROVIDER = "openai/gpt-4o-mini"
|
|||||||
MODEL_REPO_BRANCH = "new-release-0.0.2"
|
MODEL_REPO_BRANCH = "new-release-0.0.2"
|
||||||
# Provider-model dictionary, ONLY used when the extraction strategy is LLMExtractionStrategy
|
# Provider-model dictionary, ONLY used when the extraction strategy is LLMExtractionStrategy
|
||||||
PROVIDER_MODELS = {
|
PROVIDER_MODELS = {
|
||||||
"ollama/llama3": "no-token-needed", # Any model from Ollama no need for API token
|
"ollama/llama3": "no-token-needed", # Any model from Ollama no need for API token
|
||||||
"groq/llama3-70b-8192": os.getenv("GROQ_API_KEY"),
|
"groq/llama3-70b-8192": os.getenv("GROQ_API_KEY"),
|
||||||
"groq/llama3-8b-8192": os.getenv("GROQ_API_KEY"),
|
"groq/llama3-8b-8192": os.getenv("GROQ_API_KEY"),
|
||||||
"openai/gpt-4o-mini": os.getenv("OPENAI_API_KEY"),
|
"openai/gpt-4o-mini": os.getenv("OPENAI_API_KEY"),
|
||||||
"openai/gpt-4o": os.getenv("OPENAI_API_KEY"),
|
"openai/gpt-4o": os.getenv("OPENAI_API_KEY"),
|
||||||
"openai/o1-mini": os.getenv("OPENAI_API_KEY"),
|
"openai/o1-mini": os.getenv("OPENAI_API_KEY"),
|
||||||
"openai/o1-preview": os.getenv("OPENAI_API_KEY"),
|
"openai/o1-preview": os.getenv("OPENAI_API_KEY"),
|
||||||
|
"openai/o3-mini": os.getenv("OPENAI_API_KEY"),
|
||||||
|
"openai/o3-mini-high": os.getenv("OPENAI_API_KEY"),
|
||||||
"anthropic/claude-3-haiku-20240307": os.getenv("ANTHROPIC_API_KEY"),
|
"anthropic/claude-3-haiku-20240307": os.getenv("ANTHROPIC_API_KEY"),
|
||||||
"anthropic/claude-3-opus-20240229": os.getenv("ANTHROPIC_API_KEY"),
|
"anthropic/claude-3-opus-20240229": os.getenv("ANTHROPIC_API_KEY"),
|
||||||
"anthropic/claude-3-sonnet-20240229": os.getenv("ANTHROPIC_API_KEY"),
|
"anthropic/claude-3-sonnet-20240229": os.getenv("ANTHROPIC_API_KEY"),
|
||||||
"anthropic/claude-3-5-sonnet-20240620": os.getenv("ANTHROPIC_API_KEY"),
|
"anthropic/claude-3-5-sonnet-20240620": os.getenv("ANTHROPIC_API_KEY"),
|
||||||
|
"gemini/gemini-pro": os.getenv("GEMINI_API_KEY"),
|
||||||
|
'gemini/gemini-1.5-pro': os.getenv("GEMINI_API_KEY"),
|
||||||
|
'gemini/gemini-2.0-flash': os.getenv("GEMINI_API_KEY"),
|
||||||
|
'gemini/gemini-2.0-flash-exp': os.getenv("GEMINI_API_KEY"),
|
||||||
|
'gemini/gemini-2.0-flash-lite-preview-02-05': os.getenv("GEMINI_API_KEY"),
|
||||||
|
"deepseek/deepseek-chat": os.getenv("DEEPSEEK_API_KEY"),
|
||||||
}
|
}
|
||||||
|
|
||||||
# Chunk token threshold
|
# Chunk token threshold
|
||||||
CHUNK_TOKEN_THRESHOLD = 2 ** 11 # 2048 tokens
|
CHUNK_TOKEN_THRESHOLD = 2**11 # 2048 tokens
|
||||||
OVERLAP_RATE = 0.1
|
OVERLAP_RATE = 0.1
|
||||||
WORD_TOKEN_RATE = 1.3
|
WORD_TOKEN_RATE = 1.3
|
||||||
|
|
||||||
@@ -30,19 +38,41 @@ WORD_TOKEN_RATE = 1.3
|
|||||||
MIN_WORD_THRESHOLD = 1
|
MIN_WORD_THRESHOLD = 1
|
||||||
IMAGE_DESCRIPTION_MIN_WORD_THRESHOLD = 1
|
IMAGE_DESCRIPTION_MIN_WORD_THRESHOLD = 1
|
||||||
|
|
||||||
IMPORTANT_ATTRS = ['src', 'href', 'alt', 'title', 'width', 'height']
|
IMPORTANT_ATTRS = ["src", "href", "alt", "title", "width", "height"]
|
||||||
ONLY_TEXT_ELIGIBLE_TAGS = ['b', 'i', 'u', 'span', 'del', 'ins', 'sub', 'sup', 'strong', 'em', 'code', 'kbd', 'var', 's', 'q', 'abbr', 'cite', 'dfn', 'time', 'small', 'mark']
|
ONLY_TEXT_ELIGIBLE_TAGS = [
|
||||||
|
"b",
|
||||||
|
"i",
|
||||||
|
"u",
|
||||||
|
"span",
|
||||||
|
"del",
|
||||||
|
"ins",
|
||||||
|
"sub",
|
||||||
|
"sup",
|
||||||
|
"strong",
|
||||||
|
"em",
|
||||||
|
"code",
|
||||||
|
"kbd",
|
||||||
|
"var",
|
||||||
|
"s",
|
||||||
|
"q",
|
||||||
|
"abbr",
|
||||||
|
"cite",
|
||||||
|
"dfn",
|
||||||
|
"time",
|
||||||
|
"small",
|
||||||
|
"mark",
|
||||||
|
]
|
||||||
SOCIAL_MEDIA_DOMAINS = [
|
SOCIAL_MEDIA_DOMAINS = [
|
||||||
'facebook.com',
|
"facebook.com",
|
||||||
'twitter.com',
|
"twitter.com",
|
||||||
'x.com',
|
"x.com",
|
||||||
'linkedin.com',
|
"linkedin.com",
|
||||||
'instagram.com',
|
"instagram.com",
|
||||||
'pinterest.com',
|
"pinterest.com",
|
||||||
'tiktok.com',
|
"tiktok.com",
|
||||||
'snapchat.com',
|
"snapchat.com",
|
||||||
'reddit.com',
|
"reddit.com",
|
||||||
]
|
]
|
||||||
|
|
||||||
# Threshold for the Image extraction - Range is 1 to 6
|
# Threshold for the Image extraction - Range is 1 to 6
|
||||||
# Images are scored based on point based system, to filter based on usefulness. Points are assigned
|
# Images are scored based on point based system, to filter based on usefulness. Points are assigned
|
||||||
@@ -60,5 +90,5 @@ NEED_MIGRATION = True
|
|||||||
URL_LOG_SHORTEN_LENGTH = 30
|
URL_LOG_SHORTEN_LENGTH = 30
|
||||||
SHOW_DEPRECATION_WARNINGS = True
|
SHOW_DEPRECATION_WARNINGS = True
|
||||||
SCREENSHOT_HEIGHT_TRESHOLD = 10000
|
SCREENSHOT_HEIGHT_TRESHOLD = 10000
|
||||||
PAGE_TIMEOUT=60000
|
PAGE_TIMEOUT = 60000
|
||||||
DOWNLOAD_PAGE_TIMEOUT=60000
|
DOWNLOAD_PAGE_TIMEOUT = 60000
|
||||||
|
|||||||
2
crawl4ai/configs/__init__.py
Normal file
2
crawl4ai/configs/__init__.py
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
from .proxy_config import ProxyConfig
|
||||||
|
__all__ = ["ProxyConfig"]
|
||||||
113
crawl4ai/configs/proxy_config.py
Normal file
113
crawl4ai/configs/proxy_config.py
Normal file
@@ -0,0 +1,113 @@
|
|||||||
|
import os
|
||||||
|
from typing import Dict, List, Optional
|
||||||
|
|
||||||
|
|
||||||
|
class ProxyConfig:
    """Configuration for a single proxy endpoint.

    Holds the proxy server URL, optional authentication credentials, and the
    proxy's IP address (derived from the server URL when not given).
    """

    def __init__(
        self,
        server: str,
        username: Optional[str] = None,
        password: Optional[str] = None,
        ip: Optional[str] = None,
    ):
        """Configuration class for a single proxy.

        Args:
            server: Proxy server URL (e.g., "http://127.0.0.1:8080")
            username: Optional username for proxy authentication
            password: Optional password for proxy authentication
            ip: Optional IP address for verification purposes
        """
        self.server = server
        self.username = username
        self.password = password
        # Fall back to parsing the host out of the server URL.
        self.ip = ip if ip else self._extract_ip_from_server()

    def _extract_ip_from_server(self) -> Optional[str]:
        """Extract IP address from server URL."""
        try:
            # Strip an optional scheme, then take everything before the port.
            host_part = self.server.split("://")[-1]
            return host_part.split(":")[0]
        except Exception:
            return None

    @staticmethod
    def from_string(proxy_str: str) -> "ProxyConfig":
        """Create a ProxyConfig from a string in the format 'ip:port:username:password'."""
        fields = proxy_str.split(":")
        if len(fields) == 4:  # ip:port:username:password
            ip, port, user, pwd = fields
            return ProxyConfig(
                server=f"http://{ip}:{port}",
                username=user,
                password=pwd,
                ip=ip,
            )
        if len(fields) == 2:  # ip:port only
            ip, port = fields
            return ProxyConfig(server=f"http://{ip}:{port}", ip=ip)
        raise ValueError(f"Invalid proxy string format: {proxy_str}")

    @staticmethod
    def from_dict(proxy_dict: Dict) -> "ProxyConfig":
        """Create a ProxyConfig from a dictionary."""
        return ProxyConfig(
            server=proxy_dict.get("server"),
            username=proxy_dict.get("username"),
            password=proxy_dict.get("password"),
            ip=proxy_dict.get("ip"),
        )

    @staticmethod
    def from_env(env_var: str = "PROXIES") -> List["ProxyConfig"]:
        """Load proxies from environment variable.

        Args:
            env_var: Name of environment variable containing comma-separated proxy strings

        Returns:
            List of ProxyConfig objects
        """
        configs: List["ProxyConfig"] = []
        try:
            for item in os.getenv(env_var, "").split(","):
                if not item:
                    continue
                configs.append(ProxyConfig.from_string(item))
        except Exception as e:
            # Best-effort: report and return whatever parsed successfully.
            print(f"Error loading proxies from environment: {e}")
        return configs

    def to_dict(self) -> Dict:
        """Convert to dictionary representation."""
        return {
            "server": self.server,
            "username": self.username,
            "password": self.password,
            "ip": self.ip,
        }

    def clone(self, **kwargs) -> "ProxyConfig":
        """Create a copy of this configuration with updated values.

        Args:
            **kwargs: Key-value pairs of configuration options to update

        Returns:
            ProxyConfig: A new instance with the specified updates
        """
        merged = {**self.to_dict(), **kwargs}
        return ProxyConfig.from_dict(merged)
|
||||||
@@ -1,46 +1,122 @@
|
|||||||
|
import inspect
|
||||||
import re
|
import re
|
||||||
|
import time
|
||||||
from bs4 import BeautifulSoup, Tag
|
from bs4 import BeautifulSoup, Tag
|
||||||
from typing import List, Tuple, Dict
|
from typing import List, Tuple, Dict, Optional
|
||||||
from rank_bm25 import BM25Okapi
|
from rank_bm25 import BM25Okapi
|
||||||
from time import perf_counter
|
|
||||||
from collections import deque
|
from collections import deque
|
||||||
from bs4 import BeautifulSoup, NavigableString, Tag, Comment
|
from bs4 import NavigableString, Comment
|
||||||
from .utils import clean_tokens
|
|
||||||
|
from .utils import (
|
||||||
|
clean_tokens,
|
||||||
|
perform_completion_with_backoff,
|
||||||
|
escape_json_string,
|
||||||
|
sanitize_html,
|
||||||
|
get_home_folder,
|
||||||
|
extract_xml_data,
|
||||||
|
merge_chunks,
|
||||||
|
)
|
||||||
from abc import ABC, abstractmethod
|
from abc import ABC, abstractmethod
|
||||||
import math
|
import math
|
||||||
from snowballstemmer import stemmer
|
from snowballstemmer import stemmer
|
||||||
|
from .config import DEFAULT_PROVIDER, OVERLAP_RATE, WORD_TOKEN_RATE, PROVIDER_MODELS
|
||||||
|
from .models import TokenUsage
|
||||||
|
from .prompts import PROMPT_FILTER_CONTENT
|
||||||
|
import os
|
||||||
|
import json
|
||||||
|
import hashlib
|
||||||
|
from pathlib import Path
|
||||||
|
from concurrent.futures import ThreadPoolExecutor
|
||||||
|
from .async_logger import AsyncLogger, LogLevel
|
||||||
|
from colorama import Fore, Style
|
||||||
|
|
||||||
|
|
||||||
class RelevantContentFilter(ABC):
|
class RelevantContentFilter(ABC):
|
||||||
"""Abstract base class for content filtering strategies"""
|
"""Abstract base class for content filtering strategies"""
|
||||||
def __init__(self, user_query: str = None):
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
user_query: str = None,
|
||||||
|
verbose: bool = False,
|
||||||
|
logger: Optional[AsyncLogger] = None,
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
Initializes the RelevantContentFilter class with optional user query.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
user_query (str): User query for filtering (optional).
|
||||||
|
verbose (bool): Enable verbose logging (default: False).
|
||||||
|
"""
|
||||||
self.user_query = user_query
|
self.user_query = user_query
|
||||||
self.included_tags = {
|
self.included_tags = {
|
||||||
# Primary structure
|
# Primary structure
|
||||||
'article', 'main', 'section', 'div',
|
"article",
|
||||||
|
"main",
|
||||||
|
"section",
|
||||||
|
"div",
|
||||||
# List structures
|
# List structures
|
||||||
'ul', 'ol', 'li', 'dl', 'dt', 'dd',
|
"ul",
|
||||||
|
"ol",
|
||||||
|
"li",
|
||||||
|
"dl",
|
||||||
|
"dt",
|
||||||
|
"dd",
|
||||||
# Text content
|
# Text content
|
||||||
'p', 'span', 'blockquote', 'pre', 'code',
|
"p",
|
||||||
|
"span",
|
||||||
|
"blockquote",
|
||||||
|
"pre",
|
||||||
|
"code",
|
||||||
# Headers
|
# Headers
|
||||||
'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
|
"h1",
|
||||||
|
"h2",
|
||||||
|
"h3",
|
||||||
|
"h4",
|
||||||
|
"h5",
|
||||||
|
"h6",
|
||||||
# Tables
|
# Tables
|
||||||
'table', 'thead', 'tbody', 'tr', 'td', 'th',
|
"table",
|
||||||
|
"thead",
|
||||||
|
"tbody",
|
||||||
|
"tr",
|
||||||
|
"td",
|
||||||
|
"th",
|
||||||
# Other semantic elements
|
# Other semantic elements
|
||||||
'figure', 'figcaption', 'details', 'summary',
|
"figure",
|
||||||
|
"figcaption",
|
||||||
|
"details",
|
||||||
|
"summary",
|
||||||
# Text formatting
|
# Text formatting
|
||||||
'em', 'strong', 'b', 'i', 'mark', 'small',
|
"em",
|
||||||
|
"strong",
|
||||||
|
"b",
|
||||||
|
"i",
|
||||||
|
"mark",
|
||||||
|
"small",
|
||||||
# Rich content
|
# Rich content
|
||||||
'time', 'address', 'cite', 'q'
|
"time",
|
||||||
|
"address",
|
||||||
|
"cite",
|
||||||
|
"q",
|
||||||
}
|
}
|
||||||
self.excluded_tags = {
|
self.excluded_tags = {
|
||||||
'nav', 'footer', 'header', 'aside', 'script',
|
"nav",
|
||||||
'style', 'form', 'iframe', 'noscript'
|
"footer",
|
||||||
|
"header",
|
||||||
|
"aside",
|
||||||
|
"script",
|
||||||
|
"style",
|
||||||
|
"form",
|
||||||
|
"iframe",
|
||||||
|
"noscript",
|
||||||
}
|
}
|
||||||
self.header_tags = {'h1', 'h2', 'h3', 'h4', 'h5', 'h6'}
|
self.header_tags = {"h1", "h2", "h3", "h4", "h5", "h6"}
|
||||||
self.negative_patterns = re.compile(
|
self.negative_patterns = re.compile(
|
||||||
r'nav|footer|header|sidebar|ads|comment|promo|advert|social|share',
|
r"nav|footer|header|sidebar|ads|comment|promo|advert|social|share", re.I
|
||||||
re.I
|
|
||||||
)
|
)
|
||||||
self.min_word_count = 2
|
self.min_word_count = 2
|
||||||
|
self.verbose = False
|
||||||
|
self.logger = logger
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def filter_content(self, html: str) -> List[str]:
|
def filter_content(self, html: str) -> List[str]:
|
||||||
@@ -62,28 +138,30 @@ class RelevantContentFilter(ABC):
|
|||||||
except Exception:
|
except Exception:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
if soup.find('h1'):
|
if soup.find("h1"):
|
||||||
query_parts.append(soup.find('h1').get_text())
|
query_parts.append(soup.find("h1").get_text())
|
||||||
|
|
||||||
# Meta tags
|
# Meta tags
|
||||||
temp = ""
|
temp = ""
|
||||||
for meta_name in ['keywords', 'description']:
|
for meta_name in ["keywords", "description"]:
|
||||||
meta = soup.find('meta', attrs={'name': meta_name})
|
meta = soup.find("meta", attrs={"name": meta_name})
|
||||||
if meta and meta.get('content'):
|
if meta and meta.get("content"):
|
||||||
query_parts.append(meta['content'])
|
query_parts.append(meta["content"])
|
||||||
temp += meta['content']
|
temp += meta["content"]
|
||||||
|
|
||||||
# If still empty, grab first significant paragraph
|
# If still empty, grab first significant paragraph
|
||||||
if not temp:
|
if not temp:
|
||||||
# Find the first tag P thatits text contains more than 50 characters
|
# Find the first tag P thatits text contains more than 50 characters
|
||||||
for p in body.find_all('p'):
|
for p in body.find_all("p"):
|
||||||
if len(p.get_text()) > 150:
|
if len(p.get_text()) > 150:
|
||||||
query_parts.append(p.get_text()[:150])
|
query_parts.append(p.get_text()[:150])
|
||||||
break
|
break
|
||||||
|
|
||||||
return ' '.join(filter(None, query_parts))
|
return " ".join(filter(None, query_parts))
|
||||||
|
|
||||||
def extract_text_chunks(self, body: Tag, min_word_threshold: int = None) -> List[Tuple[str, str]]:
|
def extract_text_chunks(
|
||||||
|
self, body: Tag, min_word_threshold: int = None
|
||||||
|
) -> List[Tuple[str, str]]:
|
||||||
"""
|
"""
|
||||||
Extracts text chunks from a BeautifulSoup body element while preserving order.
|
Extracts text chunks from a BeautifulSoup body element while preserving order.
|
||||||
Returns list of tuples (text, tag_name) for classification.
|
Returns list of tuples (text, tag_name) for classification.
|
||||||
@@ -96,14 +174,42 @@ class RelevantContentFilter(ABC):
|
|||||||
"""
|
"""
|
||||||
# Tags to ignore - inline elements that shouldn't break text flow
|
# Tags to ignore - inline elements that shouldn't break text flow
|
||||||
INLINE_TAGS = {
|
INLINE_TAGS = {
|
||||||
'a', 'abbr', 'acronym', 'b', 'bdo', 'big', 'br', 'button', 'cite', 'code',
|
"a",
|
||||||
'dfn', 'em', 'i', 'img', 'input', 'kbd', 'label', 'map', 'object', 'q',
|
"abbr",
|
||||||
'samp', 'script', 'select', 'small', 'span', 'strong', 'sub', 'sup',
|
"acronym",
|
||||||
'textarea', 'time', 'tt', 'var'
|
"b",
|
||||||
|
"bdo",
|
||||||
|
"big",
|
||||||
|
"br",
|
||||||
|
"button",
|
||||||
|
"cite",
|
||||||
|
"code",
|
||||||
|
"dfn",
|
||||||
|
"em",
|
||||||
|
"i",
|
||||||
|
"img",
|
||||||
|
"input",
|
||||||
|
"kbd",
|
||||||
|
"label",
|
||||||
|
"map",
|
||||||
|
"object",
|
||||||
|
"q",
|
||||||
|
"samp",
|
||||||
|
"script",
|
||||||
|
"select",
|
||||||
|
"small",
|
||||||
|
"span",
|
||||||
|
"strong",
|
||||||
|
"sub",
|
||||||
|
"sup",
|
||||||
|
"textarea",
|
||||||
|
"time",
|
||||||
|
"tt",
|
||||||
|
"var",
|
||||||
}
|
}
|
||||||
|
|
||||||
# Tags that typically contain meaningful headers
|
# Tags that typically contain meaningful headers
|
||||||
HEADER_TAGS = {'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'header'}
|
HEADER_TAGS = {"h1", "h2", "h3", "h4", "h5", "h6", "header"}
|
||||||
|
|
||||||
chunks = []
|
chunks = []
|
||||||
current_text = []
|
current_text = []
|
||||||
@@ -111,9 +217,8 @@ class RelevantContentFilter(ABC):
|
|||||||
|
|
||||||
def should_break_chunk(tag: Tag) -> bool:
|
def should_break_chunk(tag: Tag) -> bool:
|
||||||
"""Determine if a tag should cause a break in the current text chunk"""
|
"""Determine if a tag should cause a break in the current text chunk"""
|
||||||
return (
|
return tag.name not in INLINE_TAGS and not (
|
||||||
tag.name not in INLINE_TAGS
|
tag.name == "p" and len(current_text) == 0
|
||||||
and not (tag.name == 'p' and len(current_text) == 0)
|
|
||||||
)
|
)
|
||||||
|
|
||||||
# Use deque for efficient push/pop operations
|
# Use deque for efficient push/pop operations
|
||||||
@@ -125,9 +230,11 @@ class RelevantContentFilter(ABC):
|
|||||||
if visited:
|
if visited:
|
||||||
# End of block element - flush accumulated text
|
# End of block element - flush accumulated text
|
||||||
if current_text and should_break_chunk(element):
|
if current_text and should_break_chunk(element):
|
||||||
text = ' '.join(''.join(current_text).split())
|
text = " ".join("".join(current_text).split())
|
||||||
if text:
|
if text:
|
||||||
tag_type = 'header' if element.name in HEADER_TAGS else 'content'
|
tag_type = (
|
||||||
|
"header" if element.name in HEADER_TAGS else "content"
|
||||||
|
)
|
||||||
chunks.append((chunk_index, text, tag_type, element))
|
chunks.append((chunk_index, text, tag_type, element))
|
||||||
chunk_index += 1
|
chunk_index += 1
|
||||||
current_text = []
|
current_text = []
|
||||||
@@ -153,18 +260,23 @@ class RelevantContentFilter(ABC):
|
|||||||
|
|
||||||
# Handle any remaining text
|
# Handle any remaining text
|
||||||
if current_text:
|
if current_text:
|
||||||
text = ' '.join(''.join(current_text).split())
|
text = " ".join("".join(current_text).split())
|
||||||
if text:
|
if text:
|
||||||
chunks.append((chunk_index, text, 'content', body))
|
chunks.append((chunk_index, text, "content", body))
|
||||||
|
|
||||||
if min_word_threshold:
|
if min_word_threshold:
|
||||||
chunks = [chunk for chunk in chunks if len(chunk[1].split()) >= min_word_threshold]
|
chunks = [
|
||||||
|
chunk for chunk in chunks if len(chunk[1].split()) >= min_word_threshold
|
||||||
|
]
|
||||||
|
|
||||||
return chunks
|
return chunks
|
||||||
|
|
||||||
def _deprecated_extract_text_chunks(self, soup: BeautifulSoup) -> List[Tuple[int, str, Tag]]:
|
def _deprecated_extract_text_chunks(
|
||||||
|
self, soup: BeautifulSoup
|
||||||
|
) -> List[Tuple[int, str, Tag]]:
|
||||||
"""Common method for extracting text chunks"""
|
"""Common method for extracting text chunks"""
|
||||||
_text_cache = {}
|
_text_cache = {}
|
||||||
|
|
||||||
def fast_text(element: Tag) -> str:
|
def fast_text(element: Tag) -> str:
|
||||||
elem_id = id(element)
|
elem_id = id(element)
|
||||||
if elem_id in _text_cache:
|
if elem_id in _text_cache:
|
||||||
@@ -175,7 +287,7 @@ class RelevantContentFilter(ABC):
|
|||||||
text = content.strip()
|
text = content.strip()
|
||||||
if text:
|
if text:
|
||||||
texts.append(text)
|
texts.append(text)
|
||||||
result = ' '.join(texts)
|
result = " ".join(texts)
|
||||||
_text_cache[elem_id] = result
|
_text_cache[elem_id] = result
|
||||||
return result
|
return result
|
||||||
|
|
||||||
@@ -210,10 +322,9 @@ class RelevantContentFilter(ABC):
|
|||||||
"""Common method for exclusion logic"""
|
"""Common method for exclusion logic"""
|
||||||
if tag.name in self.excluded_tags:
|
if tag.name in self.excluded_tags:
|
||||||
return True
|
return True
|
||||||
class_id = ' '.join(filter(None, [
|
class_id = " ".join(
|
||||||
' '.join(tag.get('class', [])),
|
filter(None, [" ".join(tag.get("class", [])), tag.get("id", "")])
|
||||||
tag.get('id', '')
|
)
|
||||||
]))
|
|
||||||
return bool(self.negative_patterns.search(class_id))
|
return bool(self.negative_patterns.search(class_id))
|
||||||
|
|
||||||
def clean_element(self, tag: Tag) -> str:
|
def clean_element(self, tag: Tag) -> str:
|
||||||
@@ -221,8 +332,16 @@ class RelevantContentFilter(ABC):
|
|||||||
if not tag or not isinstance(tag, Tag):
|
if not tag or not isinstance(tag, Tag):
|
||||||
return ""
|
return ""
|
||||||
|
|
||||||
unwanted_tags = {'script', 'style', 'aside', 'form', 'iframe', 'noscript'}
|
unwanted_tags = {"script", "style", "aside", "form", "iframe", "noscript"}
|
||||||
unwanted_attrs = {'style', 'onclick', 'onmouseover', 'align', 'bgcolor', 'class', 'id'}
|
unwanted_attrs = {
|
||||||
|
"style",
|
||||||
|
"onclick",
|
||||||
|
"onmouseover",
|
||||||
|
"align",
|
||||||
|
"bgcolor",
|
||||||
|
"class",
|
||||||
|
"id",
|
||||||
|
}
|
||||||
|
|
||||||
# Use string builder pattern for better performance
|
# Use string builder pattern for better performance
|
||||||
builder = []
|
builder = []
|
||||||
@@ -237,28 +356,29 @@ class RelevantContentFilter(ABC):
|
|||||||
return
|
return
|
||||||
|
|
||||||
# Start tag
|
# Start tag
|
||||||
builder.append(f'<{elem.name}')
|
builder.append(f"<{elem.name}")
|
||||||
|
|
||||||
# Add cleaned attributes
|
# Add cleaned attributes
|
||||||
attrs = {k: v for k, v in elem.attrs.items() if k not in unwanted_attrs}
|
attrs = {k: v for k, v in elem.attrs.items() if k not in unwanted_attrs}
|
||||||
for key, value in attrs.items():
|
for key, value in attrs.items():
|
||||||
builder.append(f' {key}="{value}"')
|
builder.append(f' {key}="{value}"')
|
||||||
|
|
||||||
builder.append('>')
|
builder.append(">")
|
||||||
|
|
||||||
# Process children
|
# Process children
|
||||||
for child in elem.children:
|
for child in elem.children:
|
||||||
render_tag(child)
|
render_tag(child)
|
||||||
|
|
||||||
# Close tag
|
# Close tag
|
||||||
builder.append(f'</{elem.name}>')
|
builder.append(f"</{elem.name}>")
|
||||||
|
|
||||||
try:
|
try:
|
||||||
render_tag(tag)
|
render_tag(tag)
|
||||||
return ''.join(builder)
|
return "".join(builder)
|
||||||
except Exception:
|
except Exception:
|
||||||
return str(tag) # Fallback to original if anything fails
|
return str(tag) # Fallback to original if anything fails
|
||||||
|
|
||||||
|
|
||||||
class BM25ContentFilter(RelevantContentFilter):
|
class BM25ContentFilter(RelevantContentFilter):
|
||||||
"""
|
"""
|
||||||
Content filtering using BM25 algorithm with priority tag handling.
|
Content filtering using BM25 algorithm with priority tag handling.
|
||||||
@@ -280,7 +400,13 @@ class BM25ContentFilter(RelevantContentFilter):
|
|||||||
Methods:
|
Methods:
|
||||||
filter_content(self, html: str, min_word_threshold: int = None)
|
filter_content(self, html: str, min_word_threshold: int = None)
|
||||||
"""
|
"""
|
||||||
def __init__(self, user_query: str = None, bm25_threshold: float = 1.0, language: str = 'english'):
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
user_query: str = None,
|
||||||
|
bm25_threshold: float = 1.0,
|
||||||
|
language: str = "english",
|
||||||
|
):
|
||||||
"""
|
"""
|
||||||
Initializes the BM25ContentFilter class, if not provided, falls back to page metadata.
|
Initializes the BM25ContentFilter class, if not provided, falls back to page metadata.
|
||||||
|
|
||||||
@@ -295,17 +421,17 @@ class BM25ContentFilter(RelevantContentFilter):
|
|||||||
super().__init__(user_query=user_query)
|
super().__init__(user_query=user_query)
|
||||||
self.bm25_threshold = bm25_threshold
|
self.bm25_threshold = bm25_threshold
|
||||||
self.priority_tags = {
|
self.priority_tags = {
|
||||||
'h1': 5.0,
|
"h1": 5.0,
|
||||||
'h2': 4.0,
|
"h2": 4.0,
|
||||||
'h3': 3.0,
|
"h3": 3.0,
|
||||||
'title': 4.0,
|
"title": 4.0,
|
||||||
'strong': 2.0,
|
"strong": 2.0,
|
||||||
'b': 1.5,
|
"b": 1.5,
|
||||||
'em': 1.5,
|
"em": 1.5,
|
||||||
'blockquote': 2.0,
|
"blockquote": 2.0,
|
||||||
'code': 2.0,
|
"code": 2.0,
|
||||||
'pre': 1.5,
|
"pre": 1.5,
|
||||||
'th': 1.5, # Table headers
|
"th": 1.5, # Table headers
|
||||||
}
|
}
|
||||||
self.stemmer = stemmer(language)
|
self.stemmer = stemmer(language)
|
||||||
|
|
||||||
@@ -327,13 +453,13 @@ class BM25ContentFilter(RelevantContentFilter):
|
|||||||
if not html or not isinstance(html, str):
|
if not html or not isinstance(html, str):
|
||||||
return []
|
return []
|
||||||
|
|
||||||
soup = BeautifulSoup(html, 'lxml')
|
soup = BeautifulSoup(html, "lxml")
|
||||||
|
|
||||||
# Check if body is present
|
# Check if body is present
|
||||||
if not soup.body:
|
if not soup.body:
|
||||||
# Wrap in body tag if missing
|
# Wrap in body tag if missing
|
||||||
soup = BeautifulSoup(f'<body>{html}</body>', 'lxml')
|
soup = BeautifulSoup(f"<body>{html}</body>", "lxml")
|
||||||
body = soup.find('body')
|
body = soup.find("body")
|
||||||
|
|
||||||
query = self.extract_page_query(soup, body)
|
query = self.extract_page_query(soup, body)
|
||||||
|
|
||||||
@@ -354,9 +480,13 @@ class BM25ContentFilter(RelevantContentFilter):
|
|||||||
# for _, chunk, _, _ in candidates]
|
# for _, chunk, _, _ in candidates]
|
||||||
# tokenized_query = [ps.stem(word) for word in query.lower().split()]
|
# tokenized_query = [ps.stem(word) for word in query.lower().split()]
|
||||||
|
|
||||||
tokenized_corpus = [[self.stemmer.stemWord(word) for word in chunk.lower().split()]
|
tokenized_corpus = [
|
||||||
for _, chunk, _, _ in candidates]
|
[self.stemmer.stemWord(word) for word in chunk.lower().split()]
|
||||||
tokenized_query = [self.stemmer.stemWord(word) for word in query.lower().split()]
|
for _, chunk, _, _ in candidates
|
||||||
|
]
|
||||||
|
tokenized_query = [
|
||||||
|
self.stemmer.stemWord(word) for word in query.lower().split()
|
||||||
|
]
|
||||||
|
|
||||||
# tokenized_corpus = [[self.stemmer.stemWord(word) for word in tokenize_text(chunk.lower())]
|
# tokenized_corpus = [[self.stemmer.stemWord(word) for word in tokenize_text(chunk.lower())]
|
||||||
# for _, chunk, _, _ in candidates]
|
# for _, chunk, _, _ in candidates]
|
||||||
@@ -378,7 +508,8 @@ class BM25ContentFilter(RelevantContentFilter):
|
|||||||
|
|
||||||
# Filter candidates by threshold
|
# Filter candidates by threshold
|
||||||
selected_candidates = [
|
selected_candidates = [
|
||||||
(index, chunk, tag) for adjusted_score, index, chunk, tag in adjusted_candidates
|
(index, chunk, tag)
|
||||||
|
for adjusted_score, index, chunk, tag in adjusted_candidates
|
||||||
if adjusted_score >= self.bm25_threshold
|
if adjusted_score >= self.bm25_threshold
|
||||||
]
|
]
|
||||||
|
|
||||||
@@ -390,6 +521,7 @@ class BM25ContentFilter(RelevantContentFilter):
|
|||||||
|
|
||||||
return [self.clean_element(tag) for _, _, tag in selected_candidates]
|
return [self.clean_element(tag) for _, _, tag in selected_candidates]
|
||||||
|
|
||||||
|
|
||||||
class PruningContentFilter(RelevantContentFilter):
|
class PruningContentFilter(RelevantContentFilter):
|
||||||
"""
|
"""
|
||||||
Content filtering using pruning algorithm with dynamic threshold.
|
Content filtering using pruning algorithm with dynamic threshold.
|
||||||
@@ -411,8 +543,14 @@ class PruningContentFilter(RelevantContentFilter):
|
|||||||
Methods:
|
Methods:
|
||||||
filter_content(self, html: str, min_word_threshold: int = None):
|
filter_content(self, html: str, min_word_threshold: int = None):
|
||||||
"""
|
"""
|
||||||
def __init__(self, user_query: str = None, min_word_threshold: int = None,
|
|
||||||
threshold_type: str = 'fixed', threshold: float = 0.48):
|
def __init__(
|
||||||
|
self,
|
||||||
|
user_query: str = None,
|
||||||
|
min_word_threshold: int = None,
|
||||||
|
threshold_type: str = "fixed",
|
||||||
|
threshold: float = 0.48,
|
||||||
|
):
|
||||||
"""
|
"""
|
||||||
Initializes the PruningContentFilter class, if not provided, falls back to page metadata.
|
Initializes the PruningContentFilter class, if not provided, falls back to page metadata.
|
||||||
|
|
||||||
@@ -432,49 +570,49 @@ class PruningContentFilter(RelevantContentFilter):
|
|||||||
|
|
||||||
# Add tag importance for dynamic threshold
|
# Add tag importance for dynamic threshold
|
||||||
self.tag_importance = {
|
self.tag_importance = {
|
||||||
'article': 1.5,
|
"article": 1.5,
|
||||||
'main': 1.4,
|
"main": 1.4,
|
||||||
'section': 1.3,
|
"section": 1.3,
|
||||||
'p': 1.2,
|
"p": 1.2,
|
||||||
'h1': 1.4,
|
"h1": 1.4,
|
||||||
'h2': 1.3,
|
"h2": 1.3,
|
||||||
'h3': 1.2,
|
"h3": 1.2,
|
||||||
'div': 0.7,
|
"div": 0.7,
|
||||||
'span': 0.6
|
"span": 0.6,
|
||||||
}
|
}
|
||||||
|
|
||||||
# Metric configuration
|
# Metric configuration
|
||||||
self.metric_config = {
|
self.metric_config = {
|
||||||
'text_density': True,
|
"text_density": True,
|
||||||
'link_density': True,
|
"link_density": True,
|
||||||
'tag_weight': True,
|
"tag_weight": True,
|
||||||
'class_id_weight': True,
|
"class_id_weight": True,
|
||||||
'text_length': True,
|
"text_length": True,
|
||||||
}
|
}
|
||||||
|
|
||||||
self.metric_weights = {
|
self.metric_weights = {
|
||||||
'text_density': 0.4,
|
"text_density": 0.4,
|
||||||
'link_density': 0.2,
|
"link_density": 0.2,
|
||||||
'tag_weight': 0.2,
|
"tag_weight": 0.2,
|
||||||
'class_id_weight': 0.1,
|
"class_id_weight": 0.1,
|
||||||
'text_length': 0.1,
|
"text_length": 0.1,
|
||||||
}
|
}
|
||||||
|
|
||||||
self.tag_weights = {
|
self.tag_weights = {
|
||||||
'div': 0.5,
|
"div": 0.5,
|
||||||
'p': 1.0,
|
"p": 1.0,
|
||||||
'article': 1.5,
|
"article": 1.5,
|
||||||
'section': 1.0,
|
"section": 1.0,
|
||||||
'span': 0.3,
|
"span": 0.3,
|
||||||
'li': 0.5,
|
"li": 0.5,
|
||||||
'ul': 0.5,
|
"ul": 0.5,
|
||||||
'ol': 0.5,
|
"ol": 0.5,
|
||||||
'h1': 1.2,
|
"h1": 1.2,
|
||||||
'h2': 1.1,
|
"h2": 1.1,
|
||||||
'h3': 1.0,
|
"h3": 1.0,
|
||||||
'h4': 0.9,
|
"h4": 0.9,
|
||||||
'h5': 0.8,
|
"h5": 0.8,
|
||||||
'h6': 0.7,
|
"h6": 0.7,
|
||||||
}
|
}
|
||||||
|
|
||||||
def filter_content(self, html: str, min_word_threshold: int = None) -> List[str]:
|
def filter_content(self, html: str, min_word_threshold: int = None) -> List[str]:
|
||||||
@@ -495,22 +633,22 @@ class PruningContentFilter(RelevantContentFilter):
|
|||||||
if not html or not isinstance(html, str):
|
if not html or not isinstance(html, str):
|
||||||
return []
|
return []
|
||||||
|
|
||||||
soup = BeautifulSoup(html, 'lxml')
|
soup = BeautifulSoup(html, "lxml")
|
||||||
if not soup.body:
|
if not soup.body:
|
||||||
soup = BeautifulSoup(f'<body>{html}</body>', 'lxml')
|
soup = BeautifulSoup(f"<body>{html}</body>", "lxml")
|
||||||
|
|
||||||
# Remove comments and unwanted tags
|
# Remove comments and unwanted tags
|
||||||
self._remove_comments(soup)
|
self._remove_comments(soup)
|
||||||
self._remove_unwanted_tags(soup)
|
self._remove_unwanted_tags(soup)
|
||||||
|
|
||||||
# Prune tree starting from body
|
# Prune tree starting from body
|
||||||
body = soup.find('body')
|
body = soup.find("body")
|
||||||
self._prune_tree(body)
|
self._prune_tree(body)
|
||||||
|
|
||||||
# Extract remaining content as list of HTML strings
|
# Extract remaining content as list of HTML strings
|
||||||
content_blocks = []
|
content_blocks = []
|
||||||
for element in body.children:
|
for element in body.children:
|
||||||
if isinstance(element, str) or not hasattr(element, 'name'):
|
if isinstance(element, str) or not hasattr(element, "name"):
|
||||||
continue
|
continue
|
||||||
if len(element.get_text(strip=True)) > 0:
|
if len(element.get_text(strip=True)) > 0:
|
||||||
content_blocks.append(str(element))
|
content_blocks.append(str(element))
|
||||||
@@ -535,24 +673,28 @@ class PruningContentFilter(RelevantContentFilter):
|
|||||||
Args:
|
Args:
|
||||||
node (Tag): The node from which the pruning starts.
|
node (Tag): The node from which the pruning starts.
|
||||||
"""
|
"""
|
||||||
if not node or not hasattr(node, 'name') or node.name is None:
|
if not node or not hasattr(node, "name") or node.name is None:
|
||||||
return
|
return
|
||||||
|
|
||||||
text_len = len(node.get_text(strip=True))
|
text_len = len(node.get_text(strip=True))
|
||||||
tag_len = len(node.encode_contents().decode('utf-8'))
|
tag_len = len(node.encode_contents().decode("utf-8"))
|
||||||
link_text_len = sum(len(s.strip()) for s in (a.string for a in node.find_all('a', recursive=False)) if s)
|
link_text_len = sum(
|
||||||
|
len(s.strip())
|
||||||
|
for s in (a.string for a in node.find_all("a", recursive=False))
|
||||||
|
if s
|
||||||
|
)
|
||||||
|
|
||||||
metrics = {
|
metrics = {
|
||||||
'node': node,
|
"node": node,
|
||||||
'tag_name': node.name,
|
"tag_name": node.name,
|
||||||
'text_len': text_len,
|
"text_len": text_len,
|
||||||
'tag_len': tag_len,
|
"tag_len": tag_len,
|
||||||
'link_text_len': link_text_len
|
"link_text_len": link_text_len,
|
||||||
}
|
}
|
||||||
|
|
||||||
score = self._compute_composite_score(metrics, text_len, tag_len, link_text_len)
|
score = self._compute_composite_score(metrics, text_len, tag_len, link_text_len)
|
||||||
|
|
||||||
if self.threshold_type == 'fixed':
|
if self.threshold_type == "fixed":
|
||||||
should_remove = score < self.threshold
|
should_remove = score < self.threshold
|
||||||
else: # dynamic
|
else: # dynamic
|
||||||
tag_importance = self.tag_importance.get(node.name, 0.7)
|
tag_importance = self.tag_importance.get(node.name, 0.7)
|
||||||
@@ -572,7 +714,7 @@ class PruningContentFilter(RelevantContentFilter):
|
|||||||
if should_remove:
|
if should_remove:
|
||||||
node.decompose()
|
node.decompose()
|
||||||
else:
|
else:
|
||||||
children = [child for child in node.children if hasattr(child, 'name')]
|
children = [child for child in node.children if hasattr(child, "name")]
|
||||||
for child in children:
|
for child in children:
|
||||||
self._prune_tree(child)
|
self._prune_tree(child)
|
||||||
|
|
||||||
@@ -580,48 +722,332 @@ class PruningContentFilter(RelevantContentFilter):
|
|||||||
"""Computes the composite score"""
|
"""Computes the composite score"""
|
||||||
if self.min_word_threshold:
|
if self.min_word_threshold:
|
||||||
# Get raw text from metrics node - avoid extra processing
|
# Get raw text from metrics node - avoid extra processing
|
||||||
text = metrics['node'].get_text(strip=True)
|
text = metrics["node"].get_text(strip=True)
|
||||||
word_count = text.count(' ') + 1
|
word_count = text.count(" ") + 1
|
||||||
if word_count < self.min_word_threshold:
|
if word_count < self.min_word_threshold:
|
||||||
return -1.0 # Guaranteed removal
|
return -1.0 # Guaranteed removal
|
||||||
score = 0.0
|
score = 0.0
|
||||||
total_weight = 0.0
|
total_weight = 0.0
|
||||||
|
|
||||||
if self.metric_config['text_density']:
|
if self.metric_config["text_density"]:
|
||||||
density = text_len / tag_len if tag_len > 0 else 0
|
density = text_len / tag_len if tag_len > 0 else 0
|
||||||
score += self.metric_weights['text_density'] * density
|
score += self.metric_weights["text_density"] * density
|
||||||
total_weight += self.metric_weights['text_density']
|
total_weight += self.metric_weights["text_density"]
|
||||||
|
|
||||||
if self.metric_config['link_density']:
|
if self.metric_config["link_density"]:
|
||||||
density = 1 - (link_text_len / text_len if text_len > 0 else 0)
|
density = 1 - (link_text_len / text_len if text_len > 0 else 0)
|
||||||
score += self.metric_weights['link_density'] * density
|
score += self.metric_weights["link_density"] * density
|
||||||
total_weight += self.metric_weights['link_density']
|
total_weight += self.metric_weights["link_density"]
|
||||||
|
|
||||||
if self.metric_config['tag_weight']:
|
if self.metric_config["tag_weight"]:
|
||||||
tag_score = self.tag_weights.get(metrics['tag_name'], 0.5)
|
tag_score = self.tag_weights.get(metrics["tag_name"], 0.5)
|
||||||
score += self.metric_weights['tag_weight'] * tag_score
|
score += self.metric_weights["tag_weight"] * tag_score
|
||||||
total_weight += self.metric_weights['tag_weight']
|
total_weight += self.metric_weights["tag_weight"]
|
||||||
|
|
||||||
if self.metric_config['class_id_weight']:
|
if self.metric_config["class_id_weight"]:
|
||||||
class_score = self._compute_class_id_weight(metrics['node'])
|
class_score = self._compute_class_id_weight(metrics["node"])
|
||||||
score += self.metric_weights['class_id_weight'] * max(0, class_score)
|
score += self.metric_weights["class_id_weight"] * max(0, class_score)
|
||||||
total_weight += self.metric_weights['class_id_weight']
|
total_weight += self.metric_weights["class_id_weight"]
|
||||||
|
|
||||||
if self.metric_config['text_length']:
|
if self.metric_config["text_length"]:
|
||||||
score += self.metric_weights['text_length'] * math.log(text_len + 1)
|
score += self.metric_weights["text_length"] * math.log(text_len + 1)
|
||||||
total_weight += self.metric_weights['text_length']
|
total_weight += self.metric_weights["text_length"]
|
||||||
|
|
||||||
return score / total_weight if total_weight > 0 else 0
|
return score / total_weight if total_weight > 0 else 0
|
||||||
|
|
||||||
def _compute_class_id_weight(self, node):
|
def _compute_class_id_weight(self, node):
|
||||||
"""Computes the class ID weight"""
|
"""Computes the class ID weight"""
|
||||||
class_id_score = 0
|
class_id_score = 0
|
||||||
if 'class' in node.attrs:
|
if "class" in node.attrs:
|
||||||
classes = ' '.join(node['class'])
|
classes = " ".join(node["class"])
|
||||||
if self.negative_patterns.match(classes):
|
if self.negative_patterns.match(classes):
|
||||||
class_id_score -= 0.5
|
class_id_score -= 0.5
|
||||||
if 'id' in node.attrs:
|
if "id" in node.attrs:
|
||||||
element_id = node['id']
|
element_id = node["id"]
|
||||||
if self.negative_patterns.match(element_id):
|
if self.negative_patterns.match(element_id):
|
||||||
class_id_score -= 0.5
|
class_id_score -= 0.5
|
||||||
return class_id_score
|
return class_id_score
|
||||||
|
|
||||||
|
|
||||||
|
class LLMContentFilter(RelevantContentFilter):
|
||||||
|
"""Content filtering using LLMs to generate relevant markdown."""
|
||||||
|
_UNWANTED_PROPS = {
|
||||||
|
'provider' : 'Instead, use llmConfig=LlmConfig(provider="...")',
|
||||||
|
'api_token' : 'Instead, use llmConfig=LlMConfig(api_token="...")',
|
||||||
|
'base_url' : 'Instead, use llmConfig=LlmConfig(base_url="...")',
|
||||||
|
'api_base' : 'Instead, use llmConfig=LlmConfig(base_url="...")',
|
||||||
|
}
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
provider: str = DEFAULT_PROVIDER,
|
||||||
|
api_token: Optional[str] = None,
|
||||||
|
llmConfig: "LlmConfig" = None,
|
||||||
|
instruction: str = None,
|
||||||
|
chunk_token_threshold: int = int(1e9),
|
||||||
|
overlap_rate: float = OVERLAP_RATE,
|
||||||
|
word_token_rate: float = WORD_TOKEN_RATE,
|
||||||
|
base_url: Optional[str] = None,
|
||||||
|
api_base: Optional[str] = None,
|
||||||
|
extra_args: Dict = None,
|
||||||
|
# char_token_rate: float = WORD_TOKEN_RATE * 5,
|
||||||
|
# chunk_mode: str = "char",
|
||||||
|
verbose: bool = False,
|
||||||
|
logger: Optional[AsyncLogger] = None,
|
||||||
|
ignore_cache: bool = True,
|
||||||
|
):
|
||||||
|
super().__init__(None)
|
||||||
|
self.provider = provider
|
||||||
|
self.api_token = api_token
|
||||||
|
self.base_url = base_url or api_base
|
||||||
|
self.llmConfig = llmConfig
|
||||||
|
self.instruction = instruction
|
||||||
|
self.chunk_token_threshold = chunk_token_threshold
|
||||||
|
self.overlap_rate = overlap_rate
|
||||||
|
self.word_token_rate = word_token_rate or WORD_TOKEN_RATE
|
||||||
|
# self.chunk_mode: str = chunk_mode
|
||||||
|
# self.char_token_rate = char_token_rate or word_token_rate / 5
|
||||||
|
# self.token_rate = word_token_rate if chunk_mode == "word" else self.char_token_rate
|
||||||
|
self.token_rate = word_token_rate or WORD_TOKEN_RATE
|
||||||
|
self.extra_args = extra_args or {}
|
||||||
|
self.ignore_cache = ignore_cache
|
||||||
|
self.verbose = verbose
|
||||||
|
|
||||||
|
# Setup logger with custom styling for LLM operations
|
||||||
|
if logger:
|
||||||
|
self.logger = logger
|
||||||
|
elif verbose:
|
||||||
|
self.logger = AsyncLogger(
|
||||||
|
verbose=verbose,
|
||||||
|
icons={
|
||||||
|
**AsyncLogger.DEFAULT_ICONS,
|
||||||
|
"LLM": "★", # Star for LLM operations
|
||||||
|
"CHUNK": "◈", # Diamond for chunks
|
||||||
|
"CACHE": "⚡", # Lightning for cache operations
|
||||||
|
},
|
||||||
|
colors={
|
||||||
|
**AsyncLogger.DEFAULT_COLORS,
|
||||||
|
LogLevel.INFO: Fore.MAGENTA
|
||||||
|
+ Style.DIM, # Dimmed purple for LLM ops
|
||||||
|
},
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
self.logger = None
|
||||||
|
|
||||||
|
self.usages = []
|
||||||
|
self.total_usage = TokenUsage()
|
||||||
|
|
||||||
|
def __setattr__(self, name, value):
|
||||||
|
"""Handle attribute setting."""
|
||||||
|
# TODO: Planning to set properties dynamically based on the __init__ signature
|
||||||
|
sig = inspect.signature(self.__init__)
|
||||||
|
all_params = sig.parameters # Dictionary of parameter names and their details
|
||||||
|
|
||||||
|
if name in self._UNWANTED_PROPS and value is not all_params[name].default:
|
||||||
|
raise AttributeError(f"Setting '{name}' is deprecated. {self._UNWANTED_PROPS[name]}")
|
||||||
|
|
||||||
|
super().__setattr__(name, value)
|
||||||
|
|
||||||
|
def _get_cache_key(self, html: str, instruction: str) -> str:
|
||||||
|
"""Generate a unique cache key based on HTML and instruction"""
|
||||||
|
content = f"{html}{instruction}"
|
||||||
|
return hashlib.md5(content.encode()).hexdigest()
|
||||||
|
|
||||||
|
def _merge_chunks(self, text: str) -> List[str]:
|
||||||
|
"""Split text into chunks with overlap using char or word mode."""
|
||||||
|
ov = int(self.chunk_token_threshold * self.overlap_rate)
|
||||||
|
sections = merge_chunks(
|
||||||
|
docs=[text],
|
||||||
|
target_size=self.chunk_token_threshold,
|
||||||
|
overlap=ov,
|
||||||
|
word_token_ratio=self.word_token_rate,
|
||||||
|
)
|
||||||
|
return sections
|
||||||
|
|
||||||
|
def filter_content(self, html: str, ignore_cache: bool = True) -> List[str]:
|
||||||
|
if not html or not isinstance(html, str):
|
||||||
|
return []
|
||||||
|
|
||||||
|
if self.logger:
|
||||||
|
self.logger.info(
|
||||||
|
"Starting LLM markdown content filtering process",
|
||||||
|
tag="LLM",
|
||||||
|
params={"provider": self.llmConfig.provider},
|
||||||
|
colors={"provider": Fore.CYAN},
|
||||||
|
)
|
||||||
|
|
||||||
|
# Cache handling
|
||||||
|
cache_dir = Path(get_home_folder()) / "llm_cache" / "content_filter"
|
||||||
|
cache_dir.mkdir(parents=True, exist_ok=True)
|
||||||
|
cache_key = self._get_cache_key(html, self.instruction or "")
|
||||||
|
cache_file = cache_dir / f"{cache_key}.json"
|
||||||
|
|
||||||
|
# if ignore_cache == None:
|
||||||
|
ignore_cache = self.ignore_cache
|
||||||
|
|
||||||
|
if not ignore_cache and cache_file.exists():
|
||||||
|
if self.logger:
|
||||||
|
self.logger.info("Found cached markdown result", tag="CACHE")
|
||||||
|
try:
|
||||||
|
with cache_file.open("r") as f:
|
||||||
|
cached_data = json.load(f)
|
||||||
|
usage = TokenUsage(**cached_data["usage"])
|
||||||
|
self.usages.append(usage)
|
||||||
|
self.total_usage.completion_tokens += usage.completion_tokens
|
||||||
|
self.total_usage.prompt_tokens += usage.prompt_tokens
|
||||||
|
self.total_usage.total_tokens += usage.total_tokens
|
||||||
|
return cached_data["blocks"]
|
||||||
|
except Exception as e:
|
||||||
|
if self.logger:
|
||||||
|
self.logger.error(
|
||||||
|
f"LLM markdown: Cache read error: {str(e)}", tag="CACHE"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Split into chunks
|
||||||
|
html_chunks = self._merge_chunks(html)
|
||||||
|
if self.logger:
|
||||||
|
self.logger.info(
|
||||||
|
"LLM markdown: Split content into {chunk_count} chunks",
|
||||||
|
tag="CHUNK",
|
||||||
|
params={"chunk_count": len(html_chunks)},
|
||||||
|
colors={"chunk_count": Fore.YELLOW},
|
||||||
|
)
|
||||||
|
|
||||||
|
start_time = time.time()
|
||||||
|
|
||||||
|
# Process chunks in parallel
|
||||||
|
with ThreadPoolExecutor(max_workers=4) as executor:
|
||||||
|
futures = []
|
||||||
|
for i, chunk in enumerate(html_chunks):
|
||||||
|
if self.logger:
|
||||||
|
self.logger.debug(
|
||||||
|
"LLM markdown: Processing chunk {chunk_num}/{total_chunks}",
|
||||||
|
tag="CHUNK",
|
||||||
|
params={"chunk_num": i + 1, "total_chunks": len(html_chunks)},
|
||||||
|
)
|
||||||
|
|
||||||
|
prompt_variables = {
|
||||||
|
"HTML": escape_json_string(sanitize_html(chunk)),
|
||||||
|
"REQUEST": self.instruction
|
||||||
|
or "Convert this HTML into clean, relevant markdown, removing any noise or irrelevant content.",
|
||||||
|
}
|
||||||
|
|
||||||
|
prompt = PROMPT_FILTER_CONTENT
|
||||||
|
for var, value in prompt_variables.items():
|
||||||
|
prompt = prompt.replace("{" + var + "}", value)
|
||||||
|
|
||||||
|
def _proceed_with_chunk(
|
||||||
|
provider: str,
|
||||||
|
prompt: str,
|
||||||
|
api_token: str,
|
||||||
|
base_url: Optional[str] = None,
|
||||||
|
extra_args: Dict = {},
|
||||||
|
) -> List[str]:
|
||||||
|
if self.logger:
|
||||||
|
self.logger.info(
|
||||||
|
"LLM Markdown: Processing chunk {chunk_num}",
|
||||||
|
tag="CHUNK",
|
||||||
|
params={"chunk_num": i + 1},
|
||||||
|
)
|
||||||
|
return perform_completion_with_backoff(
|
||||||
|
provider,
|
||||||
|
prompt,
|
||||||
|
api_token,
|
||||||
|
base_url=base_url,
|
||||||
|
extra_args=extra_args,
|
||||||
|
)
|
||||||
|
|
||||||
|
future = executor.submit(
|
||||||
|
_proceed_with_chunk,
|
||||||
|
self.llmConfig.provider,
|
||||||
|
prompt,
|
||||||
|
self.llmConfig.api_token,
|
||||||
|
self.llmConfig.base_url,
|
||||||
|
self.extra_args,
|
||||||
|
)
|
||||||
|
futures.append((i, future))
|
||||||
|
|
||||||
|
# Collect results in order
|
||||||
|
ordered_results = []
|
||||||
|
for i, future in sorted(futures):
|
||||||
|
try:
|
||||||
|
response = future.result()
|
||||||
|
|
||||||
|
# Track usage
|
||||||
|
usage = TokenUsage(
|
||||||
|
completion_tokens=response.usage.completion_tokens,
|
||||||
|
prompt_tokens=response.usage.prompt_tokens,
|
||||||
|
total_tokens=response.usage.total_tokens,
|
||||||
|
completion_tokens_details=(
|
||||||
|
response.usage.completion_tokens_details.__dict__
|
||||||
|
if response.usage.completion_tokens_details
|
||||||
|
else {}
|
||||||
|
),
|
||||||
|
prompt_tokens_details=(
|
||||||
|
response.usage.prompt_tokens_details.__dict__
|
||||||
|
if response.usage.prompt_tokens_details
|
||||||
|
else {}
|
||||||
|
),
|
||||||
|
)
|
||||||
|
self.usages.append(usage)
|
||||||
|
self.total_usage.completion_tokens += usage.completion_tokens
|
||||||
|
self.total_usage.prompt_tokens += usage.prompt_tokens
|
||||||
|
self.total_usage.total_tokens += usage.total_tokens
|
||||||
|
|
||||||
|
blocks = extract_xml_data(
|
||||||
|
["content"], response.choices[0].message.content
|
||||||
|
)["content"]
|
||||||
|
if blocks:
|
||||||
|
ordered_results.append(blocks)
|
||||||
|
if self.logger:
|
||||||
|
self.logger.success(
|
||||||
|
"LLM markdown: Successfully processed chunk {chunk_num}",
|
||||||
|
tag="CHUNK",
|
||||||
|
params={"chunk_num": i + 1},
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
if self.logger:
|
||||||
|
self.logger.error(
|
||||||
|
"LLM markdown: Error processing chunk {chunk_num}: {error}",
|
||||||
|
tag="CHUNK",
|
||||||
|
params={"chunk_num": i + 1, "error": str(e)},
|
||||||
|
)
|
||||||
|
|
||||||
|
end_time = time.time()
|
||||||
|
if self.logger:
|
||||||
|
self.logger.success(
|
||||||
|
"LLM markdown: Completed processing in {time:.2f}s",
|
||||||
|
tag="LLM",
|
||||||
|
params={"time": end_time - start_time},
|
||||||
|
colors={"time": Fore.YELLOW},
|
||||||
|
)
|
||||||
|
|
||||||
|
result = ordered_results if ordered_results else []
|
||||||
|
|
||||||
|
# Cache the final result
|
||||||
|
cache_data = {"blocks": result, "usage": self.total_usage.__dict__}
|
||||||
|
with cache_file.open("w") as f:
|
||||||
|
json.dump(cache_data, f)
|
||||||
|
if self.logger:
|
||||||
|
self.logger.info("Cached results for future use", tag="CACHE")
|
||||||
|
|
||||||
|
return result
|
||||||
|
|
||||||
|
def show_usage(self) -> None:
|
||||||
|
"""Print usage statistics"""
|
||||||
|
print("\n=== Token Usage Summary ===")
|
||||||
|
print(f"{'Type':<15} {'Count':>12}")
|
||||||
|
print("-" * 30)
|
||||||
|
print(f"{'Completion':<15} {self.total_usage.completion_tokens:>12,}")
|
||||||
|
print(f"{'Prompt':<15} {self.total_usage.prompt_tokens:>12,}")
|
||||||
|
print(f"{'Total':<15} {self.total_usage.total_tokens:>12,}")
|
||||||
|
|
||||||
|
if self.usages:
|
||||||
|
print("\n=== Usage History ===")
|
||||||
|
print(f"{'Request #':<10} {'Completion':>12} {'Prompt':>12} {'Total':>12}")
|
||||||
|
print("-" * 48)
|
||||||
|
for i, usage in enumerate(self.usages, 1):
|
||||||
|
print(
|
||||||
|
f"{i:<10} {usage.completion_tokens:>12,} "
|
||||||
|
f"{usage.prompt_tokens:>12,} {usage.total_tokens:>12,}"
|
||||||
|
)
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
0
crawl4ai/crawlers/amazon_product/__init__.py
Normal file
0
crawl4ai/crawlers/amazon_product/__init__.py
Normal file
20
crawl4ai/crawlers/amazon_product/crawler.py
Normal file
20
crawl4ai/crawlers/amazon_product/crawler.py
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
from crawl4ai.hub import BaseCrawler
|
||||||
|
|
||||||
|
__meta__ = {
|
||||||
|
"version": "1.2.0",
|
||||||
|
"tested_on": ["amazon.com"],
|
||||||
|
"rate_limit": "50 RPM",
|
||||||
|
"schema": {"product": ["name", "price"]}
|
||||||
|
}
|
||||||
|
|
||||||
|
class AmazonProductCrawler(BaseCrawler):
|
||||||
|
async def run(self, url: str, **kwargs) -> str:
|
||||||
|
try:
|
||||||
|
self.logger.info(f"Crawling {url}")
|
||||||
|
return '{"product": {"name": "Test Amazon Product"}}'
|
||||||
|
except Exception as e:
|
||||||
|
self.logger.error(f"Crawl failed: {str(e)}")
|
||||||
|
return json.dumps({
|
||||||
|
"error": str(e),
|
||||||
|
"metadata": self.meta # Include meta in error response
|
||||||
|
})
|
||||||
0
crawl4ai/crawlers/google_search/__init__.py
Normal file
0
crawl4ai/crawlers/google_search/__init__.py
Normal file
130
crawl4ai/crawlers/google_search/crawler.py
Normal file
130
crawl4ai/crawlers/google_search/crawler.py
Normal file
@@ -0,0 +1,130 @@
|
|||||||
|
from crawl4ai import BrowserConfig, AsyncWebCrawler, CrawlerRunConfig, CacheMode
|
||||||
|
from crawl4ai.hub import BaseCrawler
|
||||||
|
from crawl4ai.utils import optimize_html, get_home_folder
|
||||||
|
from crawl4ai.extraction_strategy import JsonCssExtractionStrategy
|
||||||
|
from pathlib import Path
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
from typing import Dict
|
||||||
|
|
||||||
|
|
||||||
|
class GoogleSearchCrawler(BaseCrawler):
|
||||||
|
__meta__ = {
|
||||||
|
"version": "1.0.0",
|
||||||
|
"tested_on": ["google.com/search*"],
|
||||||
|
"rate_limit": "10 RPM",
|
||||||
|
"description": "Crawls Google Search results (text + images)",
|
||||||
|
}
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
super().__init__()
|
||||||
|
self.js_script = (Path(__file__).parent /
|
||||||
|
"script.js").read_text()
|
||||||
|
|
||||||
|
async def run(self, url="", query: str = "", search_type: str = "text", schema_cache_path = None, **kwargs) -> str:
|
||||||
|
"""Crawl Google Search results for a query"""
|
||||||
|
url = f"https://www.google.com/search?q={query}&gl=sg&hl=en" if search_type == "text" else f"https://www.google.com/search?q={query}&gl=sg&hl=en&tbs=qdr:d&udm=2"
|
||||||
|
if kwargs.get("page_start", 1) > 1:
|
||||||
|
url = f"{url}&start={kwargs['page_start'] * 10}"
|
||||||
|
if kwargs.get("page_length", 1) > 1:
|
||||||
|
url = f"{url}&num={kwargs['page_length']}"
|
||||||
|
|
||||||
|
browser_config = BrowserConfig(headless=True, verbose=True)
|
||||||
|
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||||
|
config = CrawlerRunConfig(
|
||||||
|
cache_mode=kwargs.get("cache_mode", CacheMode.BYPASS),
|
||||||
|
keep_attrs=["id", "class"],
|
||||||
|
keep_data_attributes=True,
|
||||||
|
delay_before_return_html=kwargs.get(
|
||||||
|
"delay", 2 if search_type == "image" else 1),
|
||||||
|
js_code=self.js_script if search_type == "image" else None,
|
||||||
|
)
|
||||||
|
|
||||||
|
result = await crawler.arun(url=url, config=config)
|
||||||
|
if not result.success:
|
||||||
|
return json.dumps({"error": result.error})
|
||||||
|
|
||||||
|
if search_type == "image":
|
||||||
|
if result.js_execution_result.get("success", False) is False:
|
||||||
|
return json.dumps({"error": result.js_execution_result.get("error", "Unknown error")})
|
||||||
|
if "results" in result.js_execution_result:
|
||||||
|
image_result = result.js_execution_result['results'][0]
|
||||||
|
if image_result.get("success", False) is False:
|
||||||
|
return json.dumps({"error": image_result.get("error", "Unknown error")})
|
||||||
|
return json.dumps(image_result["result"], indent=4)
|
||||||
|
|
||||||
|
# For text search, extract structured data
|
||||||
|
schemas = await self._build_schemas(result.cleaned_html, schema_cache_path)
|
||||||
|
extracted = {
|
||||||
|
key: JsonCssExtractionStrategy(schema=schemas[key]).run(
|
||||||
|
url=url, sections=[result.html]
|
||||||
|
)
|
||||||
|
for key in schemas
|
||||||
|
}
|
||||||
|
return json.dumps(extracted, indent=4)
|
||||||
|
|
||||||
|
async def _build_schemas(self, html: str, schema_cache_path: str = None) -> Dict[str, Dict]:
|
||||||
|
"""Build extraction schemas (organic, top stories, etc.)"""
|
||||||
|
home_dir = get_home_folder() if not schema_cache_path else schema_cache_path
|
||||||
|
os.makedirs(f"{home_dir}/schema", exist_ok=True)
|
||||||
|
|
||||||
|
cleaned_html = optimize_html(html, threshold=100)
|
||||||
|
|
||||||
|
organic_schema = None
|
||||||
|
if os.path.exists(f"{home_dir}/schema/organic_schema.json"):
|
||||||
|
with open(f"{home_dir}/schema/organic_schema.json", "r") as f:
|
||||||
|
organic_schema = json.load(f)
|
||||||
|
else:
|
||||||
|
organic_schema = JsonCssExtractionStrategy.generate_schema(
|
||||||
|
html=cleaned_html,
|
||||||
|
target_json_example="""{
|
||||||
|
"title": "...",
|
||||||
|
"link": "...",
|
||||||
|
"snippet": "...",
|
||||||
|
"date": "1 hour ago",
|
||||||
|
}""",
|
||||||
|
query="""The given html is the crawled html from Google search result. Please find the schema for organic search item in the given html, I am interested in title, link, snippet text. date."""
|
||||||
|
)
|
||||||
|
|
||||||
|
with open(f"{home_dir}/schema/organic_schema.json", "w") as f:
|
||||||
|
f.write(json.dumps(organic_schema))
|
||||||
|
|
||||||
|
top_stories_schema = None
|
||||||
|
if os.path.exists(f"{home_dir}/schema/top_stories_schema.json"):
|
||||||
|
with open(f"{home_dir}/schema/top_stories_schema.json", "r") as f:
|
||||||
|
top_stories_schema = json.load(f)
|
||||||
|
else:
|
||||||
|
top_stories_schema = JsonCssExtractionStrategy.generate_schema(
|
||||||
|
html=cleaned_html,
|
||||||
|
target_json_example="""{
|
||||||
|
"title": "...",
|
||||||
|
"link": "...",
|
||||||
|
"source": "Insider Monkey",
|
||||||
|
"date": "1 hour ago",
|
||||||
|
}""",
|
||||||
|
query="""The given html is the crawled html from Google search result. Please find the schema for Top Story item int he given html, I am interested in title, link, source. date and imageUrl."""
|
||||||
|
)
|
||||||
|
|
||||||
|
with open(f"{home_dir}/schema/top_stories_schema.json", "w") as f:
|
||||||
|
f.write(json.dumps(top_stories_schema))
|
||||||
|
|
||||||
|
suggested_query_schema = None
|
||||||
|
if os.path.exists(f"{home_dir}/schema/suggested_query_schema.json"):
|
||||||
|
with open(f"{home_dir}/schema/suggested_query_schema.json", "r") as f:
|
||||||
|
suggested_query_schema = json.load(f)
|
||||||
|
else:
|
||||||
|
suggested_query_schema = JsonCssExtractionStrategy.generate_schema(
|
||||||
|
html=cleaned_html,
|
||||||
|
target_json_example="""{
|
||||||
|
"query": "A for Apple",
|
||||||
|
}""",
|
||||||
|
query="""The given HTML contains the crawled HTML from Google search results. Please find the schema for each suggested query in the section "People also search for" within the given HTML. I am interested in the queries only."""
|
||||||
|
)
|
||||||
|
with open(f"{home_dir}/schema/suggested_query_schema.json", "w") as f:
|
||||||
|
f.write(json.dumps(suggested_query_schema))
|
||||||
|
|
||||||
|
return {
|
||||||
|
"organic_schema": organic_schema,
|
||||||
|
"top_stories_schema": top_stories_schema,
|
||||||
|
"suggested_query_schema": suggested_query_schema,
|
||||||
|
}
|
||||||
115
crawl4ai/crawlers/google_search/script.js
Normal file
115
crawl4ai/crawlers/google_search/script.js
Normal file
@@ -0,0 +1,115 @@
|
|||||||
|
(() => {
|
||||||
|
// Function to extract image data from Google Images page
|
||||||
|
function extractImageData() {
|
||||||
|
const keys = Object.keys(window.W_jd);
|
||||||
|
let allImageData = [];
|
||||||
|
let currentPosition = 0;
|
||||||
|
|
||||||
|
// Get the symbol we'll use (from first valid entry)
|
||||||
|
let targetSymbol;
|
||||||
|
for (let key of keys) {
|
||||||
|
try {
|
||||||
|
const symbols = Object.getOwnPropertySymbols(window.W_jd[key]);
|
||||||
|
if (symbols.length > 0) {
|
||||||
|
targetSymbol = symbols[0];
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
} catch (e) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!targetSymbol) return [];
|
||||||
|
|
||||||
|
// Iterate through ALL keys
|
||||||
|
for (let key of keys) {
|
||||||
|
try {
|
||||||
|
const o1 = window.W_jd[key][targetSymbol]
|
||||||
|
if (!o1) continue;
|
||||||
|
const data = Object.values(o1)[0]
|
||||||
|
// const data = window.W_jd[key][targetSymbol]?.Ws;
|
||||||
|
// Check if this is a valid image data entry
|
||||||
|
if (data && Array.isArray(data[1])) {
|
||||||
|
const processedData = processImageEntry(data, currentPosition);
|
||||||
|
if (processedData) {
|
||||||
|
allImageData.push(processedData);
|
||||||
|
currentPosition++;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} catch (e) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return allImageData;
|
||||||
|
}
|
||||||
|
|
||||||
|
function processImageEntry(entry, position) {
|
||||||
|
const imageData = entry[1];
|
||||||
|
if (!Array.isArray(imageData)) return null;
|
||||||
|
|
||||||
|
// Extract the image ID
|
||||||
|
const imageId = imageData[1];
|
||||||
|
if (!imageId) return null;
|
||||||
|
|
||||||
|
// Find the corresponding DOM element
|
||||||
|
const domElement = document.querySelector(`[data-docid="${imageId}"]`);
|
||||||
|
if (!domElement) return null;
|
||||||
|
|
||||||
|
// Extract data from the array structure
|
||||||
|
const [
|
||||||
|
_,
|
||||||
|
id,
|
||||||
|
thumbnailInfo,
|
||||||
|
imageInfo,
|
||||||
|
__,
|
||||||
|
___,
|
||||||
|
rgb,
|
||||||
|
____,
|
||||||
|
_____,
|
||||||
|
metadata
|
||||||
|
] = imageData;
|
||||||
|
|
||||||
|
// Ensure we have the required data
|
||||||
|
if (!thumbnailInfo || !imageInfo) return null;
|
||||||
|
|
||||||
|
// Extract metadata from DOM
|
||||||
|
const title = domElement?.querySelector('.toI8Rb')?.textContent?.trim();
|
||||||
|
const source = domElement?.querySelector('.guK3rf')?.textContent?.trim();
|
||||||
|
const link = domElement?.querySelector('a.EZAeBe')?.href;
|
||||||
|
|
||||||
|
if (!link) return null;
|
||||||
|
|
||||||
|
// Build Google Image URL
|
||||||
|
const googleUrl = buildGoogleImageUrl(imageInfo[0], link, imageId, imageInfo[1], imageInfo[2]);
|
||||||
|
|
||||||
|
return {
|
||||||
|
title,
|
||||||
|
imageUrl: imageInfo[0],
|
||||||
|
imageWidth: imageInfo[2],
|
||||||
|
imageHeight: imageInfo[1],
|
||||||
|
thumbnailUrl: thumbnailInfo[0],
|
||||||
|
thumbnailWidth: thumbnailInfo[2],
|
||||||
|
thumbnailHeight: thumbnailInfo[1],
|
||||||
|
source,
|
||||||
|
domain: metadata['2000']?.[1] || new URL(link).hostname,
|
||||||
|
link,
|
||||||
|
googleUrl,
|
||||||
|
position: position + 1
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
function buildGoogleImageUrl(imgUrl, refUrl, tbnid, height, width) {
|
||||||
|
const params = new URLSearchParams({
|
||||||
|
imgurl: imgUrl,
|
||||||
|
tbnid: tbnid,
|
||||||
|
imgrefurl: refUrl,
|
||||||
|
docid: tbnid,
|
||||||
|
w: width.toString(),
|
||||||
|
h: height.toString(),
|
||||||
|
});
|
||||||
|
|
||||||
|
return `https://www.google.com/imgres?${params.toString()}`;
|
||||||
|
}
|
||||||
|
return extractImageData();
|
||||||
|
})();
|
||||||
47
crawl4ai/deep_crawling/__init__.py
Normal file
47
crawl4ai/deep_crawling/__init__.py
Normal file
@@ -0,0 +1,47 @@
|
|||||||
|
# deep_crawling/__init__.py
|
||||||
|
from .base_strategy import DeepCrawlDecorator, DeepCrawlStrategy
|
||||||
|
from .bfs_strategy import BFSDeepCrawlStrategy
|
||||||
|
from .bff_strategy import BestFirstCrawlingStrategy
|
||||||
|
from .dfs_strategy import DFSDeepCrawlStrategy
|
||||||
|
from .filters import (
|
||||||
|
FilterChain,
|
||||||
|
ContentTypeFilter,
|
||||||
|
DomainFilter,
|
||||||
|
URLFilter,
|
||||||
|
URLPatternFilter,
|
||||||
|
FilterStats,
|
||||||
|
ContentRelevanceFilter,
|
||||||
|
SEOFilter
|
||||||
|
)
|
||||||
|
from .scorers import (
|
||||||
|
KeywordRelevanceScorer,
|
||||||
|
URLScorer,
|
||||||
|
CompositeScorer,
|
||||||
|
DomainAuthorityScorer,
|
||||||
|
FreshnessScorer,
|
||||||
|
PathDepthScorer,
|
||||||
|
ContentTypeScorer
|
||||||
|
)
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
"DeepCrawlDecorator",
|
||||||
|
"DeepCrawlStrategy",
|
||||||
|
"BFSDeepCrawlStrategy",
|
||||||
|
"BestFirstCrawlingStrategy",
|
||||||
|
"DFSDeepCrawlStrategy",
|
||||||
|
"FilterChain",
|
||||||
|
"ContentTypeFilter",
|
||||||
|
"DomainFilter",
|
||||||
|
"URLFilter",
|
||||||
|
"URLPatternFilter",
|
||||||
|
"FilterStats",
|
||||||
|
"ContentRelevanceFilter",
|
||||||
|
"SEOFilter",
|
||||||
|
"KeywordRelevanceScorer",
|
||||||
|
"URLScorer",
|
||||||
|
"CompositeScorer",
|
||||||
|
"DomainAuthorityScorer",
|
||||||
|
"FreshnessScorer",
|
||||||
|
"PathDepthScorer",
|
||||||
|
"ContentTypeScorer",
|
||||||
|
]
|
||||||
159
crawl4ai/deep_crawling/base_strategy.py
Normal file
159
crawl4ai/deep_crawling/base_strategy.py
Normal file
@@ -0,0 +1,159 @@
|
|||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from typing import AsyncGenerator, Optional, Set, List, Dict
|
||||||
|
from functools import wraps
|
||||||
|
from contextvars import ContextVar
|
||||||
|
from ..types import AsyncWebCrawler, CrawlerRunConfig, CrawlResult, RunManyReturn
|
||||||
|
|
||||||
|
|
||||||
|
class DeepCrawlDecorator:
|
||||||
|
"""Decorator that adds deep crawling capability to arun method."""
|
||||||
|
deep_crawl_active = ContextVar("deep_crawl_active", default=False)
|
||||||
|
|
||||||
|
def __init__(self, crawler: AsyncWebCrawler):
|
||||||
|
self.crawler = crawler
|
||||||
|
|
||||||
|
def __call__(self, original_arun):
|
||||||
|
@wraps(original_arun)
|
||||||
|
async def wrapped_arun(url: str, config: CrawlerRunConfig = None, **kwargs):
|
||||||
|
# If deep crawling is already active, call the original method to avoid recursion.
|
||||||
|
if config and config.deep_crawl_strategy and not self.deep_crawl_active.get():
|
||||||
|
token = self.deep_crawl_active.set(True)
|
||||||
|
# Await the arun call to get the actual result object.
|
||||||
|
result_obj = await config.deep_crawl_strategy.arun(
|
||||||
|
crawler=self.crawler,
|
||||||
|
start_url=url,
|
||||||
|
config=config
|
||||||
|
)
|
||||||
|
if config.stream:
|
||||||
|
async def result_wrapper():
|
||||||
|
try:
|
||||||
|
async for result in result_obj:
|
||||||
|
yield result
|
||||||
|
finally:
|
||||||
|
self.deep_crawl_active.reset(token)
|
||||||
|
return result_wrapper()
|
||||||
|
else:
|
||||||
|
try:
|
||||||
|
return result_obj
|
||||||
|
finally:
|
||||||
|
self.deep_crawl_active.reset(token)
|
||||||
|
return await original_arun(url, config=config, **kwargs)
|
||||||
|
return wrapped_arun
|
||||||
|
|
||||||
|
class DeepCrawlStrategy(ABC):
|
||||||
|
"""
|
||||||
|
Abstract base class for deep crawling strategies.
|
||||||
|
|
||||||
|
Core functions:
|
||||||
|
- arun: Main entry point that returns an async generator of CrawlResults.
|
||||||
|
- shutdown: Clean up resources.
|
||||||
|
- can_process_url: Validate a URL and decide whether to process it.
|
||||||
|
- _process_links: Extract and process links from a CrawlResult.
|
||||||
|
"""
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def _arun_batch(
|
||||||
|
self,
|
||||||
|
start_url: str,
|
||||||
|
crawler: AsyncWebCrawler,
|
||||||
|
config: CrawlerRunConfig,
|
||||||
|
) -> List[CrawlResult]:
|
||||||
|
"""
|
||||||
|
Batch (non-streaming) mode:
|
||||||
|
Processes one BFS level at a time, then yields all the results.
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def _arun_stream(
|
||||||
|
self,
|
||||||
|
start_url: str,
|
||||||
|
crawler: AsyncWebCrawler,
|
||||||
|
config: CrawlerRunConfig,
|
||||||
|
) -> AsyncGenerator[CrawlResult, None]:
|
||||||
|
"""
|
||||||
|
Streaming mode:
|
||||||
|
Processes one BFS level at a time and yields results immediately as they arrive.
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
async def arun(
|
||||||
|
self,
|
||||||
|
start_url: str,
|
||||||
|
crawler: AsyncWebCrawler,
|
||||||
|
config: Optional[CrawlerRunConfig] = None,
|
||||||
|
) -> RunManyReturn:
|
||||||
|
"""
|
||||||
|
Traverse the given URL using the specified crawler.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
start_url (str): The URL from which to start crawling.
|
||||||
|
crawler (AsyncWebCrawler): The crawler instance to use.
|
||||||
|
crawler_run_config (Optional[CrawlerRunConfig]): Crawler configuration.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Union[CrawlResultT, List[CrawlResultT], AsyncGenerator[CrawlResultT, None]]
|
||||||
|
"""
|
||||||
|
if config is None:
|
||||||
|
raise ValueError("CrawlerRunConfig must be provided")
|
||||||
|
|
||||||
|
if config.stream:
|
||||||
|
return self._arun_stream(start_url, crawler, config)
|
||||||
|
else:
|
||||||
|
return await self._arun_batch(start_url, crawler, config)
|
||||||
|
|
||||||
|
def __call__(self, start_url: str, crawler: AsyncWebCrawler, config: CrawlerRunConfig):
|
||||||
|
return self.arun(start_url, crawler, config)
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def shutdown(self) -> None:
|
||||||
|
"""
|
||||||
|
Clean up resources used by the deep crawl strategy.
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def can_process_url(self, url: str, depth: int) -> bool:
|
||||||
|
"""
|
||||||
|
Validate the URL format and apply custom filtering logic.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
url (str): The URL to validate.
|
||||||
|
depth (int): The current depth in the crawl.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if the URL should be processed, False otherwise.
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def link_discovery(
|
||||||
|
self,
|
||||||
|
result: CrawlResult,
|
||||||
|
source_url: str,
|
||||||
|
current_depth: int,
|
||||||
|
visited: Set[str],
|
||||||
|
next_level: List[tuple],
|
||||||
|
depths: Dict[str, int],
|
||||||
|
) -> None:
|
||||||
|
"""
|
||||||
|
Extract and process links from the given crawl result.
|
||||||
|
|
||||||
|
This method should:
|
||||||
|
- Validate each extracted URL using can_process_url.
|
||||||
|
- Optionally score URLs.
|
||||||
|
- Append valid URLs (and their parent references) to the next_level list.
|
||||||
|
- Update the depths dictionary with the new depth for each URL.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
result (CrawlResult): The result from a crawl operation.
|
||||||
|
source_url (str): The URL from which this result was obtained.
|
||||||
|
current_depth (int): The depth at which the source URL was processed.
|
||||||
|
visited (Set[str]): Set of already visited URLs.
|
||||||
|
next_level (List[tuple]): List of tuples (url, parent_url) for the next BFS level.
|
||||||
|
depths (Dict[str, int]): Mapping of URLs to their current depth.
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
255
crawl4ai/deep_crawling/bff_strategy.py
Normal file
255
crawl4ai/deep_crawling/bff_strategy.py
Normal file
@@ -0,0 +1,255 @@
|
|||||||
|
# best_first_crawling_strategy.py
|
||||||
|
import asyncio
|
||||||
|
import logging
|
||||||
|
from datetime import datetime
|
||||||
|
from typing import AsyncGenerator, Optional, Set, Dict, List, Tuple
|
||||||
|
from urllib.parse import urlparse
|
||||||
|
|
||||||
|
from ..models import TraversalStats
|
||||||
|
from .filters import FilterChain
|
||||||
|
from .scorers import URLScorer
|
||||||
|
from . import DeepCrawlStrategy
|
||||||
|
|
||||||
|
from ..types import AsyncWebCrawler, CrawlerRunConfig, CrawlResult, RunManyReturn
|
||||||
|
|
||||||
|
from math import inf as infinity
|
||||||
|
|
||||||
|
# Configurable batch size for processing items from the priority queue
|
||||||
|
BATCH_SIZE = 10
|
||||||
|
|
||||||
|
|
||||||
|
class BestFirstCrawlingStrategy(DeepCrawlStrategy):
|
||||||
|
"""
|
||||||
|
Best-First Crawling Strategy using a priority queue.
|
||||||
|
|
||||||
|
This strategy prioritizes URLs based on their score, ensuring that higher-value
|
||||||
|
pages are crawled first. It reimplements the core traversal loop to use a priority
|
||||||
|
queue while keeping URL validation and link discovery consistent with our design.
|
||||||
|
|
||||||
|
Core methods:
|
||||||
|
- arun: Returns either a list (batch mode) or an async generator (stream mode).
|
||||||
|
- _arun_best_first: Core generator that uses a priority queue to yield CrawlResults.
|
||||||
|
- can_process_url: Validates URLs and applies filtering (inherited behavior).
|
||||||
|
- link_discovery: Extracts and validates links from a CrawlResult.
|
||||||
|
"""
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
max_depth: int,
|
||||||
|
filter_chain: FilterChain = FilterChain(),
|
||||||
|
url_scorer: Optional[URLScorer] = None,
|
||||||
|
include_external: bool = False,
|
||||||
|
max_pages: int = infinity,
|
||||||
|
logger: Optional[logging.Logger] = None,
|
||||||
|
):
|
||||||
|
self.max_depth = max_depth
|
||||||
|
self.filter_chain = filter_chain
|
||||||
|
self.url_scorer = url_scorer
|
||||||
|
self.include_external = include_external
|
||||||
|
self.max_pages = max_pages
|
||||||
|
self.logger = logger or logging.getLogger(__name__)
|
||||||
|
self.stats = TraversalStats(start_time=datetime.now())
|
||||||
|
self._cancel_event = asyncio.Event()
|
||||||
|
self._pages_crawled = 0
|
||||||
|
|
||||||
|
async def can_process_url(self, url: str, depth: int) -> bool:
|
||||||
|
"""
|
||||||
|
Validate the URL format and apply filtering.
|
||||||
|
For the starting URL (depth 0), filtering is bypassed.
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
parsed = urlparse(url)
|
||||||
|
if not parsed.scheme or not parsed.netloc:
|
||||||
|
raise ValueError("Missing scheme or netloc")
|
||||||
|
if parsed.scheme not in ("http", "https"):
|
||||||
|
raise ValueError("Invalid scheme")
|
||||||
|
if "." not in parsed.netloc:
|
||||||
|
raise ValueError("Invalid domain")
|
||||||
|
except Exception as e:
|
||||||
|
self.logger.warning(f"Invalid URL: {url}, error: {e}")
|
||||||
|
return False
|
||||||
|
|
||||||
|
if depth != 0 and not await self.filter_chain.apply(url):
|
||||||
|
return False
|
||||||
|
|
||||||
|
return True
|
||||||
|
|
||||||
|
async def link_discovery(
|
||||||
|
self,
|
||||||
|
result: CrawlResult,
|
||||||
|
source_url: str,
|
||||||
|
current_depth: int,
|
||||||
|
visited: Set[str],
|
||||||
|
next_links: List[Tuple[str, Optional[str]]],
|
||||||
|
depths: Dict[str, int],
|
||||||
|
) -> None:
|
||||||
|
"""
|
||||||
|
Extract links from the crawl result, validate them, and append new URLs
|
||||||
|
(with their parent references) to next_links.
|
||||||
|
Also updates the depths dictionary.
|
||||||
|
"""
|
||||||
|
new_depth = current_depth + 1
|
||||||
|
if new_depth > self.max_depth:
|
||||||
|
return
|
||||||
|
|
||||||
|
# If we've reached the max pages limit, don't discover new links
|
||||||
|
remaining_capacity = self.max_pages - self._pages_crawled
|
||||||
|
if remaining_capacity <= 0:
|
||||||
|
self.logger.info(f"Max pages limit ({self.max_pages}) reached, stopping link discovery")
|
||||||
|
return
|
||||||
|
|
||||||
|
# Retrieve internal links; include external links if enabled.
|
||||||
|
links = result.links.get("internal", [])
|
||||||
|
if self.include_external:
|
||||||
|
links += result.links.get("external", [])
|
||||||
|
|
||||||
|
# If we have more links than remaining capacity, limit how many we'll process
|
||||||
|
valid_links = []
|
||||||
|
for link in links:
|
||||||
|
url = link.get("href")
|
||||||
|
if url in visited:
|
||||||
|
continue
|
||||||
|
if not await self.can_process_url(url, new_depth):
|
||||||
|
self.stats.urls_skipped += 1
|
||||||
|
continue
|
||||||
|
|
||||||
|
valid_links.append(url)
|
||||||
|
|
||||||
|
# If we have more valid links than capacity, limit them
|
||||||
|
if len(valid_links) > remaining_capacity:
|
||||||
|
valid_links = valid_links[:remaining_capacity]
|
||||||
|
self.logger.info(f"Limiting to {remaining_capacity} URLs due to max_pages limit")
|
||||||
|
|
||||||
|
# Record the new depths and add to next_links
|
||||||
|
for url in valid_links:
|
||||||
|
depths[url] = new_depth
|
||||||
|
next_links.append((url, source_url))
|
||||||
|
|
||||||
|
async def _arun_best_first(
    self,
    start_url: str,
    crawler: AsyncWebCrawler,
    config: CrawlerRunConfig,
) -> AsyncGenerator[CrawlResult, None]:
    """
    Core best-first crawl method using a priority queue.

    The queue items are tuples of (score, depth, url, parent_url). Lower scores
    are treated as higher priority. URLs are processed in batches for efficiency.

    NOTE(review): scorer values are pushed as-is, so a scorer that returns
    *higher* values for better URLs would be crawled last — confirm the
    scorer's orientation matches the "lower is better" queue semantics.
    """
    queue: asyncio.PriorityQueue = asyncio.PriorityQueue()
    # Seed the frontier with the start URL at score 0, depth 0, no parent.
    await queue.put((0, 0, start_url, None))
    visited: Set[str] = set()
    depths: Dict[str, int] = {start_url: 0}

    while not queue.empty() and not self._cancel_event.is_set():
        # Stop if we've reached the max pages limit.
        if self._pages_crawled >= self.max_pages:
            self.logger.info(f"Max pages limit ({self.max_pages}) reached, stopping crawl")
            break

        batch: List[Tuple[float, int, str, Optional[str]]] = []
        # Retrieve up to BATCH_SIZE items from the priority queue.
        # (BATCH_SIZE is a module-level constant defined outside this view.)
        for _ in range(BATCH_SIZE):
            if queue.empty():
                break
            item = await queue.get()
            score, depth, url, parent_url = item
            # A URL may have been queued more than once (via different
            # parents); only the first dequeue is processed.
            if url in visited:
                continue
            visited.add(url)
            batch.append(item)

        if not batch:
            continue

        # Process the current batch of URLs. The clone disables nested deep
        # crawling and forces streaming so results arrive incrementally.
        urls = [item[2] for item in batch]
        batch_config = config.clone(deep_crawl_strategy=None, stream=True)
        stream_gen = await crawler.arun_many(urls=urls, config=batch_config)
        async for result in stream_gen:
            result_url = result.url
            # Find the corresponding (score, depth, url, parent) tuple so the
            # result can be annotated; results may arrive in any order.
            corresponding = next((item for item in batch if item[2] == result_url), None)
            if not corresponding:
                continue
            score, depth, url, parent_url = corresponding
            result.metadata = result.metadata or {}
            result.metadata["depth"] = depth
            result.metadata["parent_url"] = parent_url
            result.metadata["score"] = score

            # Count only successful crawls toward the max_pages limit.
            if result.success:
                self._pages_crawled += 1

            yield result

            # Only discover links from successful crawls.
            if result.success:
                # Discover new links from this result; link_discovery applies
                # filtering, depth limits and remaining max_pages capacity.
                new_links: List[Tuple[str, Optional[str]]] = []
                await self.link_discovery(result, result_url, depth, visited, new_links, depths)

                for new_url, new_parent in new_links:
                    new_depth = depths.get(new_url, depth + 1)
                    new_score = self.url_scorer.score(new_url) if self.url_scorer else 0
                    await queue.put((new_score, new_depth, new_url, new_parent))

    # End of crawl.
|
||||||
|
|
||||||
|
async def _arun_batch(
|
||||||
|
self,
|
||||||
|
start_url: str,
|
||||||
|
crawler: AsyncWebCrawler,
|
||||||
|
config: CrawlerRunConfig,
|
||||||
|
) -> List[CrawlResult]:
|
||||||
|
"""
|
||||||
|
Best-first crawl in batch mode.
|
||||||
|
|
||||||
|
Aggregates all CrawlResults into a list.
|
||||||
|
"""
|
||||||
|
results: List[CrawlResult] = []
|
||||||
|
async for result in self._arun_best_first(start_url, crawler, config):
|
||||||
|
results.append(result)
|
||||||
|
return results
|
||||||
|
|
||||||
|
async def _arun_stream(
|
||||||
|
self,
|
||||||
|
start_url: str,
|
||||||
|
crawler: AsyncWebCrawler,
|
||||||
|
config: CrawlerRunConfig,
|
||||||
|
) -> AsyncGenerator[CrawlResult, None]:
|
||||||
|
"""
|
||||||
|
Best-first crawl in streaming mode.
|
||||||
|
|
||||||
|
Yields CrawlResults as they become available.
|
||||||
|
"""
|
||||||
|
async for result in self._arun_best_first(start_url, crawler, config):
|
||||||
|
yield result
|
||||||
|
|
||||||
|
async def arun(
    self,
    start_url: str,
    crawler: AsyncWebCrawler,
    config: Optional[CrawlerRunConfig] = None,
) -> "RunManyReturn":
    """
    Main entry point for best-first crawling.

    Dispatches on config.stream: returns an async generator of CrawlResults
    in streaming mode, otherwise awaits and returns the full list.

    Raises:
        ValueError: if no CrawlerRunConfig is supplied.
    """
    if config is None:
        raise ValueError("CrawlerRunConfig must be provided")
    if config.stream:
        return self._arun_stream(start_url, crawler, config)
    return await self._arun_batch(start_url, crawler, config)
|
||||||
|
|
||||||
|
async def shutdown(self) -> None:
    """
    Signal cancellation to any in-flight crawl and stamp the traversal
    stats with a completion time.
    """
    self._cancel_event.set()
    self.stats.end_time = datetime.now()
|
||||||
241
crawl4ai/deep_crawling/bfs_strategy.py
Normal file
241
crawl4ai/deep_crawling/bfs_strategy.py
Normal file
@@ -0,0 +1,241 @@
|
|||||||
|
# bfs_deep_crawl_strategy.py
|
||||||
|
import asyncio
|
||||||
|
import logging
|
||||||
|
from datetime import datetime
|
||||||
|
from typing import AsyncGenerator, Optional, Set, Dict, List, Tuple
|
||||||
|
from urllib.parse import urlparse
|
||||||
|
|
||||||
|
from ..models import TraversalStats
|
||||||
|
from .filters import FilterChain
|
||||||
|
from .scorers import URLScorer
|
||||||
|
from . import DeepCrawlStrategy
|
||||||
|
from ..types import AsyncWebCrawler, CrawlerRunConfig, CrawlResult
|
||||||
|
from math import inf as infinity
|
||||||
|
|
||||||
|
class BFSDeepCrawlStrategy(DeepCrawlStrategy):
    """
    Breadth-First Search deep crawling strategy.

    Core functions:
    - link_discovery: extracts, filters, and (if needed) scores the outgoing URLs.
    - can_process_url: validates URL format and applies the filter chain.
    - _arun_batch / _arun_stream: level-by-level BFS in batch or streaming mode.
    """

    def __init__(
        self,
        max_depth: int,
        filter_chain: Optional[FilterChain] = None,
        url_scorer: Optional[URLScorer] = None,
        include_external: bool = False,
        score_threshold: float = -infinity,
        max_pages: int = infinity,
        logger: Optional[logging.Logger] = None,
    ):
        """
        Args:
            max_depth: maximum link depth to follow from the start URL.
            filter_chain: filters applied to candidate URLs (depth > 0 only).
                Defaults to a fresh empty FilterChain per instance.
            url_scorer: optional scorer; links scoring below score_threshold
                are skipped, and the best-scored links win when max_pages
                limits capacity.
            include_external: also follow links leaving the start domain.
            score_threshold: minimum acceptable link score (default: none).
            max_pages: cap on successfully crawled pages (default: unlimited).
            logger: destination for progress/warning messages.
        """
        self.max_depth = max_depth
        # Fix: the previous default `filter_chain=FilterChain()` was a shared
        # mutable default argument — a single FilterChain instance reused by
        # every strategy constructed without an explicit chain.
        self.filter_chain = filter_chain if filter_chain is not None else FilterChain()
        self.url_scorer = url_scorer
        self.include_external = include_external
        self.score_threshold = score_threshold
        self.max_pages = max_pages
        self.logger = logger or logging.getLogger(__name__)
        self.stats = TraversalStats(start_time=datetime.now())
        self._cancel_event = asyncio.Event()
        # Successful crawls only; failures do not consume max_pages capacity.
        self._pages_crawled = 0

    async def can_process_url(self, url: str, depth: int) -> bool:
        """
        Validate the URL's format and apply the filter chain.

        The start URL (depth 0) bypasses filtering so a crawl can always begin.
        """
        try:
            parsed = urlparse(url)
            if not parsed.scheme or not parsed.netloc:
                raise ValueError("Missing scheme or netloc")
            if parsed.scheme not in ("http", "https"):
                raise ValueError("Invalid scheme")
            if "." not in parsed.netloc:
                raise ValueError("Invalid domain")
        except Exception as e:
            self.logger.warning(f"Invalid URL: {url}, error: {e}")
            return False

        if depth != 0 and not await self.filter_chain.apply(url):
            return False

        return True

    async def link_discovery(
        self,
        result: CrawlResult,
        source_url: str,
        current_depth: int,
        visited: Set[str],
        next_level: List[Tuple[str, Optional[str]]],
        depths: Dict[str, int],
    ) -> None:
        """
        Extract links from `result`, validate and score them, and queue the
        survivors for the next BFS level.

        Each accepted URL is appended to next_level as (url, parent_url) and
        its depth is recorded in `depths`.
        """
        next_depth = current_depth + 1
        if next_depth > self.max_depth:
            return

        # If we've reached the max pages limit, don't discover new links.
        remaining_capacity = self.max_pages - self._pages_crawled
        if remaining_capacity <= 0:
            self.logger.info(f"Max pages limit ({self.max_pages}) reached, stopping link discovery")
            return

        # Internal links always; external links only when enabled.
        links = result.links.get("internal", [])
        if self.include_external:
            links += result.links.get("external", [])

        valid_links = []

        # First collect all valid links.
        for link in links:
            url = link.get("href")
            # Fix: also check `depths`, which is updated the moment a URL is
            # queued. `visited` alone only covers already-crawled levels, so
            # the same URL discovered by two pages of one level used to be
            # queued — crawled and counted against max_pages — twice.
            if url in visited or url in depths:
                continue
            if not await self.can_process_url(url, next_depth):
                self.stats.urls_skipped += 1
                continue

            # Score the URL if a scorer is provided.
            score = self.url_scorer.score(url) if self.url_scorer else 0

            # Skip URLs with scores below the threshold.
            if score < self.score_threshold:
                self.logger.debug(f"URL {url} skipped: score {score} below threshold {self.score_threshold}")
                self.stats.urls_skipped += 1
                continue

            valid_links.append((url, score))

        # More candidates than capacity: keep the best-scored ones.
        if len(valid_links) > remaining_capacity:
            if self.url_scorer:
                # Sort by score in descending order.
                valid_links.sort(key=lambda x: x[1], reverse=True)
            # Take only as many as we have capacity for.
            valid_links = valid_links[:remaining_capacity]
            self.logger.info(f"Limiting to {remaining_capacity} URLs due to max_pages limit")

        # Process the final selected links.
        for url, score in valid_links:
            # NOTE(review): this stamps the *parent* result's metadata, so only
            # the last discovered link's score survives — confirm intent.
            if score:
                result.metadata = result.metadata or {}
                result.metadata["score"] = score
            next_level.append((url, source_url))
            depths[url] = next_depth

    async def _arun_batch(
        self,
        start_url: str,
        crawler: AsyncWebCrawler,
        config: CrawlerRunConfig,
    ) -> List[CrawlResult]:
        """
        Batch (non-streaming) mode: crawl one BFS level at a time, then
        return all accumulated CrawlResults.
        """
        visited: Set[str] = set()
        # current_level holds tuples: (url, parent_url).
        current_level: List[Tuple[str, Optional[str]]] = [(start_url, None)]
        depths: Dict[str, int] = {start_url: 0}

        results: List[CrawlResult] = []

        while current_level and not self._cancel_event.is_set():
            next_level: List[Tuple[str, Optional[str]]] = []
            urls = [url for url, _ in current_level]
            visited.update(urls)

            # Clone the config to disable deep crawling recursion and enforce batch mode.
            batch_config = config.clone(deep_crawl_strategy=None, stream=False)
            batch_results = await crawler.arun_many(urls=urls, config=batch_config)

            # Update pages crawled counter — count only successful crawls.
            successful_results = [r for r in batch_results if r.success]
            self._pages_crawled += len(successful_results)

            for result in batch_results:
                url = result.url
                depth = depths.get(url, 0)
                result.metadata = result.metadata or {}
                result.metadata["depth"] = depth
                parent_url = next((parent for (u, parent) in current_level if u == url), None)
                result.metadata["parent_url"] = parent_url
                results.append(result)

                # Only discover links from successful crawls.
                if result.success:
                    # Link discovery handles the max pages limit internally.
                    await self.link_discovery(result, url, depth, visited, next_level, depths)

            current_level = next_level

        return results

    async def _arun_stream(
        self,
        start_url: str,
        crawler: AsyncWebCrawler,
        config: CrawlerRunConfig,
    ) -> AsyncGenerator[CrawlResult, None]:
        """
        Streaming mode: crawl one BFS level at a time and yield results
        immediately as they arrive.
        """
        visited: Set[str] = set()
        current_level: List[Tuple[str, Optional[str]]] = [(start_url, None)]
        depths: Dict[str, int] = {start_url: 0}

        while current_level and not self._cancel_event.is_set():
            next_level: List[Tuple[str, Optional[str]]] = []
            urls = [url for url, _ in current_level]
            visited.update(urls)

            stream_config = config.clone(deep_crawl_strategy=None, stream=True)
            stream_gen = await crawler.arun_many(urls=urls, config=stream_config)

            # Keep track of processed results for this batch.
            results_count = 0
            async for result in stream_gen:
                url = result.url
                depth = depths.get(url, 0)
                result.metadata = result.metadata or {}
                result.metadata["depth"] = depth
                parent_url = next((parent for (u, parent) in current_level if u == url), None)
                result.metadata["parent_url"] = parent_url

                # Count only successful crawls toward max_pages.
                if result.success:
                    self._pages_crawled += 1

                results_count += 1
                yield result

                # Only discover links from successful crawls.
                if result.success:
                    # Link discovery handles the max pages limit internally.
                    await self.link_discovery(result, url, depth, visited, next_level, depths)

            # If we didn't get results back (e.g. due to errors), avoid getting
            # stuck in an infinite loop by treating these URLs as visited while
            # not counting them toward the max_pages limit.
            if results_count == 0 and urls:
                self.logger.warning(f"No results returned for {len(urls)} URLs, marking as visited")

            current_level = next_level

    async def shutdown(self) -> None:
        """
        Clean up resources and signal cancellation of the crawl.
        """
        self._cancel_event.set()
        self.stats.end_time = datetime.now()
|
||||||
432
crawl4ai/deep_crawling/crazy.py
Normal file
432
crawl4ai/deep_crawling/crazy.py
Normal file
@@ -0,0 +1,432 @@
|
|||||||
|
from __future__ import annotations
|
||||||
|
# I just got crazy, trying to wrute K&R C but in Python. Right now I feel like I'm in a quantum state.
|
||||||
|
# I probably won't use this; I just want to leave it here. A century later, the future human race will be like, "WTF?"
|
||||||
|
|
||||||
|
# ------ Imports That Will Make You Question Reality ------ #
|
||||||
|
from functools import wraps
|
||||||
|
from contextvars import ContextVar
|
||||||
|
import inspect
|
||||||
|
|
||||||
|
from crawl4ai import CacheMode
|
||||||
|
from crawl4ai.async_configs import CrawlerRunConfig
|
||||||
|
from crawl4ai.models import CrawlResult, TraversalStats
|
||||||
|
from crawl4ai.deep_crawling.filters import FilterChain
|
||||||
|
from crawl4ai.async_webcrawler import AsyncWebCrawler
|
||||||
|
import time
|
||||||
|
import logging
|
||||||
|
from urllib.parse import urlparse
|
||||||
|
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from collections import deque
|
||||||
|
import asyncio
|
||||||
|
from typing import (
|
||||||
|
AsyncGenerator,
|
||||||
|
Dict,
|
||||||
|
List,
|
||||||
|
TypeVar,
|
||||||
|
Generic,
|
||||||
|
Tuple,
|
||||||
|
Callable,
|
||||||
|
Awaitable,
|
||||||
|
Union,
|
||||||
|
)
|
||||||
|
from functools import lru_cache
|
||||||
|
import mmh3
|
||||||
|
from bitarray import bitarray
|
||||||
|
import numpy as np
|
||||||
|
from heapq import heappush, heappop
|
||||||
|
|
||||||
|
# ------ Type Algebra Mastery ------ #
|
||||||
|
CrawlResultT = TypeVar("CrawlResultT", bound="CrawlResult")
|
||||||
|
PriorityT = TypeVar("PriorityT")
|
||||||
|
P = TypeVar("P")
|
||||||
|
|
||||||
|
# ------ Hyperscalar Context Management ------ #
|
||||||
|
deep_crawl_ctx = ContextVar("deep_crawl_stack", default=deque())
|
||||||
|
|
||||||
|
# ------ Algebraic Crawler Monoid ------ #
|
||||||
|
class TraversalContext:
    """Per-traversal bookkeeping: visited membership (Bloom filter), the
    scored frontier, per-URL depths and the priority function in use."""
    __slots__ = ('visited', 'frontier', 'depths', 'priority_fn', 'current_depth')

    def __init__(self,
                 priority_fn: Callable[[str], Awaitable[float]] = lambda _: 1.0):
        # Sized for ~1M items at a 1% false-positive rate.
        self.visited: BloomFilter = BloomFilter(10**6, 0.01)
        self.frontier: PriorityQueue = PriorityQueue()
        self.depths: Dict[str, int] = {}
        self.priority_fn = priority_fn
        self.current_depth = 0

    def clone_for_level(self) -> TraversalContext:
        """Monadic context propagation: copy visited/depths/depth state;
        the clone starts with an empty frontier."""
        cloned = TraversalContext(self.priority_fn)
        cloned.visited = self.visited.copy()
        cloned.depths = self.depths.copy()
        cloned.current_depth = self.current_depth
        return cloned
|
||||||
|
|
||||||
|
class PriorityQueue(Generic[PriorityT]):
    """Binary min-heap priority queue with FIFO ordering for equal priorities.

    Entries are stored as (priority, sequence_number, item). The monotonically
    increasing sequence number guarantees that ties never fall through to
    comparing the payload items themselves (which may not be orderable, e.g.
    tuples containing None) and preserves insertion order among equal
    priorities. The previous time.time() tiebreaker could collide within one
    clock tick and trigger exactly that payload comparison.
    """
    __slots__ = ('_heap', '_index', '_seq')

    def __init__(self):
        self._heap: List[Tuple[PriorityT, int, P]] = []
        # Membership map: item -> True while the item is in the heap.
        # (The previous implementation stored heap positions, which went
        # stale as soon as heappush/heappop reordered the heap.)
        self._index: Dict[P, bool] = {}
        self._seq = 0  # monotonic tiebreaker

    def insert(self, priority: PriorityT, item: P) -> None:
        """Add item with the given priority (lower value = extracted sooner)."""
        heappush(self._heap, (priority, self._seq, item))
        self._seq += 1
        self._index[item] = True

    def extract(self, top_n: int = 1) -> List[P]:
        """Pop up to top_n best-priority items, best first.

        Returns:
            The extracted items (may be fewer than top_n if the heap drains).

        Raises:
            IndexError: if the queue is empty.
        """
        items: List[P] = []
        for _ in range(top_n):
            if not self._heap:
                break
            _, _, item = heappop(self._heap)
            # pop(..., None) tolerates duplicate inserts of the same item.
            self._index.pop(item, None)
            items.append(item)
        if not items:
            raise IndexError("Priority queue empty")
        return items

    def is_empty(self) -> bool:
        """True when no items remain."""
        return not self._heap
|
||||||
|
|
||||||
|
class BloomFilter:
    """Space-efficient probabilistic membership test backed by murmur3.

    False positives occur at roughly the configured error rate; false
    negatives never occur.
    """
    __slots__ = ('size', 'hashes', 'bits')

    def __init__(self, capacity: int, error_rate: float):
        self.size = self._optimal_size(capacity, error_rate)
        self.hashes = self._optimal_hashes(capacity, self.size)
        self.bits = bitarray(self.size)
        self.bits.setall(False)

    @staticmethod
    def _optimal_size(n: int, p: float) -> int:
        """Bit-array size m = -n*ln(p) / (ln 2)^2 for n items at FP rate p."""
        return int(np.ceil(-(n * np.log(p)) / (np.log(2) ** 2)))

    @staticmethod
    def _optimal_hashes(n: int, m: int) -> int:
        """Hash count k = (m/n)*ln 2, the FP-rate-minimizing choice."""
        return int(np.ceil((m / n) * np.log(2)))

    def add(self, item: str) -> None:
        """Set the k hash-selected bits for item."""
        for seed in range(self.hashes):
            self.bits[mmh3.hash(item, seed) % self.size] = True

    def __contains__(self, item: str) -> bool:
        """Probabilistic membership: True may be a false positive."""
        return all(
            self.bits[mmh3.hash(item, seed) % self.size]
            for seed in range(self.hashes)
        )

    def copy(self) -> BloomFilter:
        """Independent duplicate sharing no bit storage with the original."""
        duplicate = object.__new__(BloomFilter)
        duplicate.size = self.size
        duplicate.hashes = self.hashes
        duplicate.bits = self.bits.copy()
        return duplicate

    def __len__(self) -> int:
        """
        Estimate the number of stored items from the set-bit count via
        n = -(m/k) * ln(1 - X/m), with m = bit-array size, k = hash count
        and X = number of set bits.
        """
        set_bits = self.bits.count(True)
        if set_bits == 0:
            return 0

        # Inverse Bloom-filter formula for cardinality estimation.
        return int(
            -(self.size / self.hashes) *
            np.log(1 - set_bits / self.size)
        )

    def bit_count(self) -> int:
        """Raw number of set bits in the filter."""
        return self.bits.count(True)

    def __repr__(self) -> str:
        return f"BloomFilter(est_items={len(self)}, bits={self.bit_count()}/{self.size})"
|
||||||
|
|
||||||
|
# ------ Hyper-Optimal Deep Crawl Core ------ #
|
||||||
|
class DeepCrawlDecorator:
    """Wraps AsyncWebCrawler.arun so a config carrying a deep_crawl_strategy
    transparently triggers a full traversal instead of a single fetch.

    NOTE(review): the ContextVar default is a single shared deque, and the
    wrapper is always an async generator — plain arun results are yielded,
    not returned. Confirm callers iterate rather than await.
    """
    def __init__(self, crawler: AsyncWebCrawler):
        # Crawler instance handed to the strategy when a deep crawl starts.
        self.crawler = crawler

    def __call__(self, original_arun: Callable) -> Callable:
        @wraps(original_arun)
        async def quantum_arun(url: str, config: CrawlerRunConfig = None, **kwargs):
            stack = deep_crawl_ctx.get()
            # Enter deep-crawl mode only at the top level: a non-empty stack
            # means we are already inside a traversal, so nested arun calls
            # fall through to the original single-page implementation.
            if config and config.deep_crawl_strategy and not stack:
                stack.append(self.crawler)
                try:
                    deep_crawl_ctx.set(stack)
                    async for result in config.deep_crawl_strategy.traverse(
                        start_url=url,
                        crawler=self.crawler,
                        config=config
                    ):
                        yield result
                finally:
                    # Always unwind the guard, even if traversal raises.
                    stack.pop()
                    deep_crawl_ctx.set(stack)
            else:
                result = await original_arun(url, config=config, **kwargs)
                yield result
        return quantum_arun
|
||||||
|
|
||||||
|
|
||||||
|
async def collect_results(url, crawler, config):
    """Invoke crawler.arun (restored to its undecorated implementation if
    needed) and normalize the outcome to a list of results."""
    undecorated = getattr(crawler, "original_arun")
    if id(getattr(crawler, "arun")) != id(undecorated):
        setattr(crawler, "arun", undecorated)

    outcome = crawler.arun(url, config=config)
    # An async generator is drained into a list...
    if inspect.isasyncgen(outcome):
        collected = []
        async for item in outcome:
            collected.append(item)
        return collected
    # ...otherwise await the coroutine and wrap a scalar result.
    awaited = await outcome
    return awaited if isinstance(awaited, list) else [awaited]
|
||||||
|
|
||||||
|
async def collect_many_results(url, crawler, config):
    """Invoke crawler.arun_many and normalize the outcome to a list,
    first restoring crawler.arun to its undecorated implementation."""
    undecorated = getattr(crawler, "original_arun")
    if id(getattr(crawler, "arun")) != id(undecorated):
        setattr(crawler, "arun", undecorated)

    outcome = crawler.arun_many(url, config=config)
    # An async generator is drained into a list...
    if inspect.isasyncgen(outcome):
        collected = []
        async for item in outcome:
            collected.append(item)
        return collected
    # ...otherwise await the coroutine and wrap a scalar result.
    awaited = await outcome
    return awaited if isinstance(awaited, list) else [awaited]
|
||||||
|
|
||||||
|
|
||||||
|
# ------ Deep Crawl Strategy Interface ------ #
|
||||||
|
# CrawlResult subtype bound, kept generic so strategies can return subclasses.
CrawlResultT = TypeVar("CrawlResultT", bound=CrawlResult)
# In batch mode we return List[CrawlResult] and in stream mode an AsyncGenerator.
RunManyReturn = Union[CrawlResultT, List[CrawlResultT], AsyncGenerator[CrawlResultT, None]]
|
||||||
|
|
||||||
|
|
||||||
|
class DeepCrawlStrategy(ABC):
    """Abstract contract for deep-crawl traversal strategies."""
    @abstractmethod
    async def traverse(self,
                       start_url: str,
                       crawler: AsyncWebCrawler,
                       config: CrawlerRunConfig) -> RunManyReturn:
        """Walk the link graph from start_url, producing CrawlResults
        (a list in batch mode, an async generator in stream mode)."""
        ...

    @abstractmethod
    def precompute_priority(self, url: str) -> Awaitable[float]:
        """Return an awaitable yielding the crawl priority for url."""
        pass

    @abstractmethod
    async def link_hypercube(self, result: CrawlResult) -> AsyncGenerator[str, None]:
        """Yield the outgoing links of a crawl result, best-first."""
        pass
|
||||||
|
|
||||||
|
# ------ BFS That Would Make Knuth Proud ------ #
|
||||||
|
|
||||||
|
def calculate_quantum_batch_size(
|
||||||
|
depth: int,
|
||||||
|
max_depth: int,
|
||||||
|
frontier_size: int,
|
||||||
|
visited_size: int
|
||||||
|
) -> int:
|
||||||
|
"""
|
||||||
|
Calculates optimal batch size for URL processing using quantum-inspired mathematical principles.
|
||||||
|
|
||||||
|
This function implements a sophisticated batch size calculation using:
|
||||||
|
1. Golden Ratio (φ) based scaling for optimal irrationality
|
||||||
|
2. Depth-aware amplitude modulation
|
||||||
|
3. Harmonic series dampening
|
||||||
|
4. Logarithmic growth control
|
||||||
|
5. Dynamic frontier adaptation
|
||||||
|
|
||||||
|
The formula follows the quantum harmonic oscillator principle:
|
||||||
|
N = ⌈φ^(2d) * log₂(|V|) * H(d)⁻¹ * min(20, |F|/10)⌉
|
||||||
|
where:
|
||||||
|
φ = Golden Ratio ((1 + √5) / 2)
|
||||||
|
d = depth factor (normalized remaining depth)
|
||||||
|
|V| = size of visited set
|
||||||
|
H(d) = d-th harmonic number
|
||||||
|
|F| = frontier size
|
||||||
|
|
||||||
|
Args:
|
||||||
|
depth (int): Current traversal depth
|
||||||
|
max_depth (int): Maximum allowed depth
|
||||||
|
frontier_size (int): Current size of frontier queue
|
||||||
|
visited_size (int): Number of URLs visited so far
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
int: Optimal batch size bounded between 1 and 100
|
||||||
|
|
||||||
|
Mathematical Properties:
|
||||||
|
- Maintains O(log n) growth with respect to visited size
|
||||||
|
- Provides φ-optimal distribution of resources
|
||||||
|
- Ensures quantum-like state transitions between depths
|
||||||
|
- Harmonically dampened to prevent exponential explosion
|
||||||
|
"""
|
||||||
|
# Golden ratio φ = (1 + √5) / 2
|
||||||
|
φ = (1 + 5 ** 0.5) / 2
|
||||||
|
|
||||||
|
# Calculate normalized depth factor [0, 1]
|
||||||
|
depth_factor = (max_depth - depth) / max_depth if depth < max_depth else 0
|
||||||
|
|
||||||
|
# Compute harmonic number for current depth
|
||||||
|
harmonic = sum(1/k for k in range(1, depth + 2))
|
||||||
|
|
||||||
|
# Calculate quantum batch size
|
||||||
|
batch_size = int(np.ceil(
|
||||||
|
(φ ** (depth_factor * 2)) * # Golden ratio scaling
|
||||||
|
np.log2(visited_size + 2) * # Logarithmic growth factor
|
||||||
|
(1 / harmonic) * # Harmonic dampening
|
||||||
|
max(1, min(20, frontier_size / 10)) # Frontier-aware scaling
|
||||||
|
))
|
||||||
|
|
||||||
|
# Enforce practical bounds
|
||||||
|
return max(1, min(100, batch_size))
|
||||||
|
|
||||||
|
|
||||||
|
class BFSDeepCrawlStrategy(DeepCrawlStrategy):
    """Experimental breadth-first deep crawl over a scored priority frontier.

    URLs are pulled from the frontier in adaptively sized batches (see
    calculate_quantum_batch_size) and crawled via collect_many_results.
    The file's own header marks this module as throwaway; several protocol
    methods below are non-functional — see the NOTE(review) comments.
    """
    # NOTE(review): __slots__ omits 'semaphore' (assigned in __init__);
    # this only works because the ABC base class still provides a __dict__.
    __slots__ = ('max_depth', 'filter_chain', 'priority_fn', 'stats', '_cancel')

    def __init__(self,
                 max_depth: int,
                 filter_chain: FilterChain = FilterChain(),
                 priority_fn: Callable[[str], Awaitable[float]] = lambda url: 1.0,
                 logger: logging.Logger = None):
        # NOTE(review): `filter_chain=FilterChain()` is a shared mutable
        # default, and the `logger` parameter is accepted but never stored.
        self.max_depth = max_depth
        self.filter_chain = filter_chain
        self.priority_fn = priority_fn
        self.stats = TraversalStats()
        self._cancel = asyncio.Event()
        # Generous concurrency gate; with one batch processed at a time it
        # effectively never blocks.
        self.semaphore = asyncio.Semaphore(1000)

    async def traverse(self,
                       start_url: str,
                       crawler: AsyncWebCrawler,
                       config: CrawlerRunConfig) -> RunManyReturn:
        """Async-generator BFS: yields CrawlResults annotated with depth
        and parent, expanding links until max_depth or cancellation."""
        ctx = TraversalContext(self.priority_fn)
        # Frontier entries are (url, parent_url, depth) keyed by priority.
        ctx.frontier.insert(self.priority_fn(start_url), (start_url, None, 0))
        ctx.visited.add(start_url)
        ctx.depths[start_url] = 0

        while not ctx.frontier.is_empty() and not self._cancel.is_set():
            # Adaptive batch size from depth, frontier and visited counts.
            top_n = calculate_quantum_batch_size(
                depth=ctx.current_depth,
                max_depth=self.max_depth,
                frontier_size=len(ctx.frontier._heap),
                visited_size=len(ctx.visited)
            )

            urls = ctx.frontier.extract(top_n=top_n)
            if urls:
                # Track the depth of the best-priority entry in this batch.
                ctx.current_depth = urls[0][2]

            async with self.semaphore:
                results = await collect_many_results([url for (url, parent, depth) in urls], crawler, config)
                # NOTE(review): results are matched to `urls` by position
                # (urls[ix]); this assumes arun_many preserves input order.
                for ix, result in enumerate(results):
                    url, parent, depth = result.url, urls[ix][1], urls[ix][2]
                    result.metadata['depth'] = depth
                    result.metadata['parent'] = parent
                    yield result

                    if depth < self.max_depth:
                        async for link in self.link_hypercube(result):
                            if link not in ctx.visited:
                                priority = self.priority_fn(link)
                                ctx.frontier.insert(priority, (link, url, depth + 1))
                                ctx.visited.add(link)
                                ctx.depths[link] = depth + 1

    # NOTE(review): lru_cache on an async method caches *coroutine objects*
    # (a cache hit hands back an already-awaited coroutine, which raises on
    # the second await) and keeps `self` alive for the cache's lifetime.
    @lru_cache(maxsize=65536)
    async def validate_url(self, url: str) -> bool:
        """Validate scheme/domain and apply the filter chain."""
        try:
            parsed = urlparse(url)
            return (parsed.scheme in {'http', 'https'}
                    and '.' in parsed.netloc
                    and await self.filter_chain.apply(url))
        except Exception:
            return False

    async def link_hypercube(self, result: CrawlResult) -> AsyncGenerator[str, None]:
        """Yield internal links of `result` in descending priority order.

        NOTE(review): filter() receives coroutine objects from the async
        validate_url — coroutines are always truthy, so no link is actually
        filtered out here; also `-self.priority_fn(x)` assumes priority_fn
        returns a plain number, not an awaitable.
        """
        links = (link['href'] for link in result.links.get('internal', []))
        validated = filter(self.validate_url, links)
        for link in sorted(validated, key=lambda x: -self.priority_fn(x)):
            yield link

    # NOTE(review): traverse() requires start_url/crawler/config, so both
    # iterator-protocol methods below would raise TypeError if ever used.
    def __aiter__(self) -> AsyncGenerator[CrawlResult, None]:
        return self.traverse()

    async def __anext__(self) -> CrawlResult:
        result = await self.traverse().__anext__()
        if result:
            return result
        raise StopAsyncIteration

    async def precompute_priority(self, url):
        # NOTE(review): delegates to the abstract base implementation,
        # which does nothing useful.
        return super().precompute_priority(url)

    async def shutdown(self):
        # Signal any in-flight traverse() loop to stop at its next check.
        self._cancel.set()
|
||||||
|
|
||||||
|
# ------ Example usage ------ #
async def main():
    """Example: run a bounded BFS deep crawl against the crawl4ai docs site."""
    strategy = BFSDeepCrawlStrategy(
        max_depth=2,
        # Shorter URLs score higher; the epsilon avoids division by zero.
        priority_fn=lambda url: 1.0 / (len(url) + 1e-9),  # Inverse length priority
        # filter_chain=FilterChain(...)
    )

    config: CrawlerRunConfig = CrawlerRunConfig(
        deep_crawl_strategy=strategy,
        stream=False,
        verbose=True,
        cache_mode=CacheMode.BYPASS
    )

    async with AsyncWebCrawler() as crawler:
        # Wrap arun with the deep-crawl decorator, keeping the original
        # callable reachable as `original_arun`.
        run_decorator = DeepCrawlDecorator(crawler)
        setattr(crawler, "original_arun", crawler.arun)
        crawler.arun = run_decorator(crawler.arun)
        start_time = time.perf_counter()
        # NOTE(review): stream=False usually implies a batch result, yet the
        # call is consumed with `async for` — confirm the decorator returns
        # an async iterable in batch mode.
        async for result in crawler.arun("https://docs.crawl4ai.com", config=config):
            print(f"🌀 {result.url} (Depth: {result.metadata['depth']})")
        print(f"Deep crawl completed in {time.perf_counter() - start_time:.2f}s")


if __name__ == "__main__":
    asyncio.run(main())
|
||||||
102
crawl4ai/deep_crawling/dfs_strategy.py
Normal file
102
crawl4ai/deep_crawling/dfs_strategy.py
Normal file
@@ -0,0 +1,102 @@
|
|||||||
|
# dfs_deep_crawl_strategy.py
|
||||||
|
from typing import AsyncGenerator, Optional, Set, Dict, List, Tuple
|
||||||
|
|
||||||
|
from ..models import CrawlResult
|
||||||
|
from .bfs_strategy import BFSDeepCrawlStrategy # noqa
|
||||||
|
from ..types import AsyncWebCrawler, CrawlerRunConfig
|
||||||
|
|
||||||
|
class DFSDeepCrawlStrategy(BFSDeepCrawlStrategy):
    """
    Depth-First Search (DFS) deep crawling strategy.

    Inherits URL validation and link discovery from BFSDeepCrawlStrategy.
    Overrides _arun_batch and _arun_stream to use a stack (LIFO) for DFS traversal.
    """
    async def _arun_batch(
        self,
        start_url: str,
        crawler: AsyncWebCrawler,
        config: CrawlerRunConfig,
    ) -> List[CrawlResult]:
        """
        Batch (non-streaming) DFS mode.
        Uses a stack to traverse URLs in DFS order, aggregating CrawlResults into a list.
        """
        visited: Set[str] = set()
        # Stack items: (url, parent_url, depth)
        stack: List[Tuple[str, Optional[str], int]] = [(start_url, None, 0)]
        depths: Dict[str, int] = {start_url: 0}
        results: List[CrawlResult] = []

        # The cancel event allows cooperative shutdown between page fetches.
        while stack and not self._cancel_event.is_set():
            url, parent, depth = stack.pop()
            # Skip anything already crawled or past the depth budget;
            # visited is only marked at pop time, so duplicates may sit on
            # the stack but are crawled at most once.
            if url in visited or depth > self.max_depth:
                continue
            visited.add(url)

            # Clone config to disable recursive deep crawling.
            batch_config = config.clone(deep_crawl_strategy=None, stream=False)
            url_results = await crawler.arun_many(urls=[url], config=batch_config)

            for result in url_results:
                # Annotate each result with traversal metadata.
                result.metadata = result.metadata or {}
                result.metadata["depth"] = depth
                result.metadata["parent_url"] = parent
                if self.url_scorer:
                    result.metadata["score"] = self.url_scorer.score(url)
                results.append(result)

                # Count only successful crawls toward max_pages limit
                if result.success:
                    self._pages_crawled += 1

                    # Only discover links from successful crawls.
                    # link_discovery (inherited) fills new_links/depths and
                    # consults `visited` — presumably it also enforces the
                    # max_pages budget; confirm in the base class.
                    new_links: List[Tuple[str, Optional[str]]] = []
                    await self.link_discovery(result, url, depth, visited, new_links, depths)

                    # Push new links in reverse order so the first discovered is processed next.
                    for new_url, new_parent in reversed(new_links):
                        new_depth = depths.get(new_url, depth + 1)
                        stack.append((new_url, new_parent, new_depth))
        return results

    async def _arun_stream(
        self,
        start_url: str,
        crawler: AsyncWebCrawler,
        config: CrawlerRunConfig,
    ) -> AsyncGenerator[CrawlResult, None]:
        """
        Streaming DFS mode.
        Uses a stack to traverse URLs in DFS order and yields CrawlResults as they become available.
        """
        visited: Set[str] = set()
        # Stack items: (url, parent_url, depth) — same shape as batch mode.
        stack: List[Tuple[str, Optional[str], int]] = [(start_url, None, 0)]
        depths: Dict[str, int] = {start_url: 0}

        while stack and not self._cancel_event.is_set():
            url, parent, depth = stack.pop()
            if url in visited or depth > self.max_depth:
                continue
            visited.add(url)

            # Clone config so the nested crawl does not recurse, but keeps streaming.
            stream_config = config.clone(deep_crawl_strategy=None, stream=True)
            stream_gen = await crawler.arun_many(urls=[url], config=stream_config)
            async for result in stream_gen:
                result.metadata = result.metadata or {}
                result.metadata["depth"] = depth
                result.metadata["parent_url"] = parent
                if self.url_scorer:
                    result.metadata["score"] = self.url_scorer.score(url)
                # Yield immediately so consumers see results as they arrive.
                yield result

                # Only count successful crawls toward max_pages limit
                # and only discover links from successful crawls
                if result.success:
                    self._pages_crawled += 1

                    new_links: List[Tuple[str, Optional[str]]] = []
                    await self.link_discovery(result, url, depth, visited, new_links, depths)
                    for new_url, new_parent in reversed(new_links):
                        new_depth = depths.get(new_url, depth + 1)
                        stack.append((new_url, new_parent, new_depth))
|
||||||
648
crawl4ai/deep_crawling/filters.py
Normal file
648
crawl4ai/deep_crawling/filters.py
Normal file
@@ -0,0 +1,648 @@
|
|||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from typing import List, Pattern, Set, Union
|
||||||
|
from urllib.parse import urlparse
|
||||||
|
from array import array
|
||||||
|
import re
|
||||||
|
import logging
|
||||||
|
from functools import lru_cache
|
||||||
|
import fnmatch
|
||||||
|
from dataclasses import dataclass
|
||||||
|
import weakref
|
||||||
|
import math
|
||||||
|
from collections import defaultdict
|
||||||
|
from typing import Dict
|
||||||
|
from ..utils import HeadPeekr
|
||||||
|
import asyncio
|
||||||
|
import inspect
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class FilterStats:
    """Compact counters for a URL filter: total, passed, and rejected URLs.

    Backed by a single unsigned-int array so the three counters live in one
    small C buffer; sibling classes bump the slots by index
    (0 = total, 1 = passed, 2 = rejected).
    """

    __slots__ = ("_counters",)

    def __init__(self):
        # One contiguous buffer of three unsigned ints.
        self._counters = array("I", (0, 0, 0))

    @property
    def total_urls(self):
        """How many URLs this filter has examined."""
        return self._counters[0]

    @property
    def passed_urls(self):
        """How many URLs were accepted."""
        return self._counters[1]

    @property
    def rejected_urls(self):
        """How many URLs were rejected."""
        return self._counters[2]
|
||||||
|
|
||||||
|
|
||||||
|
class URLFilter(ABC):
    """Optimized base filter class.

    Subclasses implement ``apply(url)`` (sync or async) and call
    ``_update_stats`` to record each accept/reject decision.
    """

    __slots__ = ("name", "stats", "_logger_ref")

    def __init__(self, name: str = None):
        # Human-readable filter name; defaults to the subclass name.
        self.name = name or self.__class__.__name__
        self.stats = FilterStats()
        # Lazy logger initialization using weakref — the logger is only
        # created on first access of the `logger` property.
        self._logger_ref = None

    @property
    def logger(self):
        # Re-create the weakref if never initialized or if the referent was
        # collected (the logging module normally keeps loggers alive).
        if self._logger_ref is None or self._logger_ref() is None:
            logger = logging.getLogger(f"urlfilter.{self.name}")
            self._logger_ref = weakref.ref(logger)
        return self._logger_ref()

    @abstractmethod
    def apply(self, url: str) -> bool:
        """Return True to accept `url`, False to reject it."""
        pass

    def _update_stats(self, passed: bool):
        """Record one decision in the shared counters (total/passed/rejected)."""
        # Use direct array index for speed; bools add as 0/1.
        self.stats._counters[0] += 1  # total
        self.stats._counters[1] += passed  # passed
        self.stats._counters[2] += not passed  # rejected
|
||||||
|
|
||||||
|
|
||||||
|
class FilterChain:
    """Apply an ordered collection of URL filters with early rejection.

    Synchronous filters short-circuit inline; async filters are collected
    and awaited together with ``asyncio.gather``.
    """

    __slots__ = ("filters", "stats", "_logger_ref")

    def __init__(self, filters: List["URLFilter"] = None):
        """
        Args:
            filters: Filters to apply, in order. Stored as an immutable
                tuple for fast iteration.
        """
        self.filters = tuple(filters or [])  # Immutable tuple for speed
        self.stats = FilterStats()
        self._logger_ref = None  # Lazy logger creation (see `logger`)

    @property
    def logger(self):
        """Lazily-created chain logger, held via weakref."""
        if self._logger_ref is None or self._logger_ref() is None:
            logger = logging.getLogger("urlfilter.chain")
            self._logger_ref = weakref.ref(logger)
        return self._logger_ref()

    def add_filter(self, filter_: "URLFilter") -> "FilterChain":
        """Add a filter to the chain and return self for method chaining.

        Fix: ``self.filters`` is a tuple, so the previous implementation's
        ``self.filters.append(filter_)`` raised AttributeError on every call.
        Rebuild the tuple instead, preserving immutability.
        """
        self.filters = self.filters + (filter_,)
        return self  # Enable method chaining

    async def apply(self, url: str) -> bool:
        """Apply all filters; return False as soon as any filter rejects."""
        self.stats._counters[0] += 1  # Total processed URLs

        tasks = []
        for f in self.filters:
            result = f.apply(url)

            if inspect.isawaitable(result):
                tasks.append(result)  # Defer async filters; awaited below
            elif not result:  # Sync rejection: short-circuit
                # Fix: close any coroutines already collected so they do
                # not emit "coroutine was never awaited" warnings.
                for pending in tasks:
                    close = getattr(pending, "close", None)
                    if close is not None:
                        close()
                self.stats._counters[2] += 1  # Sync rejected
                return False

        if tasks:
            results = await asyncio.gather(*tasks)

            # Count how many async filters rejected
            rejections = results.count(False)
            self.stats._counters[2] += rejections

            if not all(results):
                return False  # Stop early if any filter rejected

        self.stats._counters[1] += 1  # Passed
        return True
|
||||||
|
|
||||||
|
|
||||||
|
class URLPatternFilter(URLFilter):
    """Pattern filter balancing speed and completeness.

    Patterns are sorted at construction time into specialized buckets
    (suffix, prefix, domain, regex/path) so `apply` can do the cheapest
    checks first. A URL passes if ANY pattern matches.
    """

    __slots__ = (
        "_simple_suffixes",
        "_simple_prefixes",
        "_domain_patterns",
        "_path_patterns",
    )

    # Bucket identifiers returned by _categorize_pattern.
    PATTERN_TYPES = {
        "SUFFIX": 1,  # *.html
        "PREFIX": 2,  # /foo/*
        "DOMAIN": 3,  # *.example.com
        "PATH": 4,  # Everything else
        "REGEX": 5,
    }

    def __init__(
        self,
        patterns: Union[str, Pattern, List[Union[str, Pattern]]],
        use_glob: bool = True,
    ):
        """
        Args:
            patterns: One or more glob strings, regex strings, or compiled
                patterns.
            use_glob: Currently unused — glob translation always happens for
                complex string patterns. NOTE(review): confirm whether this
                flag should gate fnmatch translation.
        """
        super().__init__()
        patterns = [patterns] if isinstance(patterns, (str, Pattern)) else patterns

        self._simple_suffixes = set()
        self._simple_prefixes = set()
        self._domain_patterns = []
        self._path_patterns = []

        for pattern in patterns:
            pattern_type = self._categorize_pattern(pattern)
            self._add_pattern(pattern, pattern_type)

    def _categorize_pattern(self, pattern: str) -> int:
        """Categorize pattern for specialized handling."""
        # Compiled patterns (non-str) go to the generic PATH bucket.
        if not isinstance(pattern, str):
            return self.PATTERN_TYPES["PATH"]

        # Check if it's a regex pattern (anchors or \d escape are treated
        # as regex markers; other regex syntax falls through to PATH/glob).
        if pattern.startswith("^") or pattern.endswith("$") or "\\d" in pattern:
            return self.PATTERN_TYPES["REGEX"]

        if pattern.count("*") == 1:
            if pattern.startswith("*."):
                return self.PATTERN_TYPES["SUFFIX"]
            if pattern.endswith("/*"):
                return self.PATTERN_TYPES["PREFIX"]

        if "://" in pattern and pattern.startswith("*."):
            return self.PATTERN_TYPES["DOMAIN"]

        return self.PATTERN_TYPES["PATH"]

    def _add_pattern(self, pattern: str, pattern_type: int):
        """Add pattern to appropriate matcher."""
        if pattern_type == self.PATTERN_TYPES["REGEX"]:
            # For regex patterns, compile directly without glob translation
            if isinstance(pattern, str) and (
                pattern.startswith("^") or pattern.endswith("$") or "\\d" in pattern
            ):
                self._path_patterns.append(re.compile(pattern))
                return
        elif pattern_type == self.PATTERN_TYPES["SUFFIX"]:
            # "*.html" -> store "html"
            self._simple_suffixes.add(pattern[2:])
        elif pattern_type == self.PATTERN_TYPES["PREFIX"]:
            # "/foo/*" -> store "/foo"
            self._simple_prefixes.add(pattern[:-2])
        elif pattern_type == self.PATTERN_TYPES["DOMAIN"]:
            # "*." becomes a one-label wildcard before the dot.
            self._domain_patterns.append(re.compile(pattern.replace("*.", r"[^/]+\.")))
        else:
            if isinstance(pattern, str):
                # Handle complex glob patterns
                if "**" in pattern:
                    pattern = pattern.replace("**", ".*")
                if "{" in pattern:
                    # Convert {a,b} to (a|b)
                    pattern = re.sub(
                        r"\{([^}]+)\}",
                        lambda m: f'({"|".join(m.group(1).split(","))})',
                        pattern,
                    )
                pattern = fnmatch.translate(pattern)
            self._path_patterns.append(
                pattern if isinstance(pattern, Pattern) else re.compile(pattern)
            )

    # NOTE(review): lru_cache on an instance method keys on `self` and keeps
    # every instance alive for the cache's lifetime (ruff B019) — confirm
    # filters are few and long-lived before relying on this.
    @lru_cache(maxsize=10000)
    def apply(self, url: str) -> bool:
        """Hierarchical pattern matching: cheapest buckets first; any hit accepts."""
        # Quick suffix check (*.html)
        if self._simple_suffixes:
            path = url.split("?")[0]
            if path.split("/")[-1].split(".")[-1] in self._simple_suffixes:
                self._update_stats(True)
                return True

        # Domain check
        if self._domain_patterns:
            for pattern in self._domain_patterns:
                if pattern.match(url):
                    self._update_stats(True)
                    return True

        # Prefix check (/foo/*)
        # NOTE(review): `path` here is the full URL minus the query string,
        # so "/foo" prefixes only match relative URLs — confirm intended.
        if self._simple_prefixes:
            path = url.split("?")[0]
            if any(path.startswith(p) for p in self._simple_prefixes):
                self._update_stats(True)
                return True

        # Complex patterns
        if self._path_patterns:
            if any(p.search(url) for p in self._path_patterns):
                self._update_stats(True)
                return True

        self._update_stats(False)
        return False
|
||||||
|
|
||||||
|
|
||||||
|
class ContentTypeFilter(URLFilter):
    """Optimized content type filter using fast lookups.

    Accepts a URL when its file extension maps to one of the allowed MIME
    type substrings (or when the URL has no extension at all).
    """

    __slots__ = ("allowed_types", "_ext_map", "_check_extension")

    # Fast extension to mime type mapping
    _MIME_MAP = {
        # Text Formats
        "txt": "text/plain",
        "html": "text/html",
        "htm": "text/html",
        "xhtml": "application/xhtml+xml",
        "css": "text/css",
        "csv": "text/csv",
        "ics": "text/calendar",
        "js": "application/javascript",
        # Images
        "bmp": "image/bmp",
        "gif": "image/gif",
        "jpeg": "image/jpeg",
        "jpg": "image/jpeg",
        "png": "image/png",
        "svg": "image/svg+xml",
        "tiff": "image/tiff",
        "ico": "image/x-icon",
        "webp": "image/webp",
        # Audio
        "mp3": "audio/mpeg",
        "wav": "audio/wav",
        "ogg": "audio/ogg",
        "m4a": "audio/mp4",
        "aac": "audio/aac",
        # Video
        "mp4": "video/mp4",
        "mpeg": "video/mpeg",
        "webm": "video/webm",
        "avi": "video/x-msvideo",
        "mov": "video/quicktime",
        "flv": "video/x-flv",
        "wmv": "video/x-ms-wmv",
        "mkv": "video/x-matroska",
        # Applications
        "json": "application/json",
        "xml": "application/xml",
        "pdf": "application/pdf",
        "zip": "application/zip",
        "gz": "application/gzip",
        "tar": "application/x-tar",
        "rar": "application/vnd.rar",
        "7z": "application/x-7z-compressed",
        "exe": "application/vnd.microsoft.portable-executable",
        "msi": "application/x-msdownload",
        # Fonts
        "woff": "font/woff",
        "woff2": "font/woff2",
        "ttf": "font/ttf",
        "otf": "font/otf",
        # Microsoft Office
        "doc": "application/msword",
        "dot": "application/msword",
        "docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
        "xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
        "xls": "application/vnd.ms-excel",
        "ppt": "application/vnd.ms-powerpoint",
        "pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
        # OpenDocument Formats
        "odt": "application/vnd.oasis.opendocument.text",
        "ods": "application/vnd.oasis.opendocument.spreadsheet",
        "odp": "application/vnd.oasis.opendocument.presentation",
        # Archives
        "tar.gz": "application/gzip",
        "tgz": "application/gzip",
        "bz2": "application/x-bzip2",
        # Others
        "rtf": "application/rtf",
        "apk": "application/vnd.android.package-archive",
        "epub": "application/epub+zip",
        "jar": "application/java-archive",
        "swf": "application/x-shockwave-flash",
        "midi": "audio/midi",
        "mid": "audio/midi",
        "ps": "application/postscript",
        "ai": "application/postscript",
        "eps": "application/postscript",
        # Custom or less common
        "bin": "application/octet-stream",
        "dmg": "application/x-apple-diskimage",
        "iso": "application/x-iso9660-image",
        "deb": "application/x-debian-package",
        "rpm": "application/x-rpm",
        "sqlite": "application/vnd.sqlite3",
        # Placeholder
        "unknown": "application/octet-stream",  # Fallback for unknown file types
    }

    @staticmethod
    @lru_cache(maxsize=1000)
    def _extract_extension(url: str) -> str:
        """Extract the lowercase file extension from a URL ("" if none)."""
        # Remove scheme (http://, https://) if present
        if "://" in url:
            url = url.split("://", 1)[-1]  # Get everything after '://'

        # Remove domain (everything up to the first '/')
        path_start = url.find("/")
        path = url[path_start:] if path_start != -1 else ""

        # Extract last filename in path
        filename = path.rsplit("/", 1)[-1] if "/" in path else ""

        # Extract and validate extension
        if "." not in filename:
            return ""

        return filename.rpartition(".")[-1].lower()

    def __init__(
        self,
        allowed_types: Union[str, List[str]],
        check_extension: bool = True,
        ext_map: Dict[str, str] = _MIME_MAP,
    ):
        """
        Args:
            allowed_types: MIME type substrings to accept (e.g. "text/html"
                or just "text").
            check_extension: When False, `apply` accepts every URL.
            ext_map: extension -> MIME mapping to match against; defaults to
                the built-in _MIME_MAP.
        """
        super().__init__()
        # Normalize and store as frozenset for fast lookup
        self.allowed_types = frozenset(
            t.lower()
            for t in (
                allowed_types if isinstance(allowed_types, list) else [allowed_types]
            )
        )
        self._check_extension = check_extension

        # Pre-compute the set of acceptable extensions for the allowed types.
        # Fix: the previous implementation iterated self._MIME_MAP here,
        # silently ignoring a caller-supplied `ext_map`.
        self._ext_map = frozenset(
            ext
            for ext, mime in ext_map.items()
            if any(allowed in mime for allowed in self.allowed_types)
        )

    # NOTE(review): lru_cache on an instance method keys on `self` and keeps
    # instances alive for the cache's lifetime (ruff B019) — acceptable for
    # a handful of long-lived filters; confirm usage pattern.
    @lru_cache(maxsize=1000)
    def _check_url_cached(self, url: str) -> bool:
        """Cached URL checking; extension-less URLs are always accepted."""
        if not self._check_extension:
            return True
        ext = self._extract_extension(url)
        if not ext:
            return True

        return ext in self._ext_map

    def apply(self, url: str) -> bool:
        """Fast extension check with caching."""
        result = self._check_url_cached(url)
        self._update_stats(result)
        return result
|
||||||
|
|
||||||
|
|
||||||
|
class DomainFilter(URLFilter):
    """Allow/block-list filter on a URL's host, with cached host extraction.

    Blocked domains always lose; if an allow-list exists, only hosts on it
    pass; with no lists configured, everything passes.
    """

    __slots__ = ("_allowed_domains", "_blocked_domains", "_domain_cache")

    # Captures the host portion between "://" and the next "/".
    _DOMAIN_REGEX = re.compile(r"://([^/]+)")

    def __init__(
        self,
        allowed_domains: Union[str, List[str]] = None,
        blocked_domains: Union[str, List[str]] = None,
    ):
        super().__init__()

        # Frozensets give immutable O(1) membership tests. An empty/None
        # allow-list is stored as None, meaning "accept any host".
        if allowed_domains:
            self._allowed_domains = frozenset(self._normalize_domains(allowed_domains))
        else:
            self._allowed_domains = None
        if blocked_domains:
            self._blocked_domains = frozenset(self._normalize_domains(blocked_domains))
        else:
            self._blocked_domains = frozenset()

    @staticmethod
    def _normalize_domains(domains: Union[str, List[str]]) -> Set[str]:
        """Lowercase the input, accepting a single string or a list."""
        if isinstance(domains, str):
            domains = [domains]
        return {d.lower() for d in domains}

    @staticmethod
    @lru_cache(maxsize=10000)
    def _extract_domain(url: str) -> str:
        """Pull the lowercase host out of `url` ("" when no host is found)."""
        match = DomainFilter._DOMAIN_REGEX.search(url)
        if match is None:
            return ""
        return match.group(1).lower()

    def apply(self, url: str) -> bool:
        """Decide acceptance for `url` based on the configured lists."""
        # Fast path: nothing configured, accept everything.
        if not self._blocked_domains and self._allowed_domains is None:
            self._update_stats(True)
            return True

        host = self._extract_domain(url)

        # Block-list wins outright.
        if host in self._blocked_domains:
            self._update_stats(False)
            return False

        # No allow-list means any non-blocked host passes; otherwise the
        # host must be explicitly allowed.
        verdict = True if self._allowed_domains is None else host in self._allowed_domains
        self._update_stats(verdict)
        return verdict
|
||||||
|
|
||||||
|
|
||||||
|
class ContentRelevanceFilter(URLFilter):
    """BM25-based relevance filter using head section content.

    Fetches only the page's <head>, builds a weighted pseudo-document from
    title and meta tags, and accepts the URL when its BM25 score against the
    query meets `threshold`.
    """

    __slots__ = ("query_terms", "threshold", "k1", "b", "avgdl")

    def __init__(
        self,
        query: str,
        threshold: float,
        k1: float = 1.2,
        b: float = 0.75,
        avgdl: int = 1000,
    ):
        """
        Args:
            query: Free-text relevance query.
            threshold: Minimum BM25 score required to accept a URL.
            k1: TF saturation parameter.
            b: Length normalization parameter.
            avgdl: Assumed average document length (empirical value).
        """
        super().__init__(name="BM25RelevanceFilter")
        self.query_terms = self._tokenize(query)
        self.threshold = threshold
        self.k1 = k1  # TF saturation parameter
        self.b = b  # Length normalization parameter
        self.avgdl = avgdl  # Average document length (empirical value)

    async def apply(self, url: str) -> bool:
        # Network fetch of the head section only; an unreachable or
        # head-less page is rejected outright.
        head_content = await HeadPeekr.peek_html(url)
        if not head_content:
            self._update_stats(False)
            return False

        # Field extraction with weighting
        fields = {
            "title": HeadPeekr.get_title(head_content) or "",
            "meta": HeadPeekr.extract_meta_tags(head_content),
        }
        doc_text = self._build_document(fields)

        score = self._bm25(doc_text)
        decision = score >= self.threshold
        self._update_stats(decision)
        return decision

    def _build_document(self, fields: Dict) -> str:
        """Weighted document construction.

        Field weighting is done by *string repetition*: the title text is
        repeated 3x and the meta description 2x so their terms contribute
        proportionally more term frequency to BM25.
        """
        return " ".join(
            [
                fields["title"] * 3,  # Title weight
                fields["meta"].get("description", "") * 2,
                fields["meta"].get("keywords", ""),
                " ".join(fields["meta"].values()),
            ]
        )

    def _tokenize(self, text: str) -> List[str]:
        """Fast case-insensitive tokenization (whitespace split)."""
        return text.lower().split()

    def _bm25(self, document: str) -> float:
        """Optimized BM25 implementation for head sections.

        Uses a simplified IDF (no corpus statistics available for a single
        head document).
        """
        doc_terms = self._tokenize(document)
        doc_len = len(doc_terms)
        tf = defaultdict(int)

        for term in doc_terms:
            tf[term] += 1

        score = 0.0
        for term in set(self.query_terms):
            term_freq = tf[term]
            idf = math.log((1 + 1) / (term_freq + 0.5) + 1)  # Simplified IDF
            numerator = term_freq * (self.k1 + 1)
            denominator = term_freq + self.k1 * (
                1 - self.b + self.b * (doc_len / self.avgdl)
            )
            score += idf * (numerator / denominator)

        return score
|
||||||
|
|
||||||
|
|
||||||
|
class SEOFilter(URLFilter):
    """Quantitative SEO quality assessment filter using head section analysis.

    Scores seven weighted ranking factors from the fetched <head> and the
    URL shape; accepts when the weighted sum reaches `threshold`.
    """

    __slots__ = ("threshold", "_weights", "_kw_patterns")

    # Based on SEMrush/Google ranking factors research
    DEFAULT_WEIGHTS = {
        "title_length": 0.15,
        "title_kw": 0.18,
        "meta_description": 0.12,
        "canonical": 0.10,
        "robot_ok": 0.20,  # Most critical factor
        "schema_org": 0.10,
        "url_quality": 0.15,
    }

    def __init__(
        self,
        threshold: float = 0.65,
        keywords: List[str] = None,
        weights: Dict[str, float] = None,
    ):
        """
        Args:
            threshold: Minimum weighted score to accept a URL.
            keywords: Optional keywords matched (case-insensitive, whole
                word) against the page title.
            weights: Per-factor weights; defaults to DEFAULT_WEIGHTS.
        """
        super().__init__(name="SEOFilter")
        self.threshold = threshold
        self._weights = weights or self.DEFAULT_WEIGHTS
        # Single compiled alternation of escaped keywords, or None.
        self._kw_patterns = (
            re.compile(
                r"\b({})\b".format("|".join(map(re.escape, keywords or []))), re.I
            )
            if keywords
            else None
        )

    async def apply(self, url: str) -> bool:
        # Fetches only the head section; unreachable pages are rejected.
        head_content = await HeadPeekr.peek_html(url)
        if not head_content:
            self._update_stats(False)
            return False

        meta = HeadPeekr.extract_meta_tags(head_content)
        title = HeadPeekr.get_title(head_content) or ""
        parsed_url = urlparse(url)

        scores = {
            "title_length": self._score_title_length(title),
            "title_kw": self._score_keyword_presence(title),
            "meta_description": self._score_meta_description(
                meta.get("description", "")
            ),
            "canonical": self._score_canonical(meta.get("canonical"), url),
            "robot_ok": 1.0 if "noindex" not in meta.get("robots", "") else 0.0,
            "schema_org": self._score_schema_org(head_content),
            "url_quality": self._score_url_quality(parsed_url),
        }

        total_score = sum(
            weight * scores[factor] for factor, weight in self._weights.items()
        )

        decision = total_score >= self.threshold
        self._update_stats(decision)
        return decision

    def _score_title_length(self, title: str) -> float:
        """Score title length: 50-60 chars ideal, 40-70 acceptable."""
        length = len(title)
        if 50 <= length <= 60:
            return 1.0
        if 40 <= length < 50 or 60 < length <= 70:
            return 0.7
        return 0.3  # Poor length

    def _score_keyword_presence(self, text: str) -> float:
        """Score keyword hits in `text`; 0.3 per match, capped at 1.0."""
        if not self._kw_patterns:
            return 0.0
        matches = len(self._kw_patterns.findall(text))
        return min(matches * 0.3, 1.0)  # Max 3 matches

    def _score_meta_description(self, desc: str) -> float:
        """Score description length: 140-160 ideal, 120-200 acceptable."""
        length = len(desc)
        if 140 <= length <= 160:
            return 1.0
        return 0.5 if 120 <= length <= 200 else 0.2

    def _score_canonical(self, canonical: str, original: str) -> float:
        """Neutral when absent; full score only for an exact self-canonical."""
        if not canonical:
            return 0.5  # Neutral score
        return 1.0 if canonical == original else 0.2

    def _score_schema_org(self, html: str) -> float:
        # Detect any schema.org markup in head (JSON-LD script tag).
        return (
            1.0
            if re.search(r'<script[^>]+type=["\']application/ld\+json', html)
            else 0.0
        )

    def _score_url_quality(self, parsed_url) -> float:
        """Multiplicative penalties for long/parametrized/dated/underscored paths."""
        score = 1.0
        path = parsed_url.path.lower()

        # Penalty factors
        if len(path) > 80:
            score *= 0.7
        if re.search(r"\d{4}", path):
            score *= 0.8  # Numbers in path
        if parsed_url.query:
            score *= 0.6  # URL parameters
        if "_" in path:
            score *= 0.9  # Underscores vs hyphens

        return score
|
||||||
519
crawl4ai/deep_crawling/scorers.py
Normal file
519
crawl4ai/deep_crawling/scorers.py
Normal file
@@ -0,0 +1,519 @@
|
|||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from typing import List, Dict, Optional
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from urllib.parse import urlparse, unquote
|
||||||
|
import re
|
||||||
|
import logging
|
||||||
|
from functools import lru_cache
|
||||||
|
from array import array
|
||||||
|
import ctypes
|
||||||
|
import platform
|
||||||
|
PLATFORM = platform.system()
|
||||||
|
|
||||||
|
# Pre-computed scores for common year differences
|
||||||
|
_SCORE_LOOKUP = [1.0, 0.5, 0.3333333333333333, 0.25]
|
||||||
|
|
||||||
|
# Pre-computed freshness scores indexed by age in years (0 = current year)
|
||||||
|
_FRESHNESS_SCORES = [
|
||||||
|
1.0, # Current year
|
||||||
|
0.9, # Last year
|
||||||
|
0.8, # 2 years ago
|
||||||
|
0.7, # 3 years ago
|
||||||
|
0.6, # 4 years ago
|
||||||
|
0.5, # 5 years ago
|
||||||
|
]
|
||||||
|
|
||||||
|
class ScoringStats:
    """Running statistics (count, sum, min, max) for URL scores.

    Fix: the previous "lazy" min/max tracking was broken — `update` only
    refreshed min/max when they were already non-None (which never happened),
    and `get_min`/`get_max` then returned the *mean* instead of the extremes.
    Min/max are now tracked eagerly on each update; with no scores recorded
    both getters return 0.0 (matching the old empty-case behavior).
    """

    __slots__ = ('_urls_scored', '_total_score', '_min_score', '_max_score')

    def __init__(self):
        self._urls_scored = 0
        self._total_score = 0.0
        self._min_score = None  # None until the first score arrives
        self._max_score = None

    def update(self, score: float) -> None:
        """Fold one score into the running statistics."""
        self._urls_scored += 1
        self._total_score += score

        if self._min_score is None or score < self._min_score:
            self._min_score = score
        if self._max_score is None or score > self._max_score:
            self._max_score = score

    def get_average(self) -> float:
        """Mean of all recorded scores (0.0 when none recorded)."""
        return self._total_score / self._urls_scored if self._urls_scored else 0.0

    def get_min(self) -> float:
        """Smallest recorded score (0.0 when none recorded)."""
        return 0.0 if self._min_score is None else self._min_score

    def get_max(self) -> float:
        """Largest recorded score (0.0 when none recorded)."""
        return 0.0 if self._max_score is None else self._max_score
|
||||||
|
class URLScorer(ABC):
    """Abstract base for URL scoring strategies.

    Subclasses provide _calculate_score(); score() applies the configured
    weight and records the result in the per-scorer statistics.
    """

    __slots__ = ('_weight', '_stats')

    def __init__(self, weight: float = 1.0):
        # Round-trip through a C float so the stored weight has
        # single-precision (float32) value semantics.
        self._weight = ctypes.c_float(weight).value
        self._stats = ScoringStats()

    @abstractmethod
    def _calculate_score(self, url: str) -> float:
        """Calculate raw score for URL."""

    def score(self, url: str) -> float:
        """Return the weighted score for ``url``, recording it in stats."""
        weighted = self._weight * self._calculate_score(url)
        self._stats.update(weighted)
        return weighted

    @property
    def stats(self):
        """Access to scoring statistics."""
        return self._stats

    @property
    def weight(self):
        """Effective (float32-truncated) weight multiplier."""
        return self._weight
|
||||||
|
|
||||||
|
class CompositeScorer(URLScorer):
    """Combine multiple scoring strategies into one aggregate score.

    Optimized for:
    - Fast sequential scoring over all child scorers
    - Memory-efficient aggregation via a pre-allocated float32 array
    """

    __slots__ = ('_scorers', '_normalize', '_weights_array', '_score_array')

    def __init__(self, scorers: List[URLScorer], normalize: bool = True):
        """Initialize composite scorer combining multiple scoring strategies.

        Args:
            scorers: List of scoring strategies to combine
            normalize: Whether to normalize final score by scorer count
        """
        super().__init__(weight=1.0)
        self._scorers = scorers
        self._normalize = normalize

        # Pre-allocate float32 arrays for per-scorer weights and scores.
        # NOTE(review): _weights_array is kept for interface compatibility
        # but is not read here — the child score() calls already apply
        # their own weights.
        self._weights_array = array('f', [s.weight for s in scorers])
        self._score_array = array('f', [0.0] * len(scorers))

    def _calculate_score(self, url: str) -> float:
        """Calculate combined score from all scoring strategies.

        Bug fix: this method was wrapped in @lru_cache. Caching an instance
        method keys the cache on ``self`` and keeps every instance alive for
        the cache's lifetime (flake8-bugbear B019); worse, a cache hit
        skipped the child ``scorer.score()`` calls, so child statistics
        silently stopped tracking repeated URLs. The cache is removed.

        Args:
            url: URL to score

        Returns:
            Combined and optionally normalized score
        """
        total_score = 0.0
        scores = self._score_array

        # Each child applies its own weight inside score() and updates
        # its own stats. Accumulate the float32-rounded stored value to
        # keep the original numeric behavior.
        for i, scorer in enumerate(self._scorers):
            scores[i] = scorer.score(url)
            total_score += scores[i]

        # Normalize by scorer count if requested (skip when empty).
        if self._normalize and self._scorers:
            return total_score / len(self._scorers)

        return total_score

    def score(self, url: str) -> float:
        """Public scoring interface with stats tracking.

        Args:
            url: URL to score

        Returns:
            Final combined score
        """
        score = self._calculate_score(url)
        self.stats.update(score)
        return score
|
||||||
|
|
||||||
|
class KeywordRelevanceScorer(URLScorer):
    """Score URLs by the fraction of configured keywords they contain."""

    # NOTE: '_weight' and '_stats' are inherited from URLScorer.__slots__;
    # re-declaring them here (as before) created shadowing duplicate slot
    # descriptors and wasted per-instance memory.
    __slots__ = ('_keywords', '_case_sensitive')

    def __init__(self, keywords: List[str], weight: float = 1.0, case_sensitive: bool = False):
        """Initialize with keywords to match against URLs.

        Args:
            keywords: Substrings to look for in each URL
            weight: Overall weight multiplier for this scorer
            case_sensitive: Match keywords with exact casing when True
        """
        super().__init__(weight=weight)
        self._case_sensitive = case_sensitive
        # Pre-lowercase once so the hot path only lowercases the URL.
        self._keywords = [k if case_sensitive else k.lower() for k in keywords]

    def _calculate_score(self, url: str) -> float:
        """Fast substring matching; returns matched fraction in [0, 1].

        Cleanup: removed the dead ``_url_bytes`` helper — it was never
        called, and its @lru_cache on an instance method pinned instances
        alive (flake8-bugbear B019).
        """
        if not self._case_sensitive:
            url = url.lower()

        matches = sum(1 for k in self._keywords if k in url)

        # Fast return paths for no match / full match.
        if not matches:
            return 0.0
        if matches == len(self._keywords):
            return 1.0

        return matches / len(self._keywords)
|
||||||
|
|
||||||
|
class PathDepthScorer(URLScorer):
    """Score URLs by how close their path depth is to an optimal depth."""

    # '_weight' and '_stats' are inherited from URLScorer.__slots__.
    __slots__ = ('_optimal_depth',)

    def __init__(self, optimal_depth: int = 3, weight: float = 1.0):
        """
        Args:
            optimal_depth: Path depth that receives the maximum score
            weight: Overall weight multiplier for this scorer
        """
        super().__init__(weight=weight)
        self._optimal_depth = optimal_depth

    @staticmethod
    @lru_cache(maxsize=10000)
    def _quick_depth(path: str) -> int:
        """Ultra fast path depth calculation.

        Examples:
            - ""      -> 0  # No path segments
            - "/"     -> 0  # Empty path
            - "/a"    -> 1
            - "/a/b"  -> 2
        """
        if not path or path == '/':
            return 0

        if '/' not in path:
            return 0

        # Count non-empty segments by tracking slash transitions,
        # so repeated slashes ("//") do not inflate the depth.
        depth = 0
        last_was_slash = True

        for c in path:
            if c == '/':
                if not last_was_slash:
                    depth += 1
                last_was_slash = True
            else:
                last_was_slash = False

        # Account for a trailing segment without a closing slash.
        if not last_was_slash:
            depth += 1

        return depth

    @staticmethod
    @lru_cache(maxsize=10000)
    def _depth_score(url: str, optimal_depth: int) -> float:
        """Cached score for one (url, optimal_depth) pair.

        Bug fix: caching previously sat on the instance method
        (@lru_cache on _calculate_score), which keys the cache on ``self``
        and keeps every scorer instance alive for the cache's lifetime
        (flake8-bugbear B019). Moving it to a staticmethod keyed on the
        relevant inputs preserves the caching without the leak.
        """
        # Locate the start of the path (first '/' after the scheme).
        pos = url.find('/', url.find('://') + 3)
        depth = 0 if pos == -1 else PathDepthScorer._quick_depth(url[pos:])

        # Distance from the optimal depth; branch is faster than abs().
        distance = depth - optimal_depth
        if distance < 0:
            distance = -distance

        # Lookup table covers the common small distances.
        if distance < 4:
            return _SCORE_LOOKUP[distance]

        return 1.0 / (1.0 + distance)

    def _calculate_score(self, url: str) -> float:
        """Score ``url``: 1.0 at optimal depth, decaying as 1/(1+distance)."""
        return self._depth_score(url, self._optimal_depth)
|
||||||
|
|
||||||
|
class ContentTypeScorer(URLScorer):
    """Score URLs by file extension / content-type pattern."""

    # '_weight' is inherited from URLScorer.__slots__; re-declaring it
    # here would create a shadowing duplicate slot descriptor.
    __slots__ = ('_exact_types', '_regex_types')

    def __init__(self, type_weights: Dict[str, float], weight: float = 1.0):
        """Initialize scorer with type weights map.

        Args:
            type_weights: Dict mapping file extensions/patterns to scores
                (e.g. {'.html$': 1.0})
            weight: Overall weight multiplier for this scorer
        """
        super().__init__(weight=weight)
        self._exact_types = {}  # simple '.ext$' patterns -> fast dict lookup
        self._regex_types = []  # everything else -> compiled regex fallback

        # Split into exact vs regex matchers for performance.
        for pattern, score in type_weights.items():
            if pattern.startswith('.') and pattern.endswith('$'):
                self._exact_types[pattern[1:-1]] = score
            else:
                self._regex_types.append((re.compile(pattern), score))

        # Highest-scoring patterns first so the first regex hit is
        # the best-scoring one.
        self._regex_types.sort(key=lambda x: -x[1])

    @staticmethod
    @lru_cache(maxsize=10000)
    def _quick_extension(url: str) -> str:
        """Extract file extension ultra-fast without regex/splits.

        Handles:
        - Basic extensions: "example.html" -> "html"
        - Query strings: "page.php?id=1" -> "php"
        - Fragments: "doc.pdf#page=1" -> "pdf"
        - Path params: "file.jpg;width=100" -> "jpg"

        Args:
            url: URL to extract extension from

        Returns:
            Extension without dot, or empty string if none found
        """
        pos = url.rfind('.')
        if pos == -1:
            return ''

        # Stop at the first non-alphanumeric char after the dot.
        # '?', '#' and ';' are all non-alphanumeric, so one isalnum()
        # test covers query strings, fragments and path params (the
        # original additionally tested membership in '?#;', which was
        # redundant).
        end = len(url)
        for i in range(pos + 1, len(url)):
            if not url[i].isalnum():
                end = i
                break

        return url[pos + 1:end].lower()

    def _calculate_score(self, url: str) -> float:
        """Calculate content type score for URL.

        Staged approach:
        1. Exact extension match against the dict (fast path)
        2. Fall back to compiled regex patterns

        NOTE: dropped @lru_cache here — on an instance method it keys on
        ``self`` and keeps the instance alive (flake8-bugbear B019);
        _quick_extension is still cached, and the remaining work is one
        dict lookup plus (rarely) a few regex searches.

        Args:
            url: URL to score

        Returns:
            Score between 0.0 and 1.0 * weight
        """
        ext = self._quick_extension(url)
        if ext:
            score = self._exact_types.get(ext)
            if score is not None:
                return score

        # Slow path: regex patterns, best score first.
        for pattern, score in self._regex_types:
            if pattern.search(url):
                return score

        return 0.0
|
||||||
|
|
||||||
|
class FreshnessScorer(URLScorer):
    """Score URLs by recency of any date embedded in the URL."""

    # '_weight' is inherited from URLScorer.__slots__.
    __slots__ = ('_date_pattern', '_current_year')

    def __init__(self, weight: float = 1.0, current_year: int = 2024):
        """Initialize freshness scorer.

        Extracts and scores dates from URLs using format:
        - YYYY/MM/DD
        - YYYY-MM-DD
        - YYYY_MM_DD
        - YYYY (year only)

        Args:
            weight: Score multiplier
            current_year: Year to calculate freshness against (default 2024)
        """
        super().__init__(weight=weight)
        self._current_year = current_year

        # Combined pattern for all date formats.
        # Uses non-capturing groups (?:) and alternation.
        self._date_pattern = re.compile(
            r'(?:/'              # Path separator
            r'|[-_])'            # or date separators
            r'((?:19|20)\d{2})'  # Year group (1900-2099)
            r'(?:'               # Optional month/day group
            r'(?:/|[-_])'        # Date separator
            r'(?:\d{2})'         # Month
            r'(?:'               # Optional day
            r'(?:/|[-_])'        # Date separator
            r'(?:\d{2})'         # Day
            r')?'                # Day is optional
            r')?'                # Month/day group is optional
        )

    def _extract_year(self, url: str) -> Optional[int]:
        """Extract the most recent plausible year from URL.

        Cleanup: removed @lru_cache — on an instance method it keys on
        ``self`` and keeps the instance alive (flake8-bugbear B019).

        Args:
            url: URL to extract year from

        Returns:
            Year as int or None if no valid year found
        """
        latest_year = None

        for match in self._date_pattern.finditer(url):
            year = int(match.group(1))
            # Ignore years in the future relative to _current_year;
            # keep the most recent of the rest.
            if year <= self._current_year and (latest_year is None or year > latest_year):
                latest_year = year

        return latest_year

    def _calculate_score(self, url: str) -> float:
        """Calculate freshness score based on URL date.

        More recent years score higher; a pre-computed table covers
        differences of 0-5 years. URLs with no detectable year get a
        neutral 0.5.

        Cleanup: removed @lru_cache (flake8-bugbear B019, see above).

        Args:
            url: URL to score

        Returns:
            Score between 0.0 and 1.0 * weight
        """
        year = self._extract_year(url)
        if year is None:
            return 0.5  # Default score

        year_diff = self._current_year - year
        if year_diff < len(_FRESHNESS_SCORES):
            return _FRESHNESS_SCORES[year_diff]

        # Fallback calculation for older content, floored at 0.1.
        return max(0.1, 1.0 - year_diff * 0.1)
|
||||||
|
|
||||||
|
class DomainAuthorityScorer(URLScorer):
    """Score URLs by a configured per-domain authority table."""

    # '_weight' is inherited from URLScorer.__slots__.
    __slots__ = ('_domain_weights', '_default_weight', '_top_domains')

    def __init__(
        self,
        domain_weights: Dict[str, float],
        default_weight: float = 0.5,
        weight: float = 1.0,
    ):
        """Initialize domain authority scorer.

        Args:
            domain_weights: Dict mapping domains to authority scores
            default_weight: Score for unknown domains
            weight: Overall scorer weight multiplier

        Example:
            {
                'python.org': 1.0,
                'github.com': 0.9,
                'medium.com': 0.7
            }
        """
        super().__init__(weight=weight)

        # Normalize keys once so lookups can use the lowercased domain.
        self._domain_weights = {
            domain.lower(): score
            for domain, score in domain_weights.items()
        }
        self._default_weight = default_weight

        # Cache the top 5 highest-scoring domains for the fast path.
        # Bug fix: keys are now lowercased like _domain_weights — before,
        # mixed-case config keys could never hit this fast path because
        # _extract_domain() always returns lowercase (the slow path still
        # returned the right score, so only the shortcut was dead).
        self._top_domains = {
            domain.lower(): score
            for domain, score in sorted(
                domain_weights.items(),
                key=lambda x: -x[1]
            )[:5]
        }

    @staticmethod
    @lru_cache(maxsize=10000)
    def _extract_domain(url: str) -> str:
        """Extract domain from URL ultra-fast.

        Handles:
        - Basic domains: "example.com"
        - Subdomains: "sub.example.com"
        - Ports: "example.com:8080"
        - IPv4: "192.168.1.1"

        Args:
            url: Full URL to extract domain from

        Returns:
            Lowercase domain without port
        """
        # Find domain start (just past the scheme, if any).
        start = url.find('://')
        if start == -1:
            start = 0
        else:
            start += 3

        # Domain ends at the first path, query, or fragment delimiter.
        end = url.find('/', start)
        if end == -1:
            end = url.find('?', start)
            if end == -1:
                end = url.find('#', start)
                if end == -1:
                    end = len(url)

        # Strip a trailing ":port" if present.
        domain = url[start:end]
        port_idx = domain.rfind(':')
        if port_idx != -1:
            domain = domain[:port_idx]

        return domain.lower()

    def _calculate_score(self, url: str) -> float:
        """Calculate domain authority score.

        Staged approach:
        1. Check top domains (fastest)
        2. Check full domain weights
        3. Return default weight

        NOTE: dropped @lru_cache here — on an instance method it keys on
        ``self`` and keeps the instance alive (flake8-bugbear B019);
        _extract_domain is still cached, and the rest is two dict lookups.

        Args:
            url: URL to score

        Returns:
            Authority score between 0.0 and 1.0 * weight
        """
        domain = self._extract_domain(url)

        # Fast path: top-scoring domains first.
        score = self._top_domains.get(domain)
        if score is not None:
            return score

        # Regular path: full table, default for unknown domains.
        return self._domain_weights.get(domain, self._default_weight)
|
||||||
170
crawl4ai/docker_client.py
Normal file
170
crawl4ai/docker_client.py
Normal file
@@ -0,0 +1,170 @@
|
|||||||
|
from typing import List, Optional, Union, AsyncGenerator, Dict, Any
|
||||||
|
import httpx
|
||||||
|
import json
|
||||||
|
from urllib.parse import urljoin
|
||||||
|
import asyncio
|
||||||
|
|
||||||
|
from .async_configs import BrowserConfig, CrawlerRunConfig
|
||||||
|
from .models import CrawlResult
|
||||||
|
from .async_logger import AsyncLogger, LogLevel
|
||||||
|
|
||||||
|
|
||||||
|
class Crawl4aiClientError(Exception):
    """Root of the Crawl4ai Docker client exception hierarchy.

    Catch this to handle any client-raised failure in one place.
    """
|
||||||
|
|
||||||
|
|
||||||
|
class ConnectionError(Crawl4aiClientError):
    """Raised when connection to the Docker server fails.

    NOTE(review): this class shadows the builtin ``ConnectionError``
    within this module, so ``except ConnectionError`` here catches this
    class rather than the builtin. Renaming would break callers that
    import it, so it is only flagged — confirm before changing.
    """
    pass
|
||||||
|
|
||||||
|
|
||||||
|
class RequestError(Crawl4aiClientError):
    """Raised when the server returns an error response.

    The message carries the HTTP status code and server-provided detail.
    """
|
||||||
|
|
||||||
|
|
||||||
|
class Crawl4aiDockerClient:
    """Client for interacting with Crawl4AI Docker server with token authentication.

    Typical flow: ``authenticate()`` once, then ``crawl()`` / ``get_schema()``.
    Use as an async context manager to ensure the HTTP session is closed.
    """

    def __init__(
        self,
        base_url: str = "http://localhost:8000",
        timeout: float = 30.0,
        verify_ssl: bool = True,
        verbose: bool = True,
        log_file: Optional[str] = None
    ):
        # Strip a trailing slash so urljoin() builds endpoint URLs consistently.
        self.base_url = base_url.rstrip('/')
        self.timeout = timeout
        self.logger = AsyncLogger(log_file=log_file, log_level=LogLevel.DEBUG, verbose=verbose)
        # One shared AsyncClient gives connection pooling across all requests.
        self._http_client = httpx.AsyncClient(
            timeout=timeout,
            verify=verify_ssl,
            headers={"Content-Type": "application/json"}
        )
        # Bearer token; set by authenticate(), required by crawl()/get_schema().
        self._token: Optional[str] = None

    async def authenticate(self, email: str) -> None:
        """Authenticate with the server and store the token.

        On success, installs an ``Authorization: Bearer`` header on the
        shared HTTP client so every later request is authenticated.

        Raises:
            ConnectionError: on any transport or HTTP-status failure.
        """
        url = urljoin(self.base_url, "/token")
        try:
            self.logger.info(f"Authenticating with email: {email}", tag="AUTH")
            response = await self._http_client.post(url, json={"email": email})
            response.raise_for_status()
            data = response.json()
            self._token = data["access_token"]
            self._http_client.headers["Authorization"] = f"Bearer {self._token}"
            self.logger.success("Authentication successful", tag="AUTH")
        except (httpx.RequestError, httpx.HTTPStatusError) as e:
            error_msg = f"Authentication failed: {str(e)}"
            self.logger.error(error_msg, tag="ERROR")
            raise ConnectionError(error_msg)

    async def _check_server(self) -> None:
        """Check if server is reachable, raising an error if not.

        NOTE(review): only transport errors are caught here — the /health
        response status is not checked, so a reachable-but-unhealthy
        server passes. Confirm whether that is intended.
        """
        try:
            await self._http_client.get(urljoin(self.base_url, "/health"))
            self.logger.success(f"Connected to {self.base_url}", tag="READY")
        except httpx.RequestError as e:
            self.logger.error(f"Server unreachable: {str(e)}", tag="ERROR")
            raise ConnectionError(f"Cannot connect to server: {str(e)}")

    def _prepare_request(self, urls: List[str], browser_config: Optional[BrowserConfig] = None,
                        crawler_config: Optional[CrawlerRunConfig] = None) -> Dict[str, Any]:
        """Prepare request data from configs.

        Missing configs are serialized as empty dicts so the server can
        apply its defaults.
        """
        return {
            "urls": urls,
            "browser_config": browser_config.dump() if browser_config else {},
            "crawler_config": crawler_config.dump() if crawler_config else {}
        }

    async def _request(self, method: str, endpoint: str, **kwargs) -> httpx.Response:
        """Make an HTTP request with error handling.

        Maps httpx failures onto the client's exception hierarchy:
        transport/timeout problems become ConnectionError, HTTP error
        statuses become RequestError (with the JSON ``detail`` field when
        the server sent one).
        """
        url = urljoin(self.base_url, endpoint)
        try:
            response = await self._http_client.request(method, url, **kwargs)
            response.raise_for_status()
            return response
        except httpx.TimeoutException as e:
            raise ConnectionError(f"Request timed out: {str(e)}")
        except httpx.RequestError as e:
            raise ConnectionError(f"Failed to connect: {str(e)}")
        except httpx.HTTPStatusError as e:
            # Prefer the structured error detail when the body is JSON.
            error_msg = (e.response.json().get("detail", str(e))
                        if "application/json" in e.response.headers.get("content-type", "")
                        else str(e))
            raise RequestError(f"Server error {e.response.status_code}: {error_msg}")

    async def crawl(
        self,
        urls: List[str],
        browser_config: Optional[BrowserConfig] = None,
        crawler_config: Optional[CrawlerRunConfig] = None
    ) -> Union[CrawlResult, List[CrawlResult], AsyncGenerator[CrawlResult, None]]:
        """Execute a crawl operation.

        Returns an async generator of CrawlResult when
        ``crawler_config.stream`` is truthy; otherwise a single
        CrawlResult (exactly one result) or a list of CrawlResult.

        Raises:
            Crawl4aiClientError: if authenticate() was not called first.
            ConnectionError / RequestError: on transport or server errors.
        """
        if not self._token:
            raise Crawl4aiClientError("Authentication required. Call authenticate() first.")
        await self._check_server()

        data = self._prepare_request(urls, browser_config, crawler_config)
        is_streaming = crawler_config and crawler_config.stream

        self.logger.info(f"Crawling {len(urls)} URLs {'(streaming)' if is_streaming else ''}", tag="CRAWL")

        if is_streaming:
            async def stream_results() -> AsyncGenerator[CrawlResult, None]:
                # Server streams newline-delimited JSON; each line is one
                # result, an error record, or the final "completed" marker.
                async with self._http_client.stream("POST", f"{self.base_url}/crawl/stream", json=data) as response:
                    response.raise_for_status()
                    async for line in response.aiter_lines():
                        if line.strip():
                            result = json.loads(line)
                            if "error" in result:
                                # Log per-URL errors but keep streaming.
                                self.logger.error_status(url=result.get("url", "unknown"), error=result["error"])
                                continue
                            self.logger.url_status(url=result.get("url", "unknown"), success=True, timing=result.get("timing", 0.0))
                            if result.get("status") == "completed":
                                # End-of-stream marker — not a crawl result.
                                continue
                            else:
                                yield CrawlResult(**result)
            return stream_results()

        response = await self._request("POST", "/crawl", json=data)
        result_data = response.json()
        if not result_data.get("success", False):
            raise RequestError(f"Crawl failed: {result_data.get('msg', 'Unknown error')}")

        results = [CrawlResult(**r) for r in result_data.get("results", [])]
        self.logger.success(f"Crawl completed with {len(results)} results", tag="CRAWL")
        # Single-result crawls are unwrapped; callers passing one URL get
        # a bare CrawlResult, otherwise a list.
        return results[0] if len(results) == 1 else results

    async def get_schema(self) -> Dict[str, Any]:
        """Retrieve configuration schemas.

        Raises:
            Crawl4aiClientError: if authenticate() was not called first.
        """
        if not self._token:
            raise Crawl4aiClientError("Authentication required. Call authenticate() first.")
        response = await self._request("GET", "/schema")
        return response.json()

    async def close(self) -> None:
        """Close the HTTP client session."""
        self.logger.info("Closing client", tag="CLOSE")
        await self._http_client.aclose()

    async def __aenter__(self) -> "Crawl4aiDockerClient":
        return self

    async def __aexit__(self, exc_type: Optional[type], exc_val: Optional[Exception], exc_tb: Optional[Any]) -> None:
        # Always release the pooled connections, even on error.
        await self.close()
||||||
|
|
||||||
|
|
||||||
|
# Example usage: authenticate, crawl a page, then fetch the config schema.
async def main():
    async with Crawl4aiDockerClient(verbose=True) as client:
        await client.authenticate("user@example.com")

        crawl_result = await client.crawl(["https://example.com"])
        print(crawl_result)

        print(await client.get_schema())


if __name__ == "__main__":
    asyncio.run(main())
|
||||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -510,6 +510,7 @@ class HTML2Text(html.parser.HTMLParser):
|
|||||||
|
|
||||||
if tag == "a" and not self.ignore_links:
|
if tag == "a" and not self.ignore_links:
|
||||||
if start:
|
if start:
|
||||||
|
self.inside_link = True
|
||||||
if (
|
if (
|
||||||
"href" in attrs
|
"href" in attrs
|
||||||
and attrs["href"] is not None
|
and attrs["href"] is not None
|
||||||
@@ -526,6 +527,7 @@ class HTML2Text(html.parser.HTMLParser):
|
|||||||
else:
|
else:
|
||||||
self.astack.append(None)
|
self.astack.append(None)
|
||||||
else:
|
else:
|
||||||
|
self.inside_link = False
|
||||||
if self.astack:
|
if self.astack:
|
||||||
a = self.astack.pop()
|
a = self.astack.pop()
|
||||||
if self.maybe_automatic_link and not self.empty_link:
|
if self.maybe_automatic_link and not self.empty_link:
|
||||||
@@ -610,13 +612,22 @@ class HTML2Text(html.parser.HTMLParser):
|
|||||||
self.o("[" + str(a_props.count) + "]")
|
self.o("[" + str(a_props.count) + "]")
|
||||||
|
|
||||||
if tag == "dl" and start:
|
if tag == "dl" and start:
|
||||||
self.p()
|
self.p() # Add paragraph break before list starts
|
||||||
if tag == "dt" and not start:
|
self.p_p = 0 # Reset paragraph state
|
||||||
self.pbr()
|
|
||||||
if tag == "dd" and start:
|
elif tag == "dt" and start:
|
||||||
self.o(" ")
|
if self.p_p == 0: # If not first term
|
||||||
if tag == "dd" and not start:
|
self.o("\n\n") # Add spacing before new term-definition pair
|
||||||
self.pbr()
|
self.p_p = 0 # Reset paragraph state
|
||||||
|
|
||||||
|
elif tag == "dt" and not start:
|
||||||
|
self.o("\n") # Single newline between term and definition
|
||||||
|
|
||||||
|
elif tag == "dd" and start:
|
||||||
|
self.o(" ") # Indent definition
|
||||||
|
|
||||||
|
elif tag == "dd" and not start:
|
||||||
|
self.p_p = 0
|
||||||
|
|
||||||
if tag in ["ol", "ul"]:
|
if tag in ["ol", "ul"]:
|
||||||
# Google Docs create sub lists as top level lists
|
# Google Docs create sub lists as top level lists
|
||||||
@@ -903,7 +914,13 @@ class HTML2Text(html.parser.HTMLParser):
|
|||||||
self.empty_link = False
|
self.empty_link = False
|
||||||
|
|
||||||
if not self.code and not self.pre and not entity_char:
|
if not self.code and not self.pre and not entity_char:
|
||||||
data = escape_md_section(data, snob=self.escape_snob, escape_dot=self.escape_dot, escape_plus=self.escape_plus, escape_dash=self.escape_dash)
|
data = escape_md_section(
|
||||||
|
data,
|
||||||
|
snob=self.escape_snob,
|
||||||
|
escape_dot=self.escape_dot,
|
||||||
|
escape_plus=self.escape_plus,
|
||||||
|
escape_dash=self.escape_dash,
|
||||||
|
)
|
||||||
self.preceding_data = data
|
self.preceding_data = data
|
||||||
self.o(data, puredata=True)
|
self.o(data, puredata=True)
|
||||||
|
|
||||||
@@ -1006,6 +1023,7 @@ class HTML2Text(html.parser.HTMLParser):
|
|||||||
newlines += 1
|
newlines += 1
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
def html2text(html: str, baseurl: str = "", bodywidth: Optional[int] = None) -> str:
|
def html2text(html: str, baseurl: str = "", bodywidth: Optional[int] = None) -> str:
|
||||||
if bodywidth is None:
|
if bodywidth is None:
|
||||||
bodywidth = config.BODY_WIDTH
|
bodywidth = config.BODY_WIDTH
|
||||||
@@ -1013,11 +1031,13 @@ def html2text(html: str, baseurl: str = "", bodywidth: Optional[int] = None) ->
|
|||||||
|
|
||||||
return h.handle(html)
|
return h.handle(html)
|
||||||
|
|
||||||
|
|
||||||
class CustomHTML2Text(HTML2Text):
|
class CustomHTML2Text(HTML2Text):
|
||||||
def __init__(self, *args, handle_code_in_pre=False, **kwargs):
|
def __init__(self, *args, handle_code_in_pre=False, **kwargs):
|
||||||
super().__init__(*args, **kwargs)
|
super().__init__(*args, **kwargs)
|
||||||
self.inside_pre = False
|
self.inside_pre = False
|
||||||
self.inside_code = False
|
self.inside_code = False
|
||||||
|
self.inside_link = False
|
||||||
self.preserve_tags = set() # Set of tags to preserve
|
self.preserve_tags = set() # Set of tags to preserve
|
||||||
self.current_preserved_tag = None
|
self.current_preserved_tag = None
|
||||||
self.preserved_content = []
|
self.preserved_content = []
|
||||||
@@ -1041,9 +1061,9 @@ class CustomHTML2Text(HTML2Text):
|
|||||||
def update_params(self, **kwargs):
|
def update_params(self, **kwargs):
|
||||||
"""Update parameters and set preserved tags."""
|
"""Update parameters and set preserved tags."""
|
||||||
for key, value in kwargs.items():
|
for key, value in kwargs.items():
|
||||||
if key == 'preserve_tags':
|
if key == "preserve_tags":
|
||||||
self.preserve_tags = set(value)
|
self.preserve_tags = set(value)
|
||||||
elif key == 'handle_code_in_pre':
|
elif key == "handle_code_in_pre":
|
||||||
self.handle_code_in_pre = value
|
self.handle_code_in_pre = value
|
||||||
else:
|
else:
|
||||||
setattr(self, key, value)
|
setattr(self, key, value)
|
||||||
@@ -1056,17 +1076,19 @@ class CustomHTML2Text(HTML2Text):
|
|||||||
self.current_preserved_tag = tag
|
self.current_preserved_tag = tag
|
||||||
self.preserved_content = []
|
self.preserved_content = []
|
||||||
# Format opening tag with attributes
|
# Format opening tag with attributes
|
||||||
attr_str = ''.join(f' {k}="{v}"' for k, v in attrs.items() if v is not None)
|
attr_str = "".join(
|
||||||
self.preserved_content.append(f'<{tag}{attr_str}>')
|
f' {k}="{v}"' for k, v in attrs.items() if v is not None
|
||||||
|
)
|
||||||
|
self.preserved_content.append(f"<{tag}{attr_str}>")
|
||||||
self.preserve_depth += 1
|
self.preserve_depth += 1
|
||||||
return
|
return
|
||||||
else:
|
else:
|
||||||
self.preserve_depth -= 1
|
self.preserve_depth -= 1
|
||||||
if self.preserve_depth == 0:
|
if self.preserve_depth == 0:
|
||||||
self.preserved_content.append(f'</{tag}>')
|
self.preserved_content.append(f"</{tag}>")
|
||||||
# Output the preserved HTML block with proper spacing
|
# Output the preserved HTML block with proper spacing
|
||||||
preserved_html = ''.join(self.preserved_content)
|
preserved_html = "".join(self.preserved_content)
|
||||||
self.o('\n' + preserved_html + '\n')
|
self.o("\n" + preserved_html + "\n")
|
||||||
self.current_preserved_tag = None
|
self.current_preserved_tag = None
|
||||||
return
|
return
|
||||||
|
|
||||||
@@ -1074,30 +1096,38 @@ class CustomHTML2Text(HTML2Text):
|
|||||||
if self.preserve_depth > 0:
|
if self.preserve_depth > 0:
|
||||||
if start:
|
if start:
|
||||||
# Format nested tags with attributes
|
# Format nested tags with attributes
|
||||||
attr_str = ''.join(f' {k}="{v}"' for k, v in attrs.items() if v is not None)
|
attr_str = "".join(
|
||||||
self.preserved_content.append(f'<{tag}{attr_str}>')
|
f' {k}="{v}"' for k, v in attrs.items() if v is not None
|
||||||
|
)
|
||||||
|
self.preserved_content.append(f"<{tag}{attr_str}>")
|
||||||
else:
|
else:
|
||||||
self.preserved_content.append(f'</{tag}>')
|
self.preserved_content.append(f"</{tag}>")
|
||||||
return
|
return
|
||||||
|
|
||||||
# Handle pre tags
|
# Handle pre tags
|
||||||
if tag == 'pre':
|
if tag == "pre":
|
||||||
if start:
|
if start:
|
||||||
self.o('```\n') # Markdown code block start
|
self.o("```\n") # Markdown code block start
|
||||||
self.inside_pre = True
|
self.inside_pre = True
|
||||||
else:
|
else:
|
||||||
self.o('\n```\n') # Markdown code block end
|
self.o("\n```\n") # Markdown code block end
|
||||||
self.inside_pre = False
|
self.inside_pre = False
|
||||||
elif tag == 'code':
|
elif tag == "code":
|
||||||
if self.inside_pre and not self.handle_code_in_pre:
|
if self.inside_pre and not self.handle_code_in_pre:
|
||||||
# Ignore code tags inside pre blocks if handle_code_in_pre is False
|
# Ignore code tags inside pre blocks if handle_code_in_pre is False
|
||||||
return
|
return
|
||||||
if start:
|
if start:
|
||||||
self.o('`') # Markdown inline code start
|
if not self.inside_link:
|
||||||
|
self.o("`") # Only output backtick if not inside a link
|
||||||
self.inside_code = True
|
self.inside_code = True
|
||||||
else:
|
else:
|
||||||
self.o('`') # Markdown inline code end
|
if not self.inside_link:
|
||||||
|
self.o("`") # Only output backtick if not inside a link
|
||||||
self.inside_code = False
|
self.inside_code = False
|
||||||
|
|
||||||
|
# If inside a link, let the parent class handle the content
|
||||||
|
if self.inside_link:
|
||||||
|
super().handle_tag(tag, attrs, start)
|
||||||
else:
|
else:
|
||||||
super().handle_tag(tag, attrs, start)
|
super().handle_tag(tag, attrs, start)
|
||||||
|
|
||||||
@@ -1113,13 +1143,12 @@ class CustomHTML2Text(HTML2Text):
|
|||||||
return
|
return
|
||||||
if self.inside_code:
|
if self.inside_code:
|
||||||
# Inline code: no newlines allowed
|
# Inline code: no newlines allowed
|
||||||
self.o(data.replace('\n', ' '))
|
self.o(data.replace("\n", " "))
|
||||||
return
|
return
|
||||||
|
|
||||||
# Default behavior for other tags
|
# Default behavior for other tags
|
||||||
super().handle_data(data, entity_char)
|
super().handle_data(data, entity_char)
|
||||||
|
|
||||||
|
|
||||||
# # Handle pre tags
|
# # Handle pre tags
|
||||||
# if tag == 'pre':
|
# if tag == 'pre':
|
||||||
# if start:
|
# if start:
|
||||||
|
|||||||
@@ -1,2 +1,3 @@
|
|||||||
class OutCallback:
|
class OutCallback:
|
||||||
def __call__(self, s: str) -> None: ...
|
def __call__(self, s: str) -> None:
|
||||||
|
...
|
||||||
|
|||||||
@@ -210,7 +210,7 @@ def escape_md_section(
|
|||||||
snob: bool = False,
|
snob: bool = False,
|
||||||
escape_dot: bool = True,
|
escape_dot: bool = True,
|
||||||
escape_plus: bool = True,
|
escape_plus: bool = True,
|
||||||
escape_dash: bool = True
|
escape_dash: bool = True,
|
||||||
) -> str:
|
) -> str:
|
||||||
"""
|
"""
|
||||||
Escapes markdown-sensitive characters across whole document sections.
|
Escapes markdown-sensitive characters across whole document sections.
|
||||||
@@ -233,6 +233,7 @@ def escape_md_section(
|
|||||||
|
|
||||||
return text
|
return text
|
||||||
|
|
||||||
|
|
||||||
def reformat_table(lines: List[str], right_margin: int) -> List[str]:
|
def reformat_table(lines: List[str], right_margin: int) -> List[str]:
|
||||||
"""
|
"""
|
||||||
Given the lines of a table
|
Given the lines of a table
|
||||||
|
|||||||
69
crawl4ai/hub.py
Normal file
69
crawl4ai/hub.py
Normal file
@@ -0,0 +1,69 @@
|
|||||||
|
# crawl4ai/hub.py
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from typing import Dict, Type, Union
|
||||||
|
import logging
|
||||||
|
import importlib
|
||||||
|
from pathlib import Path
|
||||||
|
import inspect
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class BaseCrawler(ABC):
|
||||||
|
def __init__(self):
|
||||||
|
self.logger = logging.getLogger(self.__class__.__name__)
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def run(self, url: str = "", **kwargs) -> str:
|
||||||
|
"""
|
||||||
|
Implement this method to return JSON string.
|
||||||
|
Must accept URL + arbitrary kwargs for flexibility.
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
def __init_subclass__(cls, **kwargs):
|
||||||
|
"""Enforce interface validation on subclassing"""
|
||||||
|
super().__init_subclass__(**kwargs)
|
||||||
|
|
||||||
|
# Verify run method signature
|
||||||
|
run_method = cls.run
|
||||||
|
if not run_method.__code__.co_argcount >= 2: # self + url
|
||||||
|
raise TypeError(f"{cls.__name__} must implement 'run(self, url: str, **kwargs)'")
|
||||||
|
|
||||||
|
# Verify async nature
|
||||||
|
if not inspect.iscoroutinefunction(run_method):
|
||||||
|
raise TypeError(f"{cls.__name__}.run must be async")
|
||||||
|
|
||||||
|
class CrawlerHub:
|
||||||
|
_crawlers: Dict[str, Type[BaseCrawler]] = {}
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def _discover_crawlers(cls):
|
||||||
|
"""Dynamically load crawlers from /crawlers in 3 lines"""
|
||||||
|
base_path = Path(__file__).parent / "crawlers"
|
||||||
|
for crawler_dir in base_path.iterdir():
|
||||||
|
if crawler_dir.is_dir():
|
||||||
|
try:
|
||||||
|
module = importlib.import_module(
|
||||||
|
f"crawl4ai.crawlers.{crawler_dir.name}.crawler"
|
||||||
|
)
|
||||||
|
for attr in dir(module):
|
||||||
|
cls._maybe_register_crawler(
|
||||||
|
getattr(module, attr), crawler_dir.name
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Failed {crawler_dir.name}: {str(e)}")
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def _maybe_register_crawler(cls, obj, name: str):
|
||||||
|
"""Brilliant one-liner registration"""
|
||||||
|
if isinstance(obj, type) and issubclass(obj, BaseCrawler) and obj != BaseCrawler:
|
||||||
|
module = importlib.import_module(obj.__module__)
|
||||||
|
obj.meta = getattr(module, "__meta__", {})
|
||||||
|
cls._crawlers[name] = obj
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def get(cls, name: str) -> Union[Type[BaseCrawler], None]:
|
||||||
|
if not cls._crawlers:
|
||||||
|
cls._discover_crawlers()
|
||||||
|
return cls._crawlers.get(name)
|
||||||
@@ -2,29 +2,81 @@ import subprocess
|
|||||||
import sys
|
import sys
|
||||||
import asyncio
|
import asyncio
|
||||||
from .async_logger import AsyncLogger, LogLevel
|
from .async_logger import AsyncLogger, LogLevel
|
||||||
|
from pathlib import Path
|
||||||
|
import os
|
||||||
|
import shutil
|
||||||
|
|
||||||
# Initialize logger
|
# Initialize logger
|
||||||
logger = AsyncLogger(log_level=LogLevel.DEBUG, verbose=True)
|
logger = AsyncLogger(log_level=LogLevel.DEBUG, verbose=True)
|
||||||
|
|
||||||
|
def setup_home_directory():
|
||||||
|
"""Set up the .crawl4ai folder structure in the user's home directory."""
|
||||||
|
base_dir = os.getenv("CRAWL4_AI_BASE_DIRECTORY")
|
||||||
|
crawl4ai_folder = Path(base_dir) if base_dir else Path.home()
|
||||||
|
crawl4ai_config = crawl4ai_folder / "global.yml"
|
||||||
|
crawl4ai_folder = crawl4ai_folder / ".crawl4ai"
|
||||||
|
cache_folder = crawl4ai_folder / "cache"
|
||||||
|
content_folders = [
|
||||||
|
"html_content",
|
||||||
|
"cleaned_html",
|
||||||
|
"markdown_content",
|
||||||
|
"extracted_content",
|
||||||
|
"screenshots",
|
||||||
|
]
|
||||||
|
|
||||||
|
# Clean up old cache if exists
|
||||||
|
if cache_folder.exists():
|
||||||
|
shutil.rmtree(cache_folder)
|
||||||
|
|
||||||
|
# Create new folder structure
|
||||||
|
crawl4ai_folder.mkdir(exist_ok=True)
|
||||||
|
cache_folder.mkdir(exist_ok=True)
|
||||||
|
for folder in content_folders:
|
||||||
|
(crawl4ai_folder / folder).mkdir(exist_ok=True)
|
||||||
|
|
||||||
|
# If config file does not exist, create it
|
||||||
|
if not crawl4ai_config.exists():
|
||||||
|
with open(crawl4ai_config, "w") as f:
|
||||||
|
f.write("")
|
||||||
|
|
||||||
def post_install():
|
def post_install():
|
||||||
"""Run all post-installation tasks"""
|
"""Run all post-installation tasks"""
|
||||||
logger.info("Running post-installation setup...", tag="INIT")
|
logger.info("Running post-installation setup...", tag="INIT")
|
||||||
|
setup_home_directory()
|
||||||
install_playwright()
|
install_playwright()
|
||||||
run_migration()
|
run_migration()
|
||||||
logger.success("Post-installation setup completed!", tag="COMPLETE")
|
logger.success("Post-installation setup completed!", tag="COMPLETE")
|
||||||
|
|
||||||
|
|
||||||
def install_playwright():
|
def install_playwright():
|
||||||
logger.info("Installing Playwright browsers...", tag="INIT")
|
logger.info("Installing Playwright browsers...", tag="INIT")
|
||||||
try:
|
try:
|
||||||
# subprocess.check_call([sys.executable, "-m", "playwright", "install", "--with-deps", "--force", "chrome"])
|
# subprocess.check_call([sys.executable, "-m", "playwright", "install", "--with-deps", "--force", "chrome"])
|
||||||
subprocess.check_call([sys.executable, "-m", "playwright", "install", "--with-deps", "--force", "chromium"])
|
subprocess.check_call(
|
||||||
logger.success("Playwright installation completed successfully.", tag="COMPLETE")
|
[
|
||||||
except subprocess.CalledProcessError as e:
|
sys.executable,
|
||||||
|
"-m",
|
||||||
|
"playwright",
|
||||||
|
"install",
|
||||||
|
"--with-deps",
|
||||||
|
"--force",
|
||||||
|
"chromium",
|
||||||
|
]
|
||||||
|
)
|
||||||
|
logger.success(
|
||||||
|
"Playwright installation completed successfully.", tag="COMPLETE"
|
||||||
|
)
|
||||||
|
except subprocess.CalledProcessError:
|
||||||
# logger.error(f"Error during Playwright installation: {e}", tag="ERROR")
|
# logger.error(f"Error during Playwright installation: {e}", tag="ERROR")
|
||||||
logger.warning(f"Please run '{sys.executable} -m playwright install --with-deps' manually after the installation.")
|
logger.warning(
|
||||||
except Exception as e:
|
f"Please run '{sys.executable} -m playwright install --with-deps' manually after the installation."
|
||||||
|
)
|
||||||
|
except Exception:
|
||||||
# logger.error(f"Unexpected error during Playwright installation: {e}", tag="ERROR")
|
# logger.error(f"Unexpected error during Playwright installation: {e}", tag="ERROR")
|
||||||
logger.warning(f"Please run '{sys.executable} -m playwright install --with-deps' manually after the installation.")
|
logger.warning(
|
||||||
|
f"Please run '{sys.executable} -m playwright install --with-deps' manually after the installation."
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def run_migration():
|
def run_migration():
|
||||||
"""Initialize database during installation"""
|
"""Initialize database during installation"""
|
||||||
@@ -33,18 +85,26 @@ def run_migration():
|
|||||||
from crawl4ai.async_database import async_db_manager
|
from crawl4ai.async_database import async_db_manager
|
||||||
|
|
||||||
asyncio.run(async_db_manager.initialize())
|
asyncio.run(async_db_manager.initialize())
|
||||||
logger.success("Database initialization completed successfully.", tag="COMPLETE")
|
logger.success(
|
||||||
|
"Database initialization completed successfully.", tag="COMPLETE"
|
||||||
|
)
|
||||||
except ImportError:
|
except ImportError:
|
||||||
logger.warning("Database module not found. Will initialize on first use.")
|
logger.warning("Database module not found. Will initialize on first use.")
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.warning(f"Database initialization failed: {e}")
|
logger.warning(f"Database initialization failed: {e}")
|
||||||
logger.warning("Database will be initialized on first use")
|
logger.warning("Database will be initialized on first use")
|
||||||
|
|
||||||
|
|
||||||
async def run_doctor():
|
async def run_doctor():
|
||||||
"""Test if Crawl4AI is working properly"""
|
"""Test if Crawl4AI is working properly"""
|
||||||
logger.info("Running Crawl4AI health check...", tag="INIT")
|
logger.info("Running Crawl4AI health check...", tag="INIT")
|
||||||
try:
|
try:
|
||||||
from .async_webcrawler import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
|
from .async_webcrawler import (
|
||||||
|
AsyncWebCrawler,
|
||||||
|
BrowserConfig,
|
||||||
|
CrawlerRunConfig,
|
||||||
|
CacheMode,
|
||||||
|
)
|
||||||
|
|
||||||
browser_config = BrowserConfig(
|
browser_config = BrowserConfig(
|
||||||
headless=True,
|
headless=True,
|
||||||
@@ -52,7 +112,7 @@ async def run_doctor():
|
|||||||
ignore_https_errors=True,
|
ignore_https_errors=True,
|
||||||
light_mode=True,
|
light_mode=True,
|
||||||
viewport_width=1280,
|
viewport_width=1280,
|
||||||
viewport_height=720
|
viewport_height=720,
|
||||||
)
|
)
|
||||||
|
|
||||||
run_config = CrawlerRunConfig(
|
run_config = CrawlerRunConfig(
|
||||||
@@ -62,10 +122,7 @@ async def run_doctor():
|
|||||||
|
|
||||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||||
logger.info("Testing crawling capabilities...", tag="TEST")
|
logger.info("Testing crawling capabilities...", tag="TEST")
|
||||||
result = await crawler.arun(
|
result = await crawler.arun(url="https://crawl4ai.com", config=run_config)
|
||||||
url="https://crawl4ai.com",
|
|
||||||
config=run_config
|
|
||||||
)
|
|
||||||
|
|
||||||
if result and result.markdown:
|
if result and result.markdown:
|
||||||
logger.success("✅ Crawling test passed!", tag="COMPLETE")
|
logger.success("✅ Crawling test passed!", tag="COMPLETE")
|
||||||
@@ -77,7 +134,10 @@ async def run_doctor():
|
|||||||
logger.error(f"❌ Test failed: {e}", tag="ERROR")
|
logger.error(f"❌ Test failed: {e}", tag="ERROR")
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
|
||||||
def doctor():
|
def doctor():
|
||||||
"""Entry point for the doctor command"""
|
"""Entry point for the doctor command"""
|
||||||
import asyncio
|
import asyncio
|
||||||
return asyncio.run(run_doctor())
|
|
||||||
|
asyncio.run(run_doctor())
|
||||||
|
sys.exit(0)
|
||||||
|
|||||||
@@ -1,15 +1,18 @@
|
|||||||
import os, sys
|
import os
|
||||||
|
|
||||||
|
|
||||||
# Create a function get name of a js script, then load from the CURRENT folder of this script and return its content as string, make sure its error free
|
# Create a function get name of a js script, then load from the CURRENT folder of this script and return its content as string, make sure its error free
|
||||||
def load_js_script(script_name):
|
def load_js_script(script_name):
|
||||||
# Get the path of the current script
|
# Get the path of the current script
|
||||||
current_script_path = os.path.dirname(os.path.realpath(__file__))
|
current_script_path = os.path.dirname(os.path.realpath(__file__))
|
||||||
# Get the path of the script to load
|
# Get the path of the script to load
|
||||||
script_path = os.path.join(current_script_path, script_name + '.js')
|
script_path = os.path.join(current_script_path, script_name + ".js")
|
||||||
# Check if the script exists
|
# Check if the script exists
|
||||||
if not os.path.exists(script_path):
|
if not os.path.exists(script_path):
|
||||||
raise ValueError(f"Script {script_name} not found in the folder {current_script_path}")
|
raise ValueError(
|
||||||
|
f"Script {script_name} not found in the folder {current_script_path}"
|
||||||
|
)
|
||||||
# Load the content of the script
|
# Load the content of the script
|
||||||
with open(script_path, 'r') as f:
|
with open(script_path, "r") as f:
|
||||||
script_content = f.read()
|
script_content = f.read()
|
||||||
return script_content
|
return script_content
|
||||||
|
|||||||
0
crawl4ai/legacy/__init__.py
Normal file
0
crawl4ai/legacy/__init__.py
Normal file
123
crawl4ai/legacy/cli.py
Normal file
123
crawl4ai/legacy/cli.py
Normal file
@@ -0,0 +1,123 @@
|
|||||||
|
import click
|
||||||
|
import sys
|
||||||
|
import asyncio
|
||||||
|
from typing import List
|
||||||
|
from .docs_manager import DocsManager
|
||||||
|
from .async_logger import AsyncLogger
|
||||||
|
|
||||||
|
logger = AsyncLogger(verbose=True)
|
||||||
|
docs_manager = DocsManager(logger)
|
||||||
|
|
||||||
|
|
||||||
|
def print_table(headers: List[str], rows: List[List[str]], padding: int = 2):
|
||||||
|
"""Print formatted table with headers and rows"""
|
||||||
|
widths = [max(len(str(cell)) for cell in col) for col in zip(headers, *rows)]
|
||||||
|
border = "+" + "+".join("-" * (w + 2 * padding) for w in widths) + "+"
|
||||||
|
|
||||||
|
def format_row(row):
|
||||||
|
return (
|
||||||
|
"|"
|
||||||
|
+ "|".join(
|
||||||
|
f"{' ' * padding}{str(cell):<{w}}{' ' * padding}"
|
||||||
|
for cell, w in zip(row, widths)
|
||||||
|
)
|
||||||
|
+ "|"
|
||||||
|
)
|
||||||
|
|
||||||
|
click.echo(border)
|
||||||
|
click.echo(format_row(headers))
|
||||||
|
click.echo(border)
|
||||||
|
for row in rows:
|
||||||
|
click.echo(format_row(row))
|
||||||
|
click.echo(border)
|
||||||
|
|
||||||
|
|
||||||
|
@click.group()
|
||||||
|
def cli():
|
||||||
|
"""Crawl4AI Command Line Interface"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
@cli.group()
|
||||||
|
def docs():
|
||||||
|
"""Documentation operations"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
@docs.command()
|
||||||
|
@click.argument("sections", nargs=-1)
|
||||||
|
@click.option(
|
||||||
|
"--mode", type=click.Choice(["extended", "condensed"]), default="extended"
|
||||||
|
)
|
||||||
|
def combine(sections: tuple, mode: str):
|
||||||
|
"""Combine documentation sections"""
|
||||||
|
try:
|
||||||
|
asyncio.run(docs_manager.ensure_docs_exist())
|
||||||
|
click.echo(docs_manager.generate(sections, mode))
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(str(e), tag="ERROR")
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
@docs.command()
|
||||||
|
@click.argument("query")
|
||||||
|
@click.option("--top-k", "-k", default=5)
|
||||||
|
@click.option("--build-index", is_flag=True, help="Build index if missing")
|
||||||
|
def search(query: str, top_k: int, build_index: bool):
|
||||||
|
"""Search documentation"""
|
||||||
|
try:
|
||||||
|
result = docs_manager.search(query, top_k)
|
||||||
|
if result == "No search index available. Call build_search_index() first.":
|
||||||
|
if build_index or click.confirm("No search index found. Build it now?"):
|
||||||
|
asyncio.run(docs_manager.llm_text.generate_index_files())
|
||||||
|
result = docs_manager.search(query, top_k)
|
||||||
|
click.echo(result)
|
||||||
|
except Exception as e:
|
||||||
|
click.echo(f"Error: {str(e)}", err=True)
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
@docs.command()
|
||||||
|
def update():
|
||||||
|
"""Update docs from GitHub"""
|
||||||
|
try:
|
||||||
|
asyncio.run(docs_manager.fetch_docs())
|
||||||
|
click.echo("Documentation updated successfully")
|
||||||
|
except Exception as e:
|
||||||
|
click.echo(f"Error: {str(e)}", err=True)
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
@docs.command()
|
||||||
|
@click.option("--force-facts", is_flag=True, help="Force regenerate fact files")
|
||||||
|
@click.option("--clear-cache", is_flag=True, help="Clear BM25 cache")
|
||||||
|
def index(force_facts: bool, clear_cache: bool):
|
||||||
|
"""Build or rebuild search indexes"""
|
||||||
|
try:
|
||||||
|
asyncio.run(docs_manager.ensure_docs_exist())
|
||||||
|
asyncio.run(
|
||||||
|
docs_manager.llm_text.generate_index_files(
|
||||||
|
force_generate_facts=force_facts, clear_bm25_cache=clear_cache
|
||||||
|
)
|
||||||
|
)
|
||||||
|
click.echo("Search indexes built successfully")
|
||||||
|
except Exception as e:
|
||||||
|
click.echo(f"Error: {str(e)}", err=True)
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
# Add docs list command
|
||||||
|
@docs.command()
|
||||||
|
def list():
|
||||||
|
"""List available documentation sections"""
|
||||||
|
try:
|
||||||
|
sections = docs_manager.list()
|
||||||
|
print_table(["Sections"], [[section] for section in sections])
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
click.echo(f"Error: {str(e)}", err=True)
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
cli()
|
||||||
@@ -15,32 +15,30 @@ import logging, time
|
|||||||
import base64
|
import base64
|
||||||
from PIL import Image, ImageDraw, ImageFont
|
from PIL import Image, ImageDraw, ImageFont
|
||||||
from io import BytesIO
|
from io import BytesIO
|
||||||
from typing import List, Callable
|
from typing import Callable
|
||||||
import requests
|
import requests
|
||||||
import os
|
import os
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from .utils import *
|
from .utils import *
|
||||||
|
|
||||||
logger = logging.getLogger('selenium.webdriver.remote.remote_connection')
|
logger = logging.getLogger("selenium.webdriver.remote.remote_connection")
|
||||||
logger.setLevel(logging.WARNING)
|
logger.setLevel(logging.WARNING)
|
||||||
|
|
||||||
logger_driver = logging.getLogger('selenium.webdriver.common.service')
|
logger_driver = logging.getLogger("selenium.webdriver.common.service")
|
||||||
logger_driver.setLevel(logging.WARNING)
|
logger_driver.setLevel(logging.WARNING)
|
||||||
|
|
||||||
urllib3_logger = logging.getLogger('urllib3.connectionpool')
|
urllib3_logger = logging.getLogger("urllib3.connectionpool")
|
||||||
urllib3_logger.setLevel(logging.WARNING)
|
urllib3_logger.setLevel(logging.WARNING)
|
||||||
|
|
||||||
# Disable http.client logging
|
# Disable http.client logging
|
||||||
http_client_logger = logging.getLogger('http.client')
|
http_client_logger = logging.getLogger("http.client")
|
||||||
http_client_logger.setLevel(logging.WARNING)
|
http_client_logger.setLevel(logging.WARNING)
|
||||||
|
|
||||||
# Disable driver_finder and service logging
|
# Disable driver_finder and service logging
|
||||||
driver_finder_logger = logging.getLogger('selenium.webdriver.common.driver_finder')
|
driver_finder_logger = logging.getLogger("selenium.webdriver.common.driver_finder")
|
||||||
driver_finder_logger.setLevel(logging.WARNING)
|
driver_finder_logger.setLevel(logging.WARNING)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
class CrawlerStrategy(ABC):
|
class CrawlerStrategy(ABC):
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def crawl(self, url: str, **kwargs) -> str:
|
def crawl(self, url: str, **kwargs) -> str:
|
||||||
@@ -58,8 +56,9 @@ class CrawlerStrategy(ABC):
|
|||||||
def set_hook(self, hook_type: str, hook: Callable):
|
def set_hook(self, hook_type: str, hook: Callable):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
class CloudCrawlerStrategy(CrawlerStrategy):
|
class CloudCrawlerStrategy(CrawlerStrategy):
|
||||||
def __init__(self, use_cached_html = False):
|
def __init__(self, use_cached_html=False):
|
||||||
super().__init__()
|
super().__init__()
|
||||||
self.use_cached_html = use_cached_html
|
self.use_cached_html = use_cached_html
|
||||||
|
|
||||||
@@ -76,6 +75,7 @@ class CloudCrawlerStrategy(CrawlerStrategy):
|
|||||||
html = response["results"][0]["html"]
|
html = response["results"][0]["html"]
|
||||||
return sanitize_input_encode(html)
|
return sanitize_input_encode(html)
|
||||||
|
|
||||||
|
|
||||||
class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
|
class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
|
||||||
def __init__(self, use_cached_html=False, js_code=None, **kwargs):
|
def __init__(self, use_cached_html=False, js_code=None, **kwargs):
|
||||||
super().__init__()
|
super().__init__()
|
||||||
@@ -87,9 +87,14 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
|
|||||||
if kwargs.get("user_agent"):
|
if kwargs.get("user_agent"):
|
||||||
self.options.add_argument("--user-agent=" + kwargs.get("user_agent"))
|
self.options.add_argument("--user-agent=" + kwargs.get("user_agent"))
|
||||||
else:
|
else:
|
||||||
user_agent = kwargs.get("user_agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36")
|
user_agent = kwargs.get(
|
||||||
|
"user_agent",
|
||||||
|
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
|
||||||
|
)
|
||||||
self.options.add_argument(f"--user-agent={user_agent}")
|
self.options.add_argument(f"--user-agent={user_agent}")
|
||||||
self.options.add_argument("user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36")
|
self.options.add_argument(
|
||||||
|
"user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
|
||||||
|
)
|
||||||
|
|
||||||
self.options.headless = kwargs.get("headless", True)
|
self.options.headless = kwargs.get("headless", True)
|
||||||
if self.options.headless:
|
if self.options.headless:
|
||||||
@@ -123,11 +128,11 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
|
|||||||
|
|
||||||
# Hooks
|
# Hooks
|
||||||
self.hooks = {
|
self.hooks = {
|
||||||
'on_driver_created': None,
|
"on_driver_created": None,
|
||||||
'on_user_agent_updated': None,
|
"on_user_agent_updated": None,
|
||||||
'before_get_url': None,
|
"before_get_url": None,
|
||||||
'after_get_url': None,
|
"after_get_url": None,
|
||||||
'before_return_html': None
|
"before_return_html": None,
|
||||||
}
|
}
|
||||||
|
|
||||||
# chromedriver_autoinstaller.install()
|
# chromedriver_autoinstaller.install()
|
||||||
@@ -138,7 +143,6 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
|
|||||||
# chromedriver_path = chromedriver_autoinstaller.utils.download_chromedriver()
|
# chromedriver_path = chromedriver_autoinstaller.utils.download_chromedriver()
|
||||||
# self.service = Service(chromedriver_autoinstaller.install())
|
# self.service = Service(chromedriver_autoinstaller.install())
|
||||||
|
|
||||||
|
|
||||||
# chromedriver_path = ChromeDriverManager().install()
|
# chromedriver_path = ChromeDriverManager().install()
|
||||||
# self.service = Service(chromedriver_path)
|
# self.service = Service(chromedriver_path)
|
||||||
# self.service.log_path = "NUL"
|
# self.service.log_path = "NUL"
|
||||||
@@ -148,14 +152,12 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
|
|||||||
self.service = Service()
|
self.service = Service()
|
||||||
self.driver = webdriver.Chrome(options=self.options)
|
self.driver = webdriver.Chrome(options=self.options)
|
||||||
|
|
||||||
self.driver = self.execute_hook('on_driver_created', self.driver)
|
self.driver = self.execute_hook("on_driver_created", self.driver)
|
||||||
|
|
||||||
if kwargs.get("cookies"):
|
if kwargs.get("cookies"):
|
||||||
for cookie in kwargs.get("cookies"):
|
for cookie in kwargs.get("cookies"):
|
||||||
self.driver.add_cookie(cookie)
|
self.driver.add_cookie(cookie)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def set_hook(self, hook_type: str, hook: Callable):
|
def set_hook(self, hook_type: str, hook: Callable):
|
||||||
if hook_type in self.hooks:
|
if hook_type in self.hooks:
|
||||||
self.hooks[hook_type] = hook
|
self.hooks[hook_type] = hook
|
||||||
@@ -170,7 +172,9 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
|
|||||||
if isinstance(result, webdriver.Chrome):
|
if isinstance(result, webdriver.Chrome):
|
||||||
return result
|
return result
|
||||||
else:
|
else:
|
||||||
raise TypeError(f"Hook {hook_type} must return an instance of webdriver.Chrome or None.")
|
raise TypeError(
|
||||||
|
f"Hook {hook_type} must return an instance of webdriver.Chrome or None."
|
||||||
|
)
|
||||||
# If the hook returns None or there is no hook, return self.driver
|
# If the hook returns None or there is no hook, return self.driver
|
||||||
return self.driver
|
return self.driver
|
||||||
|
|
||||||
@@ -178,15 +182,15 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
|
|||||||
self.options.add_argument(f"user-agent={user_agent}")
|
self.options.add_argument(f"user-agent={user_agent}")
|
||||||
self.driver.quit()
|
self.driver.quit()
|
||||||
self.driver = webdriver.Chrome(service=self.service, options=self.options)
|
self.driver = webdriver.Chrome(service=self.service, options=self.options)
|
||||||
self.driver = self.execute_hook('on_user_agent_updated', self.driver)
|
self.driver = self.execute_hook("on_user_agent_updated", self.driver)
|
||||||
|
|
||||||
def set_custom_headers(self, headers: dict):
|
def set_custom_headers(self, headers: dict):
|
||||||
# Enable Network domain for sending headers
|
# Enable Network domain for sending headers
|
||||||
self.driver.execute_cdp_cmd('Network.enable', {})
|
self.driver.execute_cdp_cmd("Network.enable", {})
|
||||||
# Set extra HTTP headers
|
# Set extra HTTP headers
|
||||||
self.driver.execute_cdp_cmd('Network.setExtraHTTPHeaders', {'headers': headers})
|
self.driver.execute_cdp_cmd("Network.setExtraHTTPHeaders", {"headers": headers})
|
||||||
|
|
||||||
def _ensure_page_load(self, max_checks=6, check_interval=0.01):
|
def _ensure_page_load(self, max_checks=6, check_interval=0.01):
|
||||||
initial_length = len(self.driver.page_source)
|
initial_length = len(self.driver.page_source)
|
||||||
|
|
||||||
for ix in range(max_checks):
|
for ix in range(max_checks):
|
||||||
@@ -202,36 +206,53 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
|
|||||||
def crawl(self, url: str, **kwargs) -> str:
|
def crawl(self, url: str, **kwargs) -> str:
|
||||||
# Create md5 hash of the URL
|
# Create md5 hash of the URL
|
||||||
import hashlib
|
import hashlib
|
||||||
|
|
||||||
url_hash = hashlib.md5(url.encode()).hexdigest()
|
url_hash = hashlib.md5(url.encode()).hexdigest()
|
||||||
|
|
||||||
if self.use_cached_html:
|
if self.use_cached_html:
|
||||||
cache_file_path = os.path.join(os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".crawl4ai", "cache", url_hash)
|
cache_file_path = os.path.join(
|
||||||
|
os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()),
|
||||||
|
".crawl4ai",
|
||||||
|
"cache",
|
||||||
|
url_hash,
|
||||||
|
)
|
||||||
if os.path.exists(cache_file_path):
|
if os.path.exists(cache_file_path):
|
||||||
with open(cache_file_path, "r") as f:
|
with open(cache_file_path, "r") as f:
|
||||||
return sanitize_input_encode(f.read())
|
return sanitize_input_encode(f.read())
|
||||||
|
|
||||||
try:
|
try:
|
||||||
self.driver = self.execute_hook('before_get_url', self.driver)
|
self.driver = self.execute_hook("before_get_url", self.driver)
|
||||||
if self.verbose:
|
if self.verbose:
|
||||||
print(f"[LOG] 🕸️ Crawling {url} using LocalSeleniumCrawlerStrategy...")
|
print(f"[LOG] 🕸️ Crawling {url} using LocalSeleniumCrawlerStrategy...")
|
||||||
self.driver.get(url) #<html><head></head><body></body></html>
|
self.driver.get(url) # <html><head></head><body></body></html>
|
||||||
|
|
||||||
WebDriverWait(self.driver, 20).until(
|
WebDriverWait(self.driver, 20).until(
|
||||||
lambda d: d.execute_script('return document.readyState') == 'complete'
|
lambda d: d.execute_script("return document.readyState") == "complete"
|
||||||
)
|
)
|
||||||
WebDriverWait(self.driver, 10).until(
|
WebDriverWait(self.driver, 10).until(
|
||||||
EC.presence_of_all_elements_located((By.TAG_NAME, "body"))
|
EC.presence_of_all_elements_located((By.TAG_NAME, "body"))
|
||||||
)
|
)
|
||||||
|
|
||||||
self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
|
self.driver.execute_script(
|
||||||
|
"window.scrollTo(0, document.body.scrollHeight);"
|
||||||
|
)
|
||||||
|
|
||||||
self.driver = self.execute_hook('after_get_url', self.driver)
|
self.driver = self.execute_hook("after_get_url", self.driver)
|
||||||
html = sanitize_input_encode(self._ensure_page_load()) # self.driver.page_source
|
html = sanitize_input_encode(
|
||||||
can_not_be_done_headless = False # Look at my creativity for naming variables
|
self._ensure_page_load()
|
||||||
|
) # self.driver.page_source
|
||||||
|
can_not_be_done_headless = (
|
||||||
|
False # Look at my creativity for naming variables
|
||||||
|
)
|
||||||
|
|
||||||
# TODO: Very ugly approach, but promise to change it!
|
# TODO: Very ugly approach, but promise to change it!
|
||||||
if kwargs.get('bypass_headless', False) or html == "<html><head></head><body></body></html>":
|
if (
|
||||||
print("[LOG] 🙌 Page could not be loaded in headless mode. Trying non-headless mode...")
|
kwargs.get("bypass_headless", False)
|
||||||
|
or html == "<html><head></head><body></body></html>"
|
||||||
|
):
|
||||||
|
print(
|
||||||
|
"[LOG] 🙌 Page could not be loaded in headless mode. Trying non-headless mode..."
|
||||||
|
)
|
||||||
can_not_be_done_headless = True
|
can_not_be_done_headless = True
|
||||||
options = Options()
|
options = Options()
|
||||||
options.headless = False
|
options.headless = False
|
||||||
@@ -239,7 +260,7 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
|
|||||||
options.add_argument("--window-size=5,5")
|
options.add_argument("--window-size=5,5")
|
||||||
driver = webdriver.Chrome(service=self.service, options=options)
|
driver = webdriver.Chrome(service=self.service, options=options)
|
||||||
driver.get(url)
|
driver.get(url)
|
||||||
self.driver = self.execute_hook('after_get_url', driver)
|
self.driver = self.execute_hook("after_get_url", driver)
|
||||||
html = sanitize_input_encode(driver.page_source)
|
html = sanitize_input_encode(driver.page_source)
|
||||||
driver.quit()
|
driver.quit()
|
||||||
|
|
||||||
@@ -249,17 +270,21 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
|
|||||||
self.driver.execute_script(self.js_code)
|
self.driver.execute_script(self.js_code)
|
||||||
# Optionally, wait for some condition after executing the JS code
|
# Optionally, wait for some condition after executing the JS code
|
||||||
WebDriverWait(self.driver, 10).until(
|
WebDriverWait(self.driver, 10).until(
|
||||||
lambda driver: driver.execute_script("return document.readyState") == "complete"
|
lambda driver: driver.execute_script("return document.readyState")
|
||||||
|
== "complete"
|
||||||
)
|
)
|
||||||
elif self.js_code and type(self.js_code) == list:
|
elif self.js_code and type(self.js_code) == list:
|
||||||
for js in self.js_code:
|
for js in self.js_code:
|
||||||
self.driver.execute_script(js)
|
self.driver.execute_script(js)
|
||||||
WebDriverWait(self.driver, 10).until(
|
WebDriverWait(self.driver, 10).until(
|
||||||
lambda driver: driver.execute_script("return document.readyState") == "complete"
|
lambda driver: driver.execute_script(
|
||||||
|
"return document.readyState"
|
||||||
|
)
|
||||||
|
== "complete"
|
||||||
)
|
)
|
||||||
|
|
||||||
# Optionally, wait for some condition after executing the JS code : Contributed by (https://github.com/jonymusky)
|
# Optionally, wait for some condition after executing the JS code : Contributed by (https://github.com/jonymusky)
|
||||||
wait_for = kwargs.get('wait_for', False)
|
wait_for = kwargs.get("wait_for", False)
|
||||||
if wait_for:
|
if wait_for:
|
||||||
if callable(wait_for):
|
if callable(wait_for):
|
||||||
print("[LOG] 🔄 Waiting for condition...")
|
print("[LOG] 🔄 Waiting for condition...")
|
||||||
@@ -272,10 +297,15 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
|
|||||||
|
|
||||||
if not can_not_be_done_headless:
|
if not can_not_be_done_headless:
|
||||||
html = sanitize_input_encode(self.driver.page_source)
|
html = sanitize_input_encode(self.driver.page_source)
|
||||||
self.driver = self.execute_hook('before_return_html', self.driver, html)
|
self.driver = self.execute_hook("before_return_html", self.driver, html)
|
||||||
|
|
||||||
# Store in cache
|
# Store in cache
|
||||||
cache_file_path = os.path.join(os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".crawl4ai", "cache", url_hash)
|
cache_file_path = os.path.join(
|
||||||
|
os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()),
|
||||||
|
".crawl4ai",
|
||||||
|
"cache",
|
||||||
|
url_hash,
|
||||||
|
)
|
||||||
with open(cache_file_path, "w", encoding="utf-8") as f:
|
with open(cache_file_path, "w", encoding="utf-8") as f:
|
||||||
f.write(html)
|
f.write(html)
|
||||||
|
|
||||||
@@ -284,16 +314,16 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
|
|||||||
|
|
||||||
return html
|
return html
|
||||||
except InvalidArgumentException as e:
|
except InvalidArgumentException as e:
|
||||||
if not hasattr(e, 'msg'):
|
if not hasattr(e, "msg"):
|
||||||
e.msg = sanitize_input_encode(str(e))
|
e.msg = sanitize_input_encode(str(e))
|
||||||
raise InvalidArgumentException(f"Failed to crawl {url}: {e.msg}")
|
raise InvalidArgumentException(f"Failed to crawl {url}: {e.msg}")
|
||||||
except WebDriverException as e:
|
except WebDriverException as e:
|
||||||
# If e does nlt have msg attribute create it and set it to str(e)
|
# If e does nlt have msg attribute create it and set it to str(e)
|
||||||
if not hasattr(e, 'msg'):
|
if not hasattr(e, "msg"):
|
||||||
e.msg = sanitize_input_encode(str(e))
|
e.msg = sanitize_input_encode(str(e))
|
||||||
raise WebDriverException(f"Failed to crawl {url}: {e.msg}")
|
raise WebDriverException(f"Failed to crawl {url}: {e.msg}")
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
if not hasattr(e, 'msg'):
|
if not hasattr(e, "msg"):
|
||||||
e.msg = sanitize_input_encode(str(e))
|
e.msg = sanitize_input_encode(str(e))
|
||||||
raise Exception(f"Failed to crawl {url}: {e.msg}")
|
raise Exception(f"Failed to crawl {url}: {e.msg}")
|
||||||
|
|
||||||
@@ -301,7 +331,9 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
|
|||||||
try:
|
try:
|
||||||
# Get the dimensions of the page
|
# Get the dimensions of the page
|
||||||
total_width = self.driver.execute_script("return document.body.scrollWidth")
|
total_width = self.driver.execute_script("return document.body.scrollWidth")
|
||||||
total_height = self.driver.execute_script("return document.body.scrollHeight")
|
total_height = self.driver.execute_script(
|
||||||
|
"return document.body.scrollHeight"
|
||||||
|
)
|
||||||
|
|
||||||
# Set the window size to the dimensions of the page
|
# Set the window size to the dimensions of the page
|
||||||
self.driver.set_window_size(total_width, total_height)
|
self.driver.set_window_size(total_width, total_height)
|
||||||
@@ -313,23 +345,25 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
|
|||||||
image = Image.open(BytesIO(screenshot))
|
image = Image.open(BytesIO(screenshot))
|
||||||
|
|
||||||
# Convert image to RGB mode (this will handle both RGB and RGBA images)
|
# Convert image to RGB mode (this will handle both RGB and RGBA images)
|
||||||
rgb_image = image.convert('RGB')
|
rgb_image = image.convert("RGB")
|
||||||
|
|
||||||
# Convert to JPEG and compress
|
# Convert to JPEG and compress
|
||||||
buffered = BytesIO()
|
buffered = BytesIO()
|
||||||
rgb_image.save(buffered, format="JPEG", quality=85)
|
rgb_image.save(buffered, format="JPEG", quality=85)
|
||||||
img_base64 = base64.b64encode(buffered.getvalue()).decode('utf-8')
|
img_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
|
||||||
|
|
||||||
if self.verbose:
|
if self.verbose:
|
||||||
print(f"[LOG] 📸 Screenshot taken and converted to base64")
|
print("[LOG] 📸 Screenshot taken and converted to base64")
|
||||||
|
|
||||||
return img_base64
|
return img_base64
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
error_message = sanitize_input_encode(f"Failed to take screenshot: {str(e)}")
|
error_message = sanitize_input_encode(
|
||||||
|
f"Failed to take screenshot: {str(e)}"
|
||||||
|
)
|
||||||
print(error_message)
|
print(error_message)
|
||||||
|
|
||||||
# Generate an image with black background
|
# Generate an image with black background
|
||||||
img = Image.new('RGB', (800, 600), color='black')
|
img = Image.new("RGB", (800, 600), color="black")
|
||||||
draw = ImageDraw.Draw(img)
|
draw = ImageDraw.Draw(img)
|
||||||
|
|
||||||
# Load a font
|
# Load a font
|
||||||
@@ -352,7 +386,7 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
|
|||||||
# Convert to base64
|
# Convert to base64
|
||||||
buffered = BytesIO()
|
buffered = BytesIO()
|
||||||
img.save(buffered, format="JPEG")
|
img.save(buffered, format="JPEG")
|
||||||
img_base64 = base64.b64encode(buffered.getvalue()).decode('utf-8')
|
img_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
|
||||||
|
|
||||||
return img_base64
|
return img_base64
|
||||||
|
|
||||||
@@ -7,11 +7,13 @@ DB_PATH = os.path.join(os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".cra
|
|||||||
os.makedirs(DB_PATH, exist_ok=True)
|
os.makedirs(DB_PATH, exist_ok=True)
|
||||||
DB_PATH = os.path.join(DB_PATH, "crawl4ai.db")
|
DB_PATH = os.path.join(DB_PATH, "crawl4ai.db")
|
||||||
|
|
||||||
|
|
||||||
def init_db():
|
def init_db():
|
||||||
global DB_PATH
|
global DB_PATH
|
||||||
conn = sqlite3.connect(DB_PATH)
|
conn = sqlite3.connect(DB_PATH)
|
||||||
cursor = conn.cursor()
|
cursor = conn.cursor()
|
||||||
cursor.execute('''
|
cursor.execute(
|
||||||
|
"""
|
||||||
CREATE TABLE IF NOT EXISTS crawled_data (
|
CREATE TABLE IF NOT EXISTS crawled_data (
|
||||||
url TEXT PRIMARY KEY,
|
url TEXT PRIMARY KEY,
|
||||||
html TEXT,
|
html TEXT,
|
||||||
@@ -24,31 +26,42 @@ def init_db():
|
|||||||
metadata TEXT DEFAULT "{}",
|
metadata TEXT DEFAULT "{}",
|
||||||
screenshot TEXT DEFAULT ""
|
screenshot TEXT DEFAULT ""
|
||||||
)
|
)
|
||||||
''')
|
"""
|
||||||
|
)
|
||||||
conn.commit()
|
conn.commit()
|
||||||
conn.close()
|
conn.close()
|
||||||
|
|
||||||
|
|
||||||
def alter_db_add_screenshot(new_column: str = "media"):
|
def alter_db_add_screenshot(new_column: str = "media"):
|
||||||
check_db_path()
|
check_db_path()
|
||||||
try:
|
try:
|
||||||
conn = sqlite3.connect(DB_PATH)
|
conn = sqlite3.connect(DB_PATH)
|
||||||
cursor = conn.cursor()
|
cursor = conn.cursor()
|
||||||
cursor.execute(f'ALTER TABLE crawled_data ADD COLUMN {new_column} TEXT DEFAULT ""')
|
cursor.execute(
|
||||||
|
f'ALTER TABLE crawled_data ADD COLUMN {new_column} TEXT DEFAULT ""'
|
||||||
|
)
|
||||||
conn.commit()
|
conn.commit()
|
||||||
conn.close()
|
conn.close()
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
print(f"Error altering database to add screenshot column: {e}")
|
print(f"Error altering database to add screenshot column: {e}")
|
||||||
|
|
||||||
|
|
||||||
def check_db_path():
|
def check_db_path():
|
||||||
if not DB_PATH:
|
if not DB_PATH:
|
||||||
raise ValueError("Database path is not set or is empty.")
|
raise ValueError("Database path is not set or is empty.")
|
||||||
|
|
||||||
def get_cached_url(url: str) -> Optional[Tuple[str, str, str, str, str, str, str, bool, str]]:
|
|
||||||
|
def get_cached_url(
|
||||||
|
url: str,
|
||||||
|
) -> Optional[Tuple[str, str, str, str, str, str, str, bool, str]]:
|
||||||
check_db_path()
|
check_db_path()
|
||||||
try:
|
try:
|
||||||
conn = sqlite3.connect(DB_PATH)
|
conn = sqlite3.connect(DB_PATH)
|
||||||
cursor = conn.cursor()
|
cursor = conn.cursor()
|
||||||
cursor.execute('SELECT url, html, cleaned_html, markdown, extracted_content, success, media, links, metadata, screenshot FROM crawled_data WHERE url = ?', (url,))
|
cursor.execute(
|
||||||
|
"SELECT url, html, cleaned_html, markdown, extracted_content, success, media, links, metadata, screenshot FROM crawled_data WHERE url = ?",
|
||||||
|
(url,),
|
||||||
|
)
|
||||||
result = cursor.fetchone()
|
result = cursor.fetchone()
|
||||||
conn.close()
|
conn.close()
|
||||||
return result
|
return result
|
||||||
@@ -56,12 +69,25 @@ def get_cached_url(url: str) -> Optional[Tuple[str, str, str, str, str, str, str
|
|||||||
print(f"Error retrieving cached URL: {e}")
|
print(f"Error retrieving cached URL: {e}")
|
||||||
return None
|
return None
|
||||||
|
|
||||||
def cache_url(url: str, html: str, cleaned_html: str, markdown: str, extracted_content: str, success: bool, media : str = "{}", links : str = "{}", metadata : str = "{}", screenshot: str = ""):
|
|
||||||
|
def cache_url(
|
||||||
|
url: str,
|
||||||
|
html: str,
|
||||||
|
cleaned_html: str,
|
||||||
|
markdown: str,
|
||||||
|
extracted_content: str,
|
||||||
|
success: bool,
|
||||||
|
media: str = "{}",
|
||||||
|
links: str = "{}",
|
||||||
|
metadata: str = "{}",
|
||||||
|
screenshot: str = "",
|
||||||
|
):
|
||||||
check_db_path()
|
check_db_path()
|
||||||
try:
|
try:
|
||||||
conn = sqlite3.connect(DB_PATH)
|
conn = sqlite3.connect(DB_PATH)
|
||||||
cursor = conn.cursor()
|
cursor = conn.cursor()
|
||||||
cursor.execute('''
|
cursor.execute(
|
||||||
|
"""
|
||||||
INSERT INTO crawled_data (url, html, cleaned_html, markdown, extracted_content, success, media, links, metadata, screenshot)
|
INSERT INTO crawled_data (url, html, cleaned_html, markdown, extracted_content, success, media, links, metadata, screenshot)
|
||||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||||
ON CONFLICT(url) DO UPDATE SET
|
ON CONFLICT(url) DO UPDATE SET
|
||||||
@@ -74,18 +100,32 @@ def cache_url(url: str, html: str, cleaned_html: str, markdown: str, extracted_c
|
|||||||
links = excluded.links,
|
links = excluded.links,
|
||||||
metadata = excluded.metadata,
|
metadata = excluded.metadata,
|
||||||
screenshot = excluded.screenshot
|
screenshot = excluded.screenshot
|
||||||
''', (url, html, cleaned_html, markdown, extracted_content, success, media, links, metadata, screenshot))
|
""",
|
||||||
|
(
|
||||||
|
url,
|
||||||
|
html,
|
||||||
|
cleaned_html,
|
||||||
|
markdown,
|
||||||
|
extracted_content,
|
||||||
|
success,
|
||||||
|
media,
|
||||||
|
links,
|
||||||
|
metadata,
|
||||||
|
screenshot,
|
||||||
|
),
|
||||||
|
)
|
||||||
conn.commit()
|
conn.commit()
|
||||||
conn.close()
|
conn.close()
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
print(f"Error caching URL: {e}")
|
print(f"Error caching URL: {e}")
|
||||||
|
|
||||||
|
|
||||||
def get_total_count() -> int:
|
def get_total_count() -> int:
|
||||||
check_db_path()
|
check_db_path()
|
||||||
try:
|
try:
|
||||||
conn = sqlite3.connect(DB_PATH)
|
conn = sqlite3.connect(DB_PATH)
|
||||||
cursor = conn.cursor()
|
cursor = conn.cursor()
|
||||||
cursor.execute('SELECT COUNT(*) FROM crawled_data')
|
cursor.execute("SELECT COUNT(*) FROM crawled_data")
|
||||||
result = cursor.fetchone()
|
result = cursor.fetchone()
|
||||||
conn.close()
|
conn.close()
|
||||||
return result[0]
|
return result[0]
|
||||||
@@ -93,43 +133,48 @@ def get_total_count() -> int:
|
|||||||
print(f"Error getting total count: {e}")
|
print(f"Error getting total count: {e}")
|
||||||
return 0
|
return 0
|
||||||
|
|
||||||
|
|
||||||
def clear_db():
|
def clear_db():
|
||||||
check_db_path()
|
check_db_path()
|
||||||
try:
|
try:
|
||||||
conn = sqlite3.connect(DB_PATH)
|
conn = sqlite3.connect(DB_PATH)
|
||||||
cursor = conn.cursor()
|
cursor = conn.cursor()
|
||||||
cursor.execute('DELETE FROM crawled_data')
|
cursor.execute("DELETE FROM crawled_data")
|
||||||
conn.commit()
|
conn.commit()
|
||||||
conn.close()
|
conn.close()
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
print(f"Error clearing database: {e}")
|
print(f"Error clearing database: {e}")
|
||||||
|
|
||||||
|
|
||||||
def flush_db():
|
def flush_db():
|
||||||
check_db_path()
|
check_db_path()
|
||||||
try:
|
try:
|
||||||
conn = sqlite3.connect(DB_PATH)
|
conn = sqlite3.connect(DB_PATH)
|
||||||
cursor = conn.cursor()
|
cursor = conn.cursor()
|
||||||
cursor.execute('DROP TABLE crawled_data')
|
cursor.execute("DROP TABLE crawled_data")
|
||||||
conn.commit()
|
conn.commit()
|
||||||
conn.close()
|
conn.close()
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
print(f"Error flushing database: {e}")
|
print(f"Error flushing database: {e}")
|
||||||
|
|
||||||
|
|
||||||
def update_existing_records(new_column: str = "media", default_value: str = "{}"):
|
def update_existing_records(new_column: str = "media", default_value: str = "{}"):
|
||||||
check_db_path()
|
check_db_path()
|
||||||
try:
|
try:
|
||||||
conn = sqlite3.connect(DB_PATH)
|
conn = sqlite3.connect(DB_PATH)
|
||||||
cursor = conn.cursor()
|
cursor = conn.cursor()
|
||||||
cursor.execute(f'UPDATE crawled_data SET {new_column} = "{default_value}" WHERE screenshot IS NULL')
|
cursor.execute(
|
||||||
|
f'UPDATE crawled_data SET {new_column} = "{default_value}" WHERE screenshot IS NULL'
|
||||||
|
)
|
||||||
conn.commit()
|
conn.commit()
|
||||||
conn.close()
|
conn.close()
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
print(f"Error updating existing records: {e}")
|
print(f"Error updating existing records: {e}")
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
# Delete the existing database file
|
# Delete the existing database file
|
||||||
if os.path.exists(DB_PATH):
|
if os.path.exists(DB_PATH):
|
||||||
os.remove(DB_PATH)
|
os.remove(DB_PATH)
|
||||||
init_db()
|
init_db()
|
||||||
# alter_db_add_screenshot("COL_NAME")
|
# alter_db_add_screenshot("COL_NAME")
|
||||||
|
|
||||||
@@ -4,6 +4,7 @@ from pathlib import Path
|
|||||||
from crawl4ai.async_logger import AsyncLogger
|
from crawl4ai.async_logger import AsyncLogger
|
||||||
from crawl4ai.llmtxt import AsyncLLMTextManager
|
from crawl4ai.llmtxt import AsyncLLMTextManager
|
||||||
|
|
||||||
|
|
||||||
class DocsManager:
|
class DocsManager:
|
||||||
def __init__(self, logger=None):
|
def __init__(self, logger=None):
|
||||||
self.docs_dir = Path.home() / ".crawl4ai" / "docs"
|
self.docs_dir = Path.home() / ".crawl4ai" / "docs"
|
||||||
@@ -21,7 +22,10 @@ class DocsManager:
|
|||||||
"""Copy from local docs or download from GitHub"""
|
"""Copy from local docs or download from GitHub"""
|
||||||
try:
|
try:
|
||||||
# Try local first
|
# Try local first
|
||||||
if self.local_docs.exists() and (any(self.local_docs.glob("*.md")) or any(self.local_docs.glob("*.tokens"))):
|
if self.local_docs.exists() and (
|
||||||
|
any(self.local_docs.glob("*.md"))
|
||||||
|
or any(self.local_docs.glob("*.tokens"))
|
||||||
|
):
|
||||||
# Empty the local docs directory
|
# Empty the local docs directory
|
||||||
for file_path in self.docs_dir.glob("*.md"):
|
for file_path in self.docs_dir.glob("*.md"):
|
||||||
file_path.unlink()
|
file_path.unlink()
|
||||||
@@ -36,14 +40,14 @@ class DocsManager:
|
|||||||
# Fallback to GitHub
|
# Fallback to GitHub
|
||||||
response = requests.get(
|
response = requests.get(
|
||||||
"https://api.github.com/repos/unclecode/crawl4ai/contents/docs/llm.txt",
|
"https://api.github.com/repos/unclecode/crawl4ai/contents/docs/llm.txt",
|
||||||
headers={'Accept': 'application/vnd.github.v3+json'}
|
headers={"Accept": "application/vnd.github.v3+json"},
|
||||||
)
|
)
|
||||||
response.raise_for_status()
|
response.raise_for_status()
|
||||||
|
|
||||||
for item in response.json():
|
for item in response.json():
|
||||||
if item['type'] == 'file' and item['name'].endswith('.md'):
|
if item["type"] == "file" and item["name"].endswith(".md"):
|
||||||
content = requests.get(item['download_url']).text
|
content = requests.get(item["download_url"]).text
|
||||||
with open(self.docs_dir / item['name'], 'w', encoding='utf-8') as f:
|
with open(self.docs_dir / item["name"], "w", encoding="utf-8") as f:
|
||||||
f.write(content)
|
f.write(content)
|
||||||
return True
|
return True
|
||||||
|
|
||||||
@@ -57,7 +61,11 @@ class DocsManager:
|
|||||||
# Remove [0-9]+_ prefix
|
# Remove [0-9]+_ prefix
|
||||||
names = [name.split("_", 1)[1] if name[0].isdigit() else name for name in names]
|
names = [name.split("_", 1)[1] if name[0].isdigit() else name for name in names]
|
||||||
# Exclude those end with .xs.md and .q.md
|
# Exclude those end with .xs.md and .q.md
|
||||||
names = [name for name in names if not name.endswith(".xs") and not name.endswith(".q")]
|
names = [
|
||||||
|
name
|
||||||
|
for name in names
|
||||||
|
if not name.endswith(".xs") and not name.endswith(".q")
|
||||||
|
]
|
||||||
return names
|
return names
|
||||||
|
|
||||||
def generate(self, sections, mode="extended"):
|
def generate(self, sections, mode="extended"):
|
||||||
@@ -11,16 +11,16 @@ from rank_bm25 import BM25Okapi
|
|||||||
from nltk.tokenize import word_tokenize
|
from nltk.tokenize import word_tokenize
|
||||||
from nltk.corpus import stopwords
|
from nltk.corpus import stopwords
|
||||||
from nltk.stem import WordNetLemmatizer
|
from nltk.stem import WordNetLemmatizer
|
||||||
from litellm import completion, batch_completion
|
from litellm import batch_completion
|
||||||
from .async_logger import AsyncLogger
|
from .async_logger import AsyncLogger
|
||||||
import litellm
|
import litellm
|
||||||
import pickle
|
import pickle
|
||||||
import hashlib # <--- ADDED for file-hash
|
import hashlib # <--- ADDED for file-hash
|
||||||
from fnmatch import fnmatch
|
|
||||||
import glob
|
import glob
|
||||||
|
|
||||||
litellm.set_verbose = False
|
litellm.set_verbose = False
|
||||||
|
|
||||||
|
|
||||||
def _compute_file_hash(file_path: Path) -> str:
|
def _compute_file_hash(file_path: Path) -> str:
|
||||||
"""Compute MD5 hash for the file's entire content."""
|
"""Compute MD5 hash for the file's entire content."""
|
||||||
hash_md5 = hashlib.md5()
|
hash_md5 = hashlib.md5()
|
||||||
@@ -29,13 +29,14 @@ def _compute_file_hash(file_path: Path) -> str:
|
|||||||
hash_md5.update(chunk)
|
hash_md5.update(chunk)
|
||||||
return hash_md5.hexdigest()
|
return hash_md5.hexdigest()
|
||||||
|
|
||||||
|
|
||||||
class AsyncLLMTextManager:
|
class AsyncLLMTextManager:
|
||||||
def __init__(
|
def __init__(
|
||||||
self,
|
self,
|
||||||
docs_dir: Path,
|
docs_dir: Path,
|
||||||
logger: Optional[AsyncLogger] = None,
|
logger: Optional[AsyncLogger] = None,
|
||||||
max_concurrent_calls: int = 5,
|
max_concurrent_calls: int = 5,
|
||||||
batch_size: int = 3
|
batch_size: int = 3,
|
||||||
) -> None:
|
) -> None:
|
||||||
self.docs_dir = docs_dir
|
self.docs_dir = docs_dir
|
||||||
self.logger = logger
|
self.logger = logger
|
||||||
@@ -51,7 +52,7 @@ class AsyncLLMTextManager:
|
|||||||
contents = []
|
contents = []
|
||||||
for file_path in doc_batch:
|
for file_path in doc_batch:
|
||||||
try:
|
try:
|
||||||
with open(file_path, 'r', encoding='utf-8') as f:
|
with open(file_path, "r", encoding="utf-8") as f:
|
||||||
contents.append(f.read())
|
contents.append(f.read())
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
self.logger.error(f"Error reading {file_path}: {str(e)}")
|
self.logger.error(f"Error reading {file_path}: {str(e)}")
|
||||||
@@ -77,43 +78,53 @@ Wrap your response in <index>...</index> tags.
|
|||||||
# Prepare messages for batch processing
|
# Prepare messages for batch processing
|
||||||
messages_list = [
|
messages_list = [
|
||||||
[
|
[
|
||||||
{"role": "user", "content": f"{prompt}\n\nGenerate index for this documentation:\n\n{content}"}
|
{
|
||||||
|
"role": "user",
|
||||||
|
"content": f"{prompt}\n\nGenerate index for this documentation:\n\n{content}",
|
||||||
|
}
|
||||||
]
|
]
|
||||||
for content in contents if content
|
for content in contents
|
||||||
|
if content
|
||||||
]
|
]
|
||||||
|
|
||||||
try:
|
try:
|
||||||
responses = batch_completion(
|
responses = batch_completion(
|
||||||
model="anthropic/claude-3-5-sonnet-latest",
|
model="anthropic/claude-3-5-sonnet-latest",
|
||||||
messages=messages_list,
|
messages=messages_list,
|
||||||
logger_fn=None
|
logger_fn=None,
|
||||||
)
|
)
|
||||||
|
|
||||||
# Process responses and save index files
|
# Process responses and save index files
|
||||||
for response, file_path in zip(responses, doc_batch):
|
for response, file_path in zip(responses, doc_batch):
|
||||||
try:
|
try:
|
||||||
index_content_match = re.search(
|
index_content_match = re.search(
|
||||||
r'<index>(.*?)</index>',
|
r"<index>(.*?)</index>",
|
||||||
response.choices[0].message.content,
|
response.choices[0].message.content,
|
||||||
re.DOTALL
|
re.DOTALL,
|
||||||
)
|
)
|
||||||
if not index_content_match:
|
if not index_content_match:
|
||||||
self.logger.warning(f"No <index>...</index> content found for {file_path}")
|
self.logger.warning(
|
||||||
|
f"No <index>...</index> content found for {file_path}"
|
||||||
|
)
|
||||||
continue
|
continue
|
||||||
|
|
||||||
index_content = re.sub(
|
index_content = re.sub(
|
||||||
r"\n\s*\n", "\n", index_content_match.group(1)
|
r"\n\s*\n", "\n", index_content_match.group(1)
|
||||||
).strip()
|
).strip()
|
||||||
if index_content:
|
if index_content:
|
||||||
index_file = file_path.with_suffix('.q.md')
|
index_file = file_path.with_suffix(".q.md")
|
||||||
with open(index_file, 'w', encoding='utf-8') as f:
|
with open(index_file, "w", encoding="utf-8") as f:
|
||||||
f.write(index_content)
|
f.write(index_content)
|
||||||
self.logger.info(f"Created index file: {index_file}")
|
self.logger.info(f"Created index file: {index_file}")
|
||||||
else:
|
else:
|
||||||
self.logger.warning(f"No index content found in response for {file_path}")
|
self.logger.warning(
|
||||||
|
f"No index content found in response for {file_path}"
|
||||||
|
)
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
self.logger.error(f"Error processing response for {file_path}: {str(e)}")
|
self.logger.error(
|
||||||
|
f"Error processing response for {file_path}: {str(e)}"
|
||||||
|
)
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
self.logger.error(f"Error in batch completion: {str(e)}")
|
self.logger.error(f"Error in batch completion: {str(e)}")
|
||||||
@@ -171,7 +182,12 @@ Wrap your response in <index>...</index> tags.
|
|||||||
|
|
||||||
lemmatizer = WordNetLemmatizer()
|
lemmatizer = WordNetLemmatizer()
|
||||||
stop_words = set(stopwords.words("english")) - {
|
stop_words = set(stopwords.words("english")) - {
|
||||||
"how", "what", "when", "where", "why", "which",
|
"how",
|
||||||
|
"what",
|
||||||
|
"when",
|
||||||
|
"where",
|
||||||
|
"why",
|
||||||
|
"which",
|
||||||
}
|
}
|
||||||
|
|
||||||
tokens = []
|
tokens = []
|
||||||
@@ -222,7 +238,9 @@ Wrap your response in <index>...</index> tags.
|
|||||||
self.logger.info("Checking which .q.md files need (re)indexing...")
|
self.logger.info("Checking which .q.md files need (re)indexing...")
|
||||||
|
|
||||||
# Gather all .q.md files
|
# Gather all .q.md files
|
||||||
q_files = [self.docs_dir / f for f in os.listdir(self.docs_dir) if f.endswith(".q.md")]
|
q_files = [
|
||||||
|
self.docs_dir / f for f in os.listdir(self.docs_dir) if f.endswith(".q.md")
|
||||||
|
]
|
||||||
|
|
||||||
# We'll store known (unchanged) facts in these lists
|
# We'll store known (unchanged) facts in these lists
|
||||||
existing_facts: List[str] = []
|
existing_facts: List[str] = []
|
||||||
@@ -243,7 +261,9 @@ Wrap your response in <index>...</index> tags.
|
|||||||
# Otherwise, load the existing cache and compare hash
|
# Otherwise, load the existing cache and compare hash
|
||||||
cache = self._load_or_create_token_cache(qf)
|
cache = self._load_or_create_token_cache(qf)
|
||||||
# If the .q.tokens was out of date (i.e. changed hash), we reindex
|
# If the .q.tokens was out of date (i.e. changed hash), we reindex
|
||||||
if len(cache["facts"]) == 0 or cache.get("content_hash") != _compute_file_hash(qf):
|
if len(cache["facts"]) == 0 or cache.get(
|
||||||
|
"content_hash"
|
||||||
|
) != _compute_file_hash(qf):
|
||||||
needSet.append(qf)
|
needSet.append(qf)
|
||||||
else:
|
else:
|
||||||
# File is unchanged → retrieve cached token data
|
# File is unchanged → retrieve cached token data
|
||||||
@@ -255,20 +275,29 @@ Wrap your response in <index>...</index> tags.
|
|||||||
if not needSet and not clear_cache:
|
if not needSet and not clear_cache:
|
||||||
# If no file needs reindexing, try loading existing index
|
# If no file needs reindexing, try loading existing index
|
||||||
if self.maybe_load_bm25_index(clear_cache=False):
|
if self.maybe_load_bm25_index(clear_cache=False):
|
||||||
self.logger.info("No new/changed .q.md files found. Using existing BM25 index.")
|
self.logger.info(
|
||||||
|
"No new/changed .q.md files found. Using existing BM25 index."
|
||||||
|
)
|
||||||
return
|
return
|
||||||
else:
|
else:
|
||||||
# If there's no existing index, we must build a fresh index from the old caches
|
# If there's no existing index, we must build a fresh index from the old caches
|
||||||
self.logger.info("No existing BM25 index found. Building from cached facts.")
|
self.logger.info(
|
||||||
|
"No existing BM25 index found. Building from cached facts."
|
||||||
|
)
|
||||||
if existing_facts:
|
if existing_facts:
|
||||||
self.logger.info(f"Building BM25 index with {len(existing_facts)} cached facts.")
|
self.logger.info(
|
||||||
|
f"Building BM25 index with {len(existing_facts)} cached facts."
|
||||||
|
)
|
||||||
self.bm25_index = BM25Okapi(existing_tokens)
|
self.bm25_index = BM25Okapi(existing_tokens)
|
||||||
self.tokenized_facts = existing_facts
|
self.tokenized_facts = existing_facts
|
||||||
with open(self.bm25_index_file, "wb") as f:
|
with open(self.bm25_index_file, "wb") as f:
|
||||||
pickle.dump({
|
pickle.dump(
|
||||||
"bm25_index": self.bm25_index,
|
{
|
||||||
"tokenized_facts": self.tokenized_facts
|
"bm25_index": self.bm25_index,
|
||||||
}, f)
|
"tokenized_facts": self.tokenized_facts,
|
||||||
|
},
|
||||||
|
f,
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
self.logger.warning("No facts found at all. Index remains empty.")
|
self.logger.warning("No facts found at all. Index remains empty.")
|
||||||
return
|
return
|
||||||
@@ -311,7 +340,9 @@ Wrap your response in <index>...</index> tags.
|
|||||||
self._save_token_cache(file, fresh_cache)
|
self._save_token_cache(file, fresh_cache)
|
||||||
|
|
||||||
mem_usage = process.memory_info().rss / 1024 / 1024
|
mem_usage = process.memory_info().rss / 1024 / 1024
|
||||||
self.logger.debug(f"Memory usage after {file.name}: {mem_usage:.2f}MB")
|
self.logger.debug(
|
||||||
|
f"Memory usage after {file.name}: {mem_usage:.2f}MB"
|
||||||
|
)
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
self.logger.error(f"Error processing {file}: {str(e)}")
|
self.logger.error(f"Error processing {file}: {str(e)}")
|
||||||
@@ -328,21 +359,28 @@ Wrap your response in <index>...</index> tags.
|
|||||||
all_tokens = existing_tokens + new_tokens
|
all_tokens = existing_tokens + new_tokens
|
||||||
|
|
||||||
# 3) Build BM25 index from combined facts
|
# 3) Build BM25 index from combined facts
|
||||||
self.logger.info(f"Building BM25 index with {len(all_facts)} total facts (old + new).")
|
self.logger.info(
|
||||||
|
f"Building BM25 index with {len(all_facts)} total facts (old + new)."
|
||||||
|
)
|
||||||
self.bm25_index = BM25Okapi(all_tokens)
|
self.bm25_index = BM25Okapi(all_tokens)
|
||||||
self.tokenized_facts = all_facts
|
self.tokenized_facts = all_facts
|
||||||
|
|
||||||
# 4) Save the updated BM25 index to disk
|
# 4) Save the updated BM25 index to disk
|
||||||
with open(self.bm25_index_file, "wb") as f:
|
with open(self.bm25_index_file, "wb") as f:
|
||||||
pickle.dump({
|
pickle.dump(
|
||||||
"bm25_index": self.bm25_index,
|
{
|
||||||
"tokenized_facts": self.tokenized_facts
|
"bm25_index": self.bm25_index,
|
||||||
}, f)
|
"tokenized_facts": self.tokenized_facts,
|
||||||
|
},
|
||||||
|
f,
|
||||||
|
)
|
||||||
|
|
||||||
final_mem = process.memory_info().rss / 1024 / 1024
|
final_mem = process.memory_info().rss / 1024 / 1024
|
||||||
self.logger.info(f"Search index updated. Final memory usage: {final_mem:.2f}MB")
|
self.logger.info(f"Search index updated. Final memory usage: {final_mem:.2f}MB")
|
||||||
|
|
||||||
async def generate_index_files(self, force_generate_facts: bool = False, clear_bm25_cache: bool = False) -> None:
|
async def generate_index_files(
|
||||||
|
self, force_generate_facts: bool = False, clear_bm25_cache: bool = False
|
||||||
|
) -> None:
|
||||||
"""
|
"""
|
||||||
Generate index files for all documents in parallel batches
|
Generate index files for all documents in parallel batches
|
||||||
|
|
||||||
@@ -353,15 +391,17 @@ Wrap your response in <index>...</index> tags.
|
|||||||
self.logger.info("Starting index generation for documentation files.")
|
self.logger.info("Starting index generation for documentation files.")
|
||||||
|
|
||||||
md_files = [
|
md_files = [
|
||||||
self.docs_dir / f for f in os.listdir(self.docs_dir)
|
self.docs_dir / f
|
||||||
if f.endswith('.md') and not any(f.endswith(x) for x in ['.q.md', '.xs.md'])
|
for f in os.listdir(self.docs_dir)
|
||||||
|
if f.endswith(".md") and not any(f.endswith(x) for x in [".q.md", ".xs.md"])
|
||||||
]
|
]
|
||||||
|
|
||||||
# Filter out files that already have .q files unless force=True
|
# Filter out files that already have .q files unless force=True
|
||||||
if not force_generate_facts:
|
if not force_generate_facts:
|
||||||
md_files = [
|
md_files = [
|
||||||
f for f in md_files
|
f
|
||||||
if not (self.docs_dir / f.name.replace('.md', '.q.md')).exists()
|
for f in md_files
|
||||||
|
if not (self.docs_dir / f.name.replace(".md", ".q.md")).exists()
|
||||||
]
|
]
|
||||||
|
|
||||||
if not md_files:
|
if not md_files:
|
||||||
@@ -369,8 +409,10 @@ Wrap your response in <index>...</index> tags.
|
|||||||
else:
|
else:
|
||||||
# Process documents in batches
|
# Process documents in batches
|
||||||
for i in range(0, len(md_files), self.batch_size):
|
for i in range(0, len(md_files), self.batch_size):
|
||||||
batch = md_files[i:i + self.batch_size]
|
batch = md_files[i : i + self.batch_size]
|
||||||
self.logger.info(f"Processing batch {i//self.batch_size + 1}/{(len(md_files)//self.batch_size) + 1}")
|
self.logger.info(
|
||||||
|
f"Processing batch {i//self.batch_size + 1}/{(len(md_files)//self.batch_size) + 1}"
|
||||||
|
)
|
||||||
await self._process_document_batch(batch)
|
await self._process_document_batch(batch)
|
||||||
|
|
||||||
self.logger.info("Index generation complete, building/updating search index.")
|
self.logger.info("Index generation complete, building/updating search index.")
|
||||||
@@ -378,21 +420,31 @@ Wrap your response in <index>...</index> tags.
|
|||||||
|
|
||||||
def generate(self, sections: List[str], mode: str = "extended") -> str:
|
def generate(self, sections: List[str], mode: str = "extended") -> str:
|
||||||
# Get all markdown files
|
# Get all markdown files
|
||||||
all_files = glob.glob(str(self.docs_dir / "[0-9]*.md")) + \
|
all_files = glob.glob(str(self.docs_dir / "[0-9]*.md")) + glob.glob(
|
||||||
glob.glob(str(self.docs_dir / "[0-9]*.xs.md"))
|
str(self.docs_dir / "[0-9]*.xs.md")
|
||||||
|
)
|
||||||
|
|
||||||
# Extract base names without extensions
|
# Extract base names without extensions
|
||||||
base_docs = {Path(f).name.split('.')[0] for f in all_files
|
base_docs = {
|
||||||
if not Path(f).name.endswith('.q.md')}
|
Path(f).name.split(".")[0]
|
||||||
|
for f in all_files
|
||||||
|
if not Path(f).name.endswith(".q.md")
|
||||||
|
}
|
||||||
|
|
||||||
# Filter by sections if provided
|
# Filter by sections if provided
|
||||||
if sections:
|
if sections:
|
||||||
base_docs = {doc for doc in base_docs
|
base_docs = {
|
||||||
if any(section.lower() in doc.lower() for section in sections)}
|
doc
|
||||||
|
for doc in base_docs
|
||||||
|
if any(section.lower() in doc.lower() for section in sections)
|
||||||
|
}
|
||||||
|
|
||||||
# Get file paths based on mode
|
# Get file paths based on mode
|
||||||
files = []
|
files = []
|
||||||
for doc in sorted(base_docs, key=lambda x: int(x.split('_')[0]) if x.split('_')[0].isdigit() else 999999):
|
for doc in sorted(
|
||||||
|
base_docs,
|
||||||
|
key=lambda x: int(x.split("_")[0]) if x.split("_")[0].isdigit() else 999999,
|
||||||
|
):
|
||||||
if mode == "condensed":
|
if mode == "condensed":
|
||||||
xs_file = self.docs_dir / f"{doc}.xs.md"
|
xs_file = self.docs_dir / f"{doc}.xs.md"
|
||||||
regular_file = self.docs_dir / f"{doc}.md"
|
regular_file = self.docs_dir / f"{doc}.md"
|
||||||
@@ -404,7 +456,7 @@ Wrap your response in <index>...</index> tags.
|
|||||||
content = []
|
content = []
|
||||||
for file in files:
|
for file in files:
|
||||||
try:
|
try:
|
||||||
with open(file, 'r', encoding='utf-8') as f:
|
with open(file, "r", encoding="utf-8") as f:
|
||||||
fname = Path(file).name
|
fname = Path(file).name
|
||||||
content.append(f"{'#'*20}\n# {fname}\n{'#'*20}\n\n{f.read()}")
|
content.append(f"{'#'*20}\n# {fname}\n{'#'*20}\n\n{f.read()}")
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
@@ -443,15 +495,9 @@ Wrap your response in <index>...</index> tags.
|
|||||||
for file, _ in ranked_files:
|
for file, _ in ranked_files:
|
||||||
main_doc = str(file).replace(".q.md", ".md")
|
main_doc = str(file).replace(".q.md", ".md")
|
||||||
if os.path.exists(self.docs_dir / main_doc):
|
if os.path.exists(self.docs_dir / main_doc):
|
||||||
with open(self.docs_dir / main_doc, "r", encoding='utf-8') as f:
|
with open(self.docs_dir / main_doc, "r", encoding="utf-8") as f:
|
||||||
only_file_name = main_doc.split("/")[-1]
|
only_file_name = main_doc.split("/")[-1]
|
||||||
content = [
|
content = ["#" * 20, f"# {only_file_name}", "#" * 20, "", f.read()]
|
||||||
"#" * 20,
|
|
||||||
f"# {only_file_name}",
|
|
||||||
"#" * 20,
|
|
||||||
"",
|
|
||||||
f.read()
|
|
||||||
]
|
|
||||||
results.append("\n".join(content))
|
results.append("\n".join(content))
|
||||||
|
|
||||||
return "\n\n---\n\n".join(results)
|
return "\n\n---\n\n".join(results)
|
||||||
@@ -482,7 +528,9 @@ Wrap your response in <index>...</index> tags.
|
|||||||
if len(components) == 3:
|
if len(components) == 3:
|
||||||
code_ref = components[2].strip()
|
code_ref = components[2].strip()
|
||||||
code_tokens = self.preprocess_text(code_ref)
|
code_tokens = self.preprocess_text(code_ref)
|
||||||
code_match_score = len(set(query_tokens) & set(code_tokens)) / len(query_tokens)
|
code_match_score = len(set(query_tokens) & set(code_tokens)) / len(
|
||||||
|
query_tokens
|
||||||
|
)
|
||||||
|
|
||||||
file_data[file_path]["total_score"] += score
|
file_data[file_path]["total_score"] += score
|
||||||
file_data[file_path]["match_count"] += 1
|
file_data[file_path]["match_count"] += 1
|
||||||
@@ -1,9 +1,9 @@
|
|||||||
# version_manager.py
|
# version_manager.py
|
||||||
import os
|
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from packaging import version
|
from packaging import version
|
||||||
from . import __version__
|
from . import __version__
|
||||||
|
|
||||||
|
|
||||||
class VersionManager:
|
class VersionManager:
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
self.home_dir = Path.home() / ".crawl4ai"
|
self.home_dir = Path.home() / ".crawl4ai"
|
||||||
@@ -27,4 +27,3 @@ class VersionManager:
|
|||||||
installed = self.get_installed_version()
|
installed = self.get_installed_version()
|
||||||
current = version.parse(__version__.__version__)
|
current = version.parse(__version__.__version__)
|
||||||
return installed is None or installed < current
|
return installed is None or installed < current
|
||||||
|
|
||||||
294
crawl4ai/legacy/web_crawler.py
Normal file
294
crawl4ai/legacy/web_crawler.py
Normal file
@@ -0,0 +1,294 @@
|
|||||||
|
import os, time
|
||||||
|
|
||||||
|
os.environ["TOKENIZERS_PARALLELISM"] = "false"
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
from .models import UrlModel, CrawlResult
|
||||||
|
from .database import init_db, get_cached_url, cache_url
|
||||||
|
from .utils import *
|
||||||
|
from .chunking_strategy import *
|
||||||
|
from .extraction_strategy import *
|
||||||
|
from .crawler_strategy import *
|
||||||
|
from typing import List
|
||||||
|
from concurrent.futures import ThreadPoolExecutor
|
||||||
|
from .content_scraping_strategy import WebScrapingStrategy
|
||||||
|
from .config import *
|
||||||
|
import warnings
|
||||||
|
import json
|
||||||
|
|
||||||
|
warnings.filterwarnings(
|
||||||
|
"ignore",
|
||||||
|
message='Field "model_name" has conflict with protected namespace "model_".',
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class WebCrawler:
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
crawler_strategy: CrawlerStrategy = None,
|
||||||
|
always_by_pass_cache: bool = False,
|
||||||
|
verbose: bool = False,
|
||||||
|
):
|
||||||
|
self.crawler_strategy = crawler_strategy or LocalSeleniumCrawlerStrategy(
|
||||||
|
verbose=verbose
|
||||||
|
)
|
||||||
|
self.always_by_pass_cache = always_by_pass_cache
|
||||||
|
self.crawl4ai_folder = os.path.join(
|
||||||
|
os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".crawl4ai"
|
||||||
|
)
|
||||||
|
os.makedirs(self.crawl4ai_folder, exist_ok=True)
|
||||||
|
os.makedirs(f"{self.crawl4ai_folder}/cache", exist_ok=True)
|
||||||
|
init_db()
|
||||||
|
self.ready = False
|
||||||
|
|
||||||
|
def warmup(self):
|
||||||
|
print("[LOG] 🌤️ Warming up the WebCrawler")
|
||||||
|
self.run(
|
||||||
|
url="https://google.com/",
|
||||||
|
word_count_threshold=5,
|
||||||
|
extraction_strategy=NoExtractionStrategy(),
|
||||||
|
bypass_cache=False,
|
||||||
|
verbose=False,
|
||||||
|
)
|
||||||
|
self.ready = True
|
||||||
|
print("[LOG] 🌞 WebCrawler is ready to crawl")
|
||||||
|
|
||||||
|
def fetch_page(
|
||||||
|
self,
|
||||||
|
url_model: UrlModel,
|
||||||
|
provider: str = DEFAULT_PROVIDER,
|
||||||
|
api_token: str = None,
|
||||||
|
extract_blocks_flag: bool = True,
|
||||||
|
word_count_threshold=MIN_WORD_THRESHOLD,
|
||||||
|
css_selector: str = None,
|
||||||
|
screenshot: bool = False,
|
||||||
|
use_cached_html: bool = False,
|
||||||
|
extraction_strategy: ExtractionStrategy = None,
|
||||||
|
chunking_strategy: ChunkingStrategy = RegexChunking(),
|
||||||
|
**kwargs,
|
||||||
|
) -> CrawlResult:
|
||||||
|
return self.run(
|
||||||
|
url_model.url,
|
||||||
|
word_count_threshold,
|
||||||
|
extraction_strategy or NoExtractionStrategy(),
|
||||||
|
chunking_strategy,
|
||||||
|
bypass_cache=url_model.forced,
|
||||||
|
css_selector=css_selector,
|
||||||
|
screenshot=screenshot,
|
||||||
|
**kwargs,
|
||||||
|
)
|
||||||
|
pass
|
||||||
|
|
||||||
|
def fetch_pages(
|
||||||
|
self,
|
||||||
|
url_models: List[UrlModel],
|
||||||
|
provider: str = DEFAULT_PROVIDER,
|
||||||
|
api_token: str = None,
|
||||||
|
extract_blocks_flag: bool = True,
|
||||||
|
word_count_threshold=MIN_WORD_THRESHOLD,
|
||||||
|
use_cached_html: bool = False,
|
||||||
|
css_selector: str = None,
|
||||||
|
screenshot: bool = False,
|
||||||
|
extraction_strategy: ExtractionStrategy = None,
|
||||||
|
chunking_strategy: ChunkingStrategy = RegexChunking(),
|
||||||
|
**kwargs,
|
||||||
|
) -> List[CrawlResult]:
|
||||||
|
extraction_strategy = extraction_strategy or NoExtractionStrategy()
|
||||||
|
|
||||||
|
def fetch_page_wrapper(url_model, *args, **kwargs):
|
||||||
|
return self.fetch_page(url_model, *args, **kwargs)
|
||||||
|
|
||||||
|
with ThreadPoolExecutor() as executor:
|
||||||
|
results = list(
|
||||||
|
executor.map(
|
||||||
|
fetch_page_wrapper,
|
||||||
|
url_models,
|
||||||
|
[provider] * len(url_models),
|
||||||
|
[api_token] * len(url_models),
|
||||||
|
[extract_blocks_flag] * len(url_models),
|
||||||
|
[word_count_threshold] * len(url_models),
|
||||||
|
[css_selector] * len(url_models),
|
||||||
|
[screenshot] * len(url_models),
|
||||||
|
[use_cached_html] * len(url_models),
|
||||||
|
[extraction_strategy] * len(url_models),
|
||||||
|
[chunking_strategy] * len(url_models),
|
||||||
|
*[kwargs] * len(url_models),
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
return results
|
||||||
|
|
||||||
|
def run(
|
||||||
|
self,
|
||||||
|
url: str,
|
||||||
|
word_count_threshold=MIN_WORD_THRESHOLD,
|
||||||
|
extraction_strategy: ExtractionStrategy = None,
|
||||||
|
chunking_strategy: ChunkingStrategy = RegexChunking(),
|
||||||
|
bypass_cache: bool = False,
|
||||||
|
css_selector: str = None,
|
||||||
|
screenshot: bool = False,
|
||||||
|
user_agent: str = None,
|
||||||
|
verbose=True,
|
||||||
|
**kwargs,
|
||||||
|
) -> CrawlResult:
|
||||||
|
try:
|
||||||
|
extraction_strategy = extraction_strategy or NoExtractionStrategy()
|
||||||
|
extraction_strategy.verbose = verbose
|
||||||
|
if not isinstance(extraction_strategy, ExtractionStrategy):
|
||||||
|
raise ValueError("Unsupported extraction strategy")
|
||||||
|
if not isinstance(chunking_strategy, ChunkingStrategy):
|
||||||
|
raise ValueError("Unsupported chunking strategy")
|
||||||
|
|
||||||
|
word_count_threshold = max(word_count_threshold, MIN_WORD_THRESHOLD)
|
||||||
|
|
||||||
|
cached = None
|
||||||
|
screenshot_data = None
|
||||||
|
extracted_content = None
|
||||||
|
if not bypass_cache and not self.always_by_pass_cache:
|
||||||
|
cached = get_cached_url(url)
|
||||||
|
|
||||||
|
if kwargs.get("warmup", True) and not self.ready:
|
||||||
|
return None
|
||||||
|
|
||||||
|
if cached:
|
||||||
|
html = sanitize_input_encode(cached[1])
|
||||||
|
extracted_content = sanitize_input_encode(cached[4])
|
||||||
|
if screenshot:
|
||||||
|
screenshot_data = cached[9]
|
||||||
|
if not screenshot_data:
|
||||||
|
cached = None
|
||||||
|
|
||||||
|
if not cached or not html:
|
||||||
|
if user_agent:
|
||||||
|
self.crawler_strategy.update_user_agent(user_agent)
|
||||||
|
t1 = time.time()
|
||||||
|
html = sanitize_input_encode(self.crawler_strategy.crawl(url, **kwargs))
|
||||||
|
t2 = time.time()
|
||||||
|
if verbose:
|
||||||
|
print(
|
||||||
|
f"[LOG] 🚀 Crawling done for {url}, success: {bool(html)}, time taken: {t2 - t1:.2f} seconds"
|
||||||
|
)
|
||||||
|
if screenshot:
|
||||||
|
screenshot_data = self.crawler_strategy.take_screenshot()
|
||||||
|
|
||||||
|
crawl_result = self.process_html(
|
||||||
|
url,
|
||||||
|
html,
|
||||||
|
extracted_content,
|
||||||
|
word_count_threshold,
|
||||||
|
extraction_strategy,
|
||||||
|
chunking_strategy,
|
||||||
|
css_selector,
|
||||||
|
screenshot_data,
|
||||||
|
verbose,
|
||||||
|
bool(cached),
|
||||||
|
**kwargs,
|
||||||
|
)
|
||||||
|
crawl_result.success = bool(html)
|
||||||
|
return crawl_result
|
||||||
|
except Exception as e:
|
||||||
|
if not hasattr(e, "msg"):
|
||||||
|
e.msg = str(e)
|
||||||
|
print(f"[ERROR] 🚫 Failed to crawl {url}, error: {e.msg}")
|
||||||
|
return CrawlResult(url=url, html="", success=False, error_message=e.msg)
|
||||||
|
|
||||||
|
def process_html(
|
||||||
|
self,
|
||||||
|
url: str,
|
||||||
|
html: str,
|
||||||
|
extracted_content: str,
|
||||||
|
word_count_threshold: int,
|
||||||
|
extraction_strategy: ExtractionStrategy,
|
||||||
|
chunking_strategy: ChunkingStrategy,
|
||||||
|
css_selector: str,
|
||||||
|
screenshot: bool,
|
||||||
|
verbose: bool,
|
||||||
|
is_cached: bool,
|
||||||
|
**kwargs,
|
||||||
|
) -> CrawlResult:
|
||||||
|
t = time.time()
|
||||||
|
# Extract content from HTML
|
||||||
|
try:
|
||||||
|
t1 = time.time()
|
||||||
|
scrapping_strategy = WebScrapingStrategy()
|
||||||
|
extra_params = {
|
||||||
|
k: v
|
||||||
|
for k, v in kwargs.items()
|
||||||
|
if k not in ["only_text", "image_description_min_word_threshold"]
|
||||||
|
}
|
||||||
|
result = scrapping_strategy.scrap(
|
||||||
|
url,
|
||||||
|
html,
|
||||||
|
word_count_threshold=word_count_threshold,
|
||||||
|
css_selector=css_selector,
|
||||||
|
only_text=kwargs.get("only_text", False),
|
||||||
|
image_description_min_word_threshold=kwargs.get(
|
||||||
|
"image_description_min_word_threshold",
|
||||||
|
IMAGE_DESCRIPTION_MIN_WORD_THRESHOLD,
|
||||||
|
),
|
||||||
|
**extra_params,
|
||||||
|
)
|
||||||
|
|
||||||
|
# result = get_content_of_website_optimized(url, html, word_count_threshold, css_selector=css_selector, only_text=kwargs.get("only_text", False))
|
||||||
|
if verbose:
|
||||||
|
print(
|
||||||
|
f"[LOG] 🚀 Content extracted for {url}, success: True, time taken: {time.time() - t1:.2f} seconds"
|
||||||
|
)
|
||||||
|
|
||||||
|
if result is None:
|
||||||
|
raise ValueError(f"Failed to extract content from the website: {url}")
|
||||||
|
except InvalidCSSSelectorError as e:
|
||||||
|
raise ValueError(str(e))
|
||||||
|
|
||||||
|
cleaned_html = sanitize_input_encode(result.get("cleaned_html", ""))
|
||||||
|
markdown = sanitize_input_encode(result.get("markdown", ""))
|
||||||
|
media = result.get("media", [])
|
||||||
|
links = result.get("links", [])
|
||||||
|
metadata = result.get("metadata", {})
|
||||||
|
|
||||||
|
if extracted_content is None:
|
||||||
|
if verbose:
|
||||||
|
print(
|
||||||
|
f"[LOG] 🔥 Extracting semantic blocks for {url}, Strategy: {extraction_strategy.name}"
|
||||||
|
)
|
||||||
|
|
||||||
|
sections = chunking_strategy.chunk(markdown)
|
||||||
|
extracted_content = extraction_strategy.run(url, sections)
|
||||||
|
extracted_content = json.dumps(
|
||||||
|
extracted_content, indent=4, default=str, ensure_ascii=False
|
||||||
|
)
|
||||||
|
|
||||||
|
if verbose:
|
||||||
|
print(
|
||||||
|
f"[LOG] 🚀 Extraction done for {url}, time taken: {time.time() - t:.2f} seconds."
|
||||||
|
)
|
||||||
|
|
||||||
|
screenshot = None if not screenshot else screenshot
|
||||||
|
|
||||||
|
if not is_cached:
|
||||||
|
cache_url(
|
||||||
|
url,
|
||||||
|
html,
|
||||||
|
cleaned_html,
|
||||||
|
markdown,
|
||||||
|
extracted_content,
|
||||||
|
True,
|
||||||
|
json.dumps(media),
|
||||||
|
json.dumps(links),
|
||||||
|
json.dumps(metadata),
|
||||||
|
screenshot=screenshot,
|
||||||
|
)
|
||||||
|
|
||||||
|
return CrawlResult(
|
||||||
|
url=url,
|
||||||
|
html=html,
|
||||||
|
cleaned_html=format_html(cleaned_html),
|
||||||
|
markdown=markdown,
|
||||||
|
media=media,
|
||||||
|
links=links,
|
||||||
|
metadata=metadata,
|
||||||
|
screenshot=screenshot,
|
||||||
|
extracted_content=extracted_content,
|
||||||
|
success=True,
|
||||||
|
error_message="",
|
||||||
|
)
|
||||||
@@ -1,42 +1,55 @@
|
|||||||
from abc import ABC, abstractmethod
|
from abc import ABC, abstractmethod
|
||||||
|
from tabnanny import verbose
|
||||||
from typing import Optional, Dict, Any, Tuple
|
from typing import Optional, Dict, Any, Tuple
|
||||||
from .models import MarkdownGenerationResult
|
from .models import MarkdownGenerationResult
|
||||||
from .html2text import CustomHTML2Text
|
from .html2text import CustomHTML2Text
|
||||||
from .content_filter_strategy import RelevantContentFilter, BM25ContentFilter
|
from .content_filter_strategy import RelevantContentFilter
|
||||||
import re
|
import re
|
||||||
from urllib.parse import urljoin
|
from urllib.parse import urljoin
|
||||||
|
|
||||||
# Pre-compile the regex pattern
|
# Pre-compile the regex pattern
|
||||||
LINK_PATTERN = re.compile(r'!?\[([^\]]+)\]\(([^)]+?)(?:\s+"([^"]*)")?\)')
|
LINK_PATTERN = re.compile(r'!?\[([^\]]+)\]\(([^)]+?)(?:\s+"([^"]*)")?\)')
|
||||||
|
|
||||||
|
|
||||||
def fast_urljoin(base: str, url: str) -> str:
|
def fast_urljoin(base: str, url: str) -> str:
|
||||||
"""Fast URL joining for common cases."""
|
"""Fast URL joining for common cases."""
|
||||||
if url.startswith(('http://', 'https://', 'mailto:', '//')):
|
if url.startswith(("http://", "https://", "mailto:", "//")):
|
||||||
return url
|
return url
|
||||||
if url.startswith('/'):
|
if url.startswith("/"):
|
||||||
# Handle absolute paths
|
# Handle absolute paths
|
||||||
if base.endswith('/'):
|
if base.endswith("/"):
|
||||||
return base[:-1] + url
|
return base[:-1] + url
|
||||||
return base + url
|
return base + url
|
||||||
return urljoin(base, url)
|
return urljoin(base, url)
|
||||||
|
|
||||||
|
|
||||||
class MarkdownGenerationStrategy(ABC):
|
class MarkdownGenerationStrategy(ABC):
|
||||||
"""Abstract base class for markdown generation strategies."""
|
"""Abstract base class for markdown generation strategies."""
|
||||||
def __init__(self, content_filter: Optional[RelevantContentFilter] = None, options: Optional[Dict[str, Any]] = None):
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
content_filter: Optional[RelevantContentFilter] = None,
|
||||||
|
options: Optional[Dict[str, Any]] = None,
|
||||||
|
verbose: bool = False,
|
||||||
|
):
|
||||||
self.content_filter = content_filter
|
self.content_filter = content_filter
|
||||||
self.options = options or {}
|
self.options = options or {}
|
||||||
|
self.verbose = verbose
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def generate_markdown(self,
|
def generate_markdown(
|
||||||
cleaned_html: str,
|
self,
|
||||||
base_url: str = "",
|
cleaned_html: str,
|
||||||
html2text_options: Optional[Dict[str, Any]] = None,
|
base_url: str = "",
|
||||||
content_filter: Optional[RelevantContentFilter] = None,
|
html2text_options: Optional[Dict[str, Any]] = None,
|
||||||
citations: bool = True,
|
content_filter: Optional[RelevantContentFilter] = None,
|
||||||
**kwargs) -> MarkdownGenerationResult:
|
citations: bool = True,
|
||||||
|
**kwargs,
|
||||||
|
) -> MarkdownGenerationResult:
|
||||||
"""Generate markdown from cleaned HTML."""
|
"""Generate markdown from cleaned HTML."""
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
class DefaultMarkdownGenerator(MarkdownGenerationStrategy):
|
class DefaultMarkdownGenerator(MarkdownGenerationStrategy):
|
||||||
"""
|
"""
|
||||||
Default implementation of markdown generation strategy.
|
Default implementation of markdown generation strategy.
|
||||||
@@ -54,10 +67,17 @@ class DefaultMarkdownGenerator(MarkdownGenerationStrategy):
|
|||||||
Returns:
|
Returns:
|
||||||
MarkdownGenerationResult: Result containing raw markdown, fit markdown, fit HTML, and references markdown.
|
MarkdownGenerationResult: Result containing raw markdown, fit markdown, fit HTML, and references markdown.
|
||||||
"""
|
"""
|
||||||
def __init__(self, content_filter: Optional[RelevantContentFilter] = None, options: Optional[Dict[str, Any]] = None):
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
content_filter: Optional[RelevantContentFilter] = None,
|
||||||
|
options: Optional[Dict[str, Any]] = None,
|
||||||
|
):
|
||||||
super().__init__(content_filter, options)
|
super().__init__(content_filter, options)
|
||||||
|
|
||||||
def convert_links_to_citations(self, markdown: str, base_url: str = "") -> Tuple[str, str]:
|
def convert_links_to_citations(
|
||||||
|
self, markdown: str, base_url: str = ""
|
||||||
|
) -> Tuple[str, str]:
|
||||||
"""
|
"""
|
||||||
Convert links in markdown to citations.
|
Convert links in markdown to citations.
|
||||||
|
|
||||||
@@ -83,28 +103,34 @@ class DefaultMarkdownGenerator(MarkdownGenerationStrategy):
|
|||||||
counter = 1
|
counter = 1
|
||||||
|
|
||||||
for match in LINK_PATTERN.finditer(markdown):
|
for match in LINK_PATTERN.finditer(markdown):
|
||||||
parts.append(markdown[last_end:match.start()])
|
parts.append(markdown[last_end : match.start()])
|
||||||
text, url, title = match.groups()
|
text, url, title = match.groups()
|
||||||
|
|
||||||
# Use cached URL if available, otherwise compute and cache
|
# Use cached URL if available, otherwise compute and cache
|
||||||
if base_url and not url.startswith(('http://', 'https://', 'mailto:')):
|
if base_url and not url.startswith(("http://", "https://", "mailto:")):
|
||||||
if url not in url_cache:
|
if url not in url_cache:
|
||||||
url_cache[url] = fast_urljoin(base_url, url)
|
url_cache[url] = fast_urljoin(base_url, url)
|
||||||
url = url_cache[url]
|
url = url_cache[url]
|
||||||
|
|
||||||
if url not in link_map:
|
if url not in link_map:
|
||||||
desc = []
|
desc = []
|
||||||
if title: desc.append(title)
|
if title:
|
||||||
if text and text != title: desc.append(text)
|
desc.append(title)
|
||||||
|
if text and text != title:
|
||||||
|
desc.append(text)
|
||||||
link_map[url] = (counter, ": " + " - ".join(desc) if desc else "")
|
link_map[url] = (counter, ": " + " - ".join(desc) if desc else "")
|
||||||
counter += 1
|
counter += 1
|
||||||
|
|
||||||
num = link_map[url][0]
|
num = link_map[url][0]
|
||||||
parts.append(f"{text}⟨{num}⟩" if not match.group(0).startswith('!') else f"![{text}⟨{num}⟩]")
|
parts.append(
|
||||||
|
f"{text}⟨{num}⟩"
|
||||||
|
if not match.group(0).startswith("!")
|
||||||
|
else f"![{text}⟨{num}⟩]"
|
||||||
|
)
|
||||||
last_end = match.end()
|
last_end = match.end()
|
||||||
|
|
||||||
parts.append(markdown[last_end:])
|
parts.append(markdown[last_end:])
|
||||||
converted_text = ''.join(parts)
|
converted_text = "".join(parts)
|
||||||
|
|
||||||
# Pre-build reference strings
|
# Pre-build reference strings
|
||||||
references = ["\n\n## References\n\n"]
|
references = ["\n\n## References\n\n"]
|
||||||
@@ -113,16 +139,18 @@ class DefaultMarkdownGenerator(MarkdownGenerationStrategy):
|
|||||||
for url, (num, desc) in sorted(link_map.items(), key=lambda x: x[1][0])
|
for url, (num, desc) in sorted(link_map.items(), key=lambda x: x[1][0])
|
||||||
)
|
)
|
||||||
|
|
||||||
return converted_text, ''.join(references)
|
return converted_text, "".join(references)
|
||||||
|
|
||||||
def generate_markdown(self,
|
def generate_markdown(
|
||||||
cleaned_html: str,
|
self,
|
||||||
base_url: str = "",
|
cleaned_html: str,
|
||||||
html2text_options: Optional[Dict[str, Any]] = None,
|
base_url: str = "",
|
||||||
options: Optional[Dict[str, Any]] = None,
|
html2text_options: Optional[Dict[str, Any]] = None,
|
||||||
content_filter: Optional[RelevantContentFilter] = None,
|
options: Optional[Dict[str, Any]] = None,
|
||||||
citations: bool = True,
|
content_filter: Optional[RelevantContentFilter] = None,
|
||||||
**kwargs) -> MarkdownGenerationResult:
|
citations: bool = True,
|
||||||
|
**kwargs,
|
||||||
|
) -> MarkdownGenerationResult:
|
||||||
"""
|
"""
|
||||||
Generate markdown with citations from cleaned HTML.
|
Generate markdown with citations from cleaned HTML.
|
||||||
|
|
||||||
@@ -143,41 +171,86 @@ class DefaultMarkdownGenerator(MarkdownGenerationStrategy):
|
|||||||
Returns:
|
Returns:
|
||||||
MarkdownGenerationResult: Result containing raw markdown, fit markdown, fit HTML, and references markdown.
|
MarkdownGenerationResult: Result containing raw markdown, fit markdown, fit HTML, and references markdown.
|
||||||
"""
|
"""
|
||||||
# Initialize HTML2Text with options
|
try:
|
||||||
h = CustomHTML2Text()
|
# Initialize HTML2Text with default options for better conversion
|
||||||
if html2text_options:
|
h = CustomHTML2Text(baseurl=base_url)
|
||||||
h.update_params(**html2text_options)
|
default_options = {
|
||||||
elif options:
|
"body_width": 0, # Disable text wrapping
|
||||||
h.update_params(**options)
|
"ignore_emphasis": False,
|
||||||
elif self.options:
|
"ignore_links": False,
|
||||||
h.update_params(**self.options)
|
"ignore_images": False,
|
||||||
|
"protect_links": False,
|
||||||
|
"single_line_break": True,
|
||||||
|
"mark_code": True,
|
||||||
|
"escape_snob": False,
|
||||||
|
}
|
||||||
|
|
||||||
# Generate raw markdown
|
# Update with custom options if provided
|
||||||
raw_markdown = h.handle(cleaned_html)
|
if html2text_options:
|
||||||
raw_markdown = raw_markdown.replace(' ```', '```')
|
default_options.update(html2text_options)
|
||||||
|
elif options:
|
||||||
|
default_options.update(options)
|
||||||
|
elif self.options:
|
||||||
|
default_options.update(self.options)
|
||||||
|
|
||||||
# Convert links to citations
|
h.update_params(**default_options)
|
||||||
markdown_with_citations: str = ""
|
|
||||||
references_markdown: str = ""
|
# Ensure we have valid input
|
||||||
if citations:
|
if not cleaned_html:
|
||||||
markdown_with_citations, references_markdown = self.convert_links_to_citations(
|
cleaned_html = ""
|
||||||
raw_markdown, base_url
|
elif not isinstance(cleaned_html, str):
|
||||||
|
cleaned_html = str(cleaned_html)
|
||||||
|
|
||||||
|
# Generate raw markdown
|
||||||
|
try:
|
||||||
|
raw_markdown = h.handle(cleaned_html)
|
||||||
|
except Exception as e:
|
||||||
|
raw_markdown = f"Error converting HTML to markdown: {str(e)}"
|
||||||
|
|
||||||
|
raw_markdown = raw_markdown.replace(" ```", "```")
|
||||||
|
|
||||||
|
# Convert links to citations
|
||||||
|
markdown_with_citations: str = raw_markdown
|
||||||
|
references_markdown: str = ""
|
||||||
|
if citations:
|
||||||
|
try:
|
||||||
|
(
|
||||||
|
markdown_with_citations,
|
||||||
|
references_markdown,
|
||||||
|
) = self.convert_links_to_citations(raw_markdown, base_url)
|
||||||
|
except Exception as e:
|
||||||
|
markdown_with_citations = raw_markdown
|
||||||
|
references_markdown = f"Error generating citations: {str(e)}"
|
||||||
|
|
||||||
|
# Generate fit markdown if content filter is provided
|
||||||
|
fit_markdown: Optional[str] = ""
|
||||||
|
filtered_html: Optional[str] = ""
|
||||||
|
if content_filter or self.content_filter:
|
||||||
|
try:
|
||||||
|
content_filter = content_filter or self.content_filter
|
||||||
|
filtered_html = content_filter.filter_content(cleaned_html)
|
||||||
|
filtered_html = "\n".join(
|
||||||
|
"<div>{}</div>".format(s) for s in filtered_html
|
||||||
|
)
|
||||||
|
fit_markdown = h.handle(filtered_html)
|
||||||
|
except Exception as e:
|
||||||
|
fit_markdown = f"Error generating fit markdown: {str(e)}"
|
||||||
|
filtered_html = ""
|
||||||
|
|
||||||
|
return MarkdownGenerationResult(
|
||||||
|
raw_markdown=raw_markdown or "",
|
||||||
|
markdown_with_citations=markdown_with_citations or "",
|
||||||
|
references_markdown=references_markdown or "",
|
||||||
|
fit_markdown=fit_markdown or "",
|
||||||
|
fit_html=filtered_html or "",
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
# If anything fails, return empty strings with error message
|
||||||
|
error_msg = f"Error in markdown generation: {str(e)}"
|
||||||
|
return MarkdownGenerationResult(
|
||||||
|
raw_markdown=error_msg,
|
||||||
|
markdown_with_citations=error_msg,
|
||||||
|
references_markdown="",
|
||||||
|
fit_markdown="",
|
||||||
|
fit_html="",
|
||||||
)
|
)
|
||||||
|
|
||||||
# Generate fit markdown if content filter is provided
|
|
||||||
fit_markdown: Optional[str] = ""
|
|
||||||
filtered_html: Optional[str] = ""
|
|
||||||
if content_filter or self.content_filter:
|
|
||||||
content_filter = content_filter or self.content_filter
|
|
||||||
filtered_html = content_filter.filter_content(cleaned_html)
|
|
||||||
filtered_html = '\n'.join('<div>{}</div>'.format(s) for s in filtered_html)
|
|
||||||
fit_markdown = h.handle(filtered_html)
|
|
||||||
|
|
||||||
return MarkdownGenerationResult(
|
|
||||||
raw_markdown=raw_markdown,
|
|
||||||
markdown_with_citations=markdown_with_citations,
|
|
||||||
references_markdown=references_markdown,
|
|
||||||
fit_markdown=fit_markdown,
|
|
||||||
fit_html=filtered_html,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|||||||
@@ -1,13 +1,11 @@
|
|||||||
import os
|
import os
|
||||||
import asyncio
|
import asyncio
|
||||||
import logging
|
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
import aiosqlite
|
import aiosqlite
|
||||||
from typing import Optional
|
from typing import Optional
|
||||||
import xxhash
|
import xxhash
|
||||||
import aiofiles
|
import aiofiles
|
||||||
import shutil
|
import shutil
|
||||||
import time
|
|
||||||
from datetime import datetime
|
from datetime import datetime
|
||||||
from .async_logger import AsyncLogger, LogLevel
|
from .async_logger import AsyncLogger, LogLevel
|
||||||
|
|
||||||
@@ -17,6 +15,7 @@ logger = AsyncLogger(log_level=LogLevel.DEBUG, verbose=True)
|
|||||||
# logging.basicConfig(level=logging.INFO)
|
# logging.basicConfig(level=logging.INFO)
|
||||||
# logger = logging.getLogger(__name__)
|
# logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class DatabaseMigration:
|
class DatabaseMigration:
|
||||||
def __init__(self, db_path: str):
|
def __init__(self, db_path: str):
|
||||||
self.db_path = db_path
|
self.db_path = db_path
|
||||||
@@ -24,11 +23,11 @@ class DatabaseMigration:
|
|||||||
|
|
||||||
def _ensure_content_dirs(self, base_path: str) -> dict:
|
def _ensure_content_dirs(self, base_path: str) -> dict:
|
||||||
dirs = {
|
dirs = {
|
||||||
'html': 'html_content',
|
"html": "html_content",
|
||||||
'cleaned': 'cleaned_html',
|
"cleaned": "cleaned_html",
|
||||||
'markdown': 'markdown_content',
|
"markdown": "markdown_content",
|
||||||
'extracted': 'extracted_content',
|
"extracted": "extracted_content",
|
||||||
'screenshots': 'screenshots'
|
"screenshots": "screenshots",
|
||||||
}
|
}
|
||||||
content_paths = {}
|
content_paths = {}
|
||||||
for key, dirname in dirs.items():
|
for key, dirname in dirs.items():
|
||||||
@@ -52,7 +51,7 @@ class DatabaseMigration:
|
|||||||
file_path = os.path.join(self.content_paths[content_type], content_hash)
|
file_path = os.path.join(self.content_paths[content_type], content_hash)
|
||||||
|
|
||||||
if not os.path.exists(file_path):
|
if not os.path.exists(file_path):
|
||||||
async with aiofiles.open(file_path, 'w', encoding='utf-8') as f:
|
async with aiofiles.open(file_path, "w", encoding="utf-8") as f:
|
||||||
await f.write(content)
|
await f.write(content)
|
||||||
|
|
||||||
return content_hash
|
return content_hash
|
||||||
@@ -66,24 +65,36 @@ class DatabaseMigration:
|
|||||||
async with aiosqlite.connect(self.db_path) as db:
|
async with aiosqlite.connect(self.db_path) as db:
|
||||||
# Get all rows
|
# Get all rows
|
||||||
async with db.execute(
|
async with db.execute(
|
||||||
'''SELECT url, html, cleaned_html, markdown,
|
"""SELECT url, html, cleaned_html, markdown,
|
||||||
extracted_content, screenshot FROM crawled_data'''
|
extracted_content, screenshot FROM crawled_data"""
|
||||||
) as cursor:
|
) as cursor:
|
||||||
rows = await cursor.fetchall()
|
rows = await cursor.fetchall()
|
||||||
|
|
||||||
migrated_count = 0
|
migrated_count = 0
|
||||||
for row in rows:
|
for row in rows:
|
||||||
url, html, cleaned_html, markdown, extracted_content, screenshot = row
|
(
|
||||||
|
url,
|
||||||
|
html,
|
||||||
|
cleaned_html,
|
||||||
|
markdown,
|
||||||
|
extracted_content,
|
||||||
|
screenshot,
|
||||||
|
) = row
|
||||||
|
|
||||||
# Store content in files and get hashes
|
# Store content in files and get hashes
|
||||||
html_hash = await self._store_content(html, 'html')
|
html_hash = await self._store_content(html, "html")
|
||||||
cleaned_hash = await self._store_content(cleaned_html, 'cleaned')
|
cleaned_hash = await self._store_content(cleaned_html, "cleaned")
|
||||||
markdown_hash = await self._store_content(markdown, 'markdown')
|
markdown_hash = await self._store_content(markdown, "markdown")
|
||||||
extracted_hash = await self._store_content(extracted_content, 'extracted')
|
extracted_hash = await self._store_content(
|
||||||
screenshot_hash = await self._store_content(screenshot, 'screenshots')
|
extracted_content, "extracted"
|
||||||
|
)
|
||||||
|
screenshot_hash = await self._store_content(
|
||||||
|
screenshot, "screenshots"
|
||||||
|
)
|
||||||
|
|
||||||
# Update database with hashes
|
# Update database with hashes
|
||||||
await db.execute('''
|
await db.execute(
|
||||||
|
"""
|
||||||
UPDATE crawled_data
|
UPDATE crawled_data
|
||||||
SET html = ?,
|
SET html = ?,
|
||||||
cleaned_html = ?,
|
cleaned_html = ?,
|
||||||
@@ -91,26 +102,37 @@ class DatabaseMigration:
|
|||||||
extracted_content = ?,
|
extracted_content = ?,
|
||||||
screenshot = ?
|
screenshot = ?
|
||||||
WHERE url = ?
|
WHERE url = ?
|
||||||
''', (html_hash, cleaned_hash, markdown_hash,
|
""",
|
||||||
extracted_hash, screenshot_hash, url))
|
(
|
||||||
|
html_hash,
|
||||||
|
cleaned_hash,
|
||||||
|
markdown_hash,
|
||||||
|
extracted_hash,
|
||||||
|
screenshot_hash,
|
||||||
|
url,
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
migrated_count += 1
|
migrated_count += 1
|
||||||
if migrated_count % 100 == 0:
|
if migrated_count % 100 == 0:
|
||||||
logger.info(f"Migrated {migrated_count} records...", tag="INIT")
|
logger.info(f"Migrated {migrated_count} records...", tag="INIT")
|
||||||
|
|
||||||
|
|
||||||
await db.commit()
|
await db.commit()
|
||||||
logger.success(f"Migration completed. {migrated_count} records processed.", tag="COMPLETE")
|
logger.success(
|
||||||
|
f"Migration completed. {migrated_count} records processed.",
|
||||||
|
tag="COMPLETE",
|
||||||
|
)
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
# logger.error(f"Migration failed: {e}")
|
# logger.error(f"Migration failed: {e}")
|
||||||
logger.error(
|
logger.error(
|
||||||
message="Migration failed: {error}",
|
message="Migration failed: {error}",
|
||||||
tag="ERROR",
|
tag="ERROR",
|
||||||
params={"error": str(e)}
|
params={"error": str(e)},
|
||||||
)
|
)
|
||||||
raise e
|
raise e
|
||||||
|
|
||||||
|
|
||||||
async def backup_database(db_path: str) -> str:
|
async def backup_database(db_path: str) -> str:
|
||||||
"""Create backup of existing database"""
|
"""Create backup of existing database"""
|
||||||
if not os.path.exists(db_path):
|
if not os.path.exists(db_path):
|
||||||
@@ -118,7 +140,7 @@ async def backup_database(db_path: str) -> str:
|
|||||||
return None
|
return None
|
||||||
|
|
||||||
# Create backup with timestamp
|
# Create backup with timestamp
|
||||||
timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
|
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
|
||||||
backup_path = f"{db_path}.backup_{timestamp}"
|
backup_path = f"{db_path}.backup_{timestamp}"
|
||||||
|
|
||||||
try:
|
try:
|
||||||
@@ -132,12 +154,11 @@ async def backup_database(db_path: str) -> str:
|
|||||||
except Exception as e:
|
except Exception as e:
|
||||||
# logger.error(f"Backup failed: {e}")
|
# logger.error(f"Backup failed: {e}")
|
||||||
logger.error(
|
logger.error(
|
||||||
message="Migration failed: {error}",
|
message="Migration failed: {error}", tag="ERROR", params={"error": str(e)}
|
||||||
tag="ERROR",
|
)
|
||||||
params={"error": str(e)}
|
|
||||||
)
|
|
||||||
raise e
|
raise e
|
||||||
|
|
||||||
|
|
||||||
async def run_migration(db_path: Optional[str] = None):
|
async def run_migration(db_path: Optional[str] = None):
|
||||||
"""Run database migration"""
|
"""Run database migration"""
|
||||||
if db_path is None:
|
if db_path is None:
|
||||||
@@ -155,14 +176,19 @@ async def run_migration(db_path: Optional[str] = None):
|
|||||||
migration = DatabaseMigration(db_path)
|
migration = DatabaseMigration(db_path)
|
||||||
await migration.migrate_database()
|
await migration.migrate_database()
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
"""CLI entry point for migration"""
|
"""CLI entry point for migration"""
|
||||||
import argparse
|
import argparse
|
||||||
parser = argparse.ArgumentParser(description='Migrate Crawl4AI database to file-based storage')
|
|
||||||
parser.add_argument('--db-path', help='Custom database path')
|
parser = argparse.ArgumentParser(
|
||||||
|
description="Migrate Crawl4AI database to file-based storage"
|
||||||
|
)
|
||||||
|
parser.add_argument("--db-path", help="Custom database path")
|
||||||
args = parser.parse_args()
|
args = parser.parse_args()
|
||||||
|
|
||||||
asyncio.run(run_migration(args.db_path))
|
asyncio.run(run_migration(args.db_path))
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
main()
|
main()
|
||||||
@@ -2,75 +2,86 @@ from functools import lru_cache
|
|||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
import subprocess, os
|
import subprocess, os
|
||||||
import shutil
|
import shutil
|
||||||
import tarfile
|
|
||||||
from .model_loader import *
|
from .model_loader import *
|
||||||
import argparse
|
import argparse
|
||||||
import urllib.request
|
|
||||||
from crawl4ai.config import MODEL_REPO_BRANCH
|
from crawl4ai.config import MODEL_REPO_BRANCH
|
||||||
|
|
||||||
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
|
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
|
||||||
|
|
||||||
|
|
||||||
@lru_cache()
|
@lru_cache()
|
||||||
def get_available_memory(device):
|
def get_available_memory(device):
|
||||||
import torch
|
import torch
|
||||||
if device.type == 'cuda':
|
|
||||||
|
if device.type == "cuda":
|
||||||
return torch.cuda.get_device_properties(device).total_memory
|
return torch.cuda.get_device_properties(device).total_memory
|
||||||
elif device.type == 'mps':
|
elif device.type == "mps":
|
||||||
return 48 * 1024 ** 3 # Assuming 8GB for MPS, as a conservative estimate
|
return 48 * 1024**3 # Assuming 8GB for MPS, as a conservative estimate
|
||||||
else:
|
else:
|
||||||
return 0
|
return 0
|
||||||
|
|
||||||
|
|
||||||
@lru_cache()
|
@lru_cache()
|
||||||
def calculate_batch_size(device):
|
def calculate_batch_size(device):
|
||||||
available_memory = get_available_memory(device)
|
available_memory = get_available_memory(device)
|
||||||
|
|
||||||
if device.type == 'cpu':
|
if device.type == "cpu":
|
||||||
return 16
|
return 16
|
||||||
elif device.type in ['cuda', 'mps']:
|
elif device.type in ["cuda", "mps"]:
|
||||||
# Adjust these thresholds based on your model size and available memory
|
# Adjust these thresholds based on your model size and available memory
|
||||||
if available_memory >= 31 * 1024 ** 3: # > 32GB
|
if available_memory >= 31 * 1024**3: # > 32GB
|
||||||
return 256
|
return 256
|
||||||
elif available_memory >= 15 * 1024 ** 3: # > 16GB to 32GB
|
elif available_memory >= 15 * 1024**3: # > 16GB to 32GB
|
||||||
return 128
|
return 128
|
||||||
elif available_memory >= 8 * 1024 ** 3: # 8GB to 16GB
|
elif available_memory >= 8 * 1024**3: # 8GB to 16GB
|
||||||
return 64
|
return 64
|
||||||
else:
|
else:
|
||||||
return 32
|
return 32
|
||||||
else:
|
else:
|
||||||
return 16 # Default batch size
|
return 16 # Default batch size
|
||||||
|
|
||||||
|
|
||||||
@lru_cache()
|
@lru_cache()
|
||||||
def get_device():
|
def get_device():
|
||||||
import torch
|
import torch
|
||||||
|
|
||||||
if torch.cuda.is_available():
|
if torch.cuda.is_available():
|
||||||
device = torch.device('cuda')
|
device = torch.device("cuda")
|
||||||
elif torch.backends.mps.is_available():
|
elif torch.backends.mps.is_available():
|
||||||
device = torch.device('mps')
|
device = torch.device("mps")
|
||||||
else:
|
else:
|
||||||
device = torch.device('cpu')
|
device = torch.device("cpu")
|
||||||
return device
|
return device
|
||||||
|
|
||||||
|
|
||||||
def set_model_device(model):
|
def set_model_device(model):
|
||||||
device = get_device()
|
device = get_device()
|
||||||
model.to(device)
|
model.to(device)
|
||||||
return model, device
|
return model, device
|
||||||
|
|
||||||
|
|
||||||
@lru_cache()
|
@lru_cache()
|
||||||
def get_home_folder():
|
def get_home_folder():
|
||||||
home_folder = os.path.join(os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".crawl4ai")
|
home_folder = os.path.join(
|
||||||
|
os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".crawl4ai"
|
||||||
|
)
|
||||||
os.makedirs(home_folder, exist_ok=True)
|
os.makedirs(home_folder, exist_ok=True)
|
||||||
os.makedirs(f"{home_folder}/cache", exist_ok=True)
|
os.makedirs(f"{home_folder}/cache", exist_ok=True)
|
||||||
os.makedirs(f"{home_folder}/models", exist_ok=True)
|
os.makedirs(f"{home_folder}/models", exist_ok=True)
|
||||||
return home_folder
|
return home_folder
|
||||||
|
|
||||||
|
|
||||||
@lru_cache()
|
@lru_cache()
|
||||||
def load_bert_base_uncased():
|
def load_bert_base_uncased():
|
||||||
from transformers import BertTokenizer, BertModel, AutoTokenizer, AutoModel
|
from transformers import BertTokenizer, BertModel
|
||||||
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', resume_download=None)
|
|
||||||
model = BertModel.from_pretrained('bert-base-uncased', resume_download=None)
|
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", resume_download=None)
|
||||||
|
model = BertModel.from_pretrained("bert-base-uncased", resume_download=None)
|
||||||
model.eval()
|
model.eval()
|
||||||
model, device = set_model_device(model)
|
model, device = set_model_device(model)
|
||||||
return tokenizer, model
|
return tokenizer, model
|
||||||
|
|
||||||
|
|
||||||
@lru_cache()
|
@lru_cache()
|
||||||
def load_HF_embedding_model(model_name="BAAI/bge-small-en-v1.5") -> tuple:
|
def load_HF_embedding_model(model_name="BAAI/bge-small-en-v1.5") -> tuple:
|
||||||
"""Load the Hugging Face model for embedding.
|
"""Load the Hugging Face model for embedding.
|
||||||
@@ -81,30 +92,35 @@ def load_HF_embedding_model(model_name="BAAI/bge-small-en-v1.5") -> tuple:
|
|||||||
Returns:
|
Returns:
|
||||||
tuple: The tokenizer and model.
|
tuple: The tokenizer and model.
|
||||||
"""
|
"""
|
||||||
from transformers import BertTokenizer, BertModel, AutoTokenizer, AutoModel
|
from transformers import AutoTokenizer, AutoModel
|
||||||
|
|
||||||
tokenizer = AutoTokenizer.from_pretrained(model_name, resume_download=None)
|
tokenizer = AutoTokenizer.from_pretrained(model_name, resume_download=None)
|
||||||
model = AutoModel.from_pretrained(model_name, resume_download=None)
|
model = AutoModel.from_pretrained(model_name, resume_download=None)
|
||||||
model.eval()
|
model.eval()
|
||||||
model, device = set_model_device(model)
|
model, device = set_model_device(model)
|
||||||
return tokenizer, model
|
return tokenizer, model
|
||||||
|
|
||||||
|
|
||||||
@lru_cache()
|
@lru_cache()
|
||||||
def load_text_classifier():
|
def load_text_classifier():
|
||||||
from transformers import AutoTokenizer, AutoModelForSequenceClassification
|
from transformers import AutoTokenizer, AutoModelForSequenceClassification
|
||||||
from transformers import pipeline
|
from transformers import pipeline
|
||||||
import torch
|
|
||||||
|
|
||||||
tokenizer = AutoTokenizer.from_pretrained("dstefa/roberta-base_topic_classification_nyt_news")
|
tokenizer = AutoTokenizer.from_pretrained(
|
||||||
model = AutoModelForSequenceClassification.from_pretrained("dstefa/roberta-base_topic_classification_nyt_news")
|
"dstefa/roberta-base_topic_classification_nyt_news"
|
||||||
|
)
|
||||||
|
model = AutoModelForSequenceClassification.from_pretrained(
|
||||||
|
"dstefa/roberta-base_topic_classification_nyt_news"
|
||||||
|
)
|
||||||
model.eval()
|
model.eval()
|
||||||
model, device = set_model_device(model)
|
model, device = set_model_device(model)
|
||||||
pipe = pipeline("text-classification", model=model, tokenizer=tokenizer)
|
pipe = pipeline("text-classification", model=model, tokenizer=tokenizer)
|
||||||
return pipe
|
return pipe
|
||||||
|
|
||||||
|
|
||||||
@lru_cache()
|
@lru_cache()
|
||||||
def load_text_multilabel_classifier():
|
def load_text_multilabel_classifier():
|
||||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer
|
from transformers import AutoModelForSequenceClassification, AutoTokenizer
|
||||||
import numpy as np
|
|
||||||
from scipy.special import expit
|
from scipy.special import expit
|
||||||
import torch
|
import torch
|
||||||
|
|
||||||
@@ -117,17 +133,26 @@ def load_text_multilabel_classifier():
|
|||||||
# device = torch.device("cpu")
|
# device = torch.device("cpu")
|
||||||
# # return load_spacy_model(), torch.device("cpu")
|
# # return load_spacy_model(), torch.device("cpu")
|
||||||
|
|
||||||
|
|
||||||
MODEL = "cardiffnlp/tweet-topic-21-multi"
|
MODEL = "cardiffnlp/tweet-topic-21-multi"
|
||||||
tokenizer = AutoTokenizer.from_pretrained(MODEL, resume_download=None)
|
tokenizer = AutoTokenizer.from_pretrained(MODEL, resume_download=None)
|
||||||
model = AutoModelForSequenceClassification.from_pretrained(MODEL, resume_download=None)
|
model = AutoModelForSequenceClassification.from_pretrained(
|
||||||
|
MODEL, resume_download=None
|
||||||
|
)
|
||||||
model.eval()
|
model.eval()
|
||||||
model, device = set_model_device(model)
|
model, device = set_model_device(model)
|
||||||
class_mapping = model.config.id2label
|
class_mapping = model.config.id2label
|
||||||
|
|
||||||
def _classifier(texts, threshold=0.5, max_length=64):
|
def _classifier(texts, threshold=0.5, max_length=64):
|
||||||
tokens = tokenizer(texts, return_tensors='pt', padding=True, truncation=True, max_length=max_length)
|
tokens = tokenizer(
|
||||||
tokens = {key: val.to(device) for key, val in tokens.items()} # Move tokens to the selected device
|
texts,
|
||||||
|
return_tensors="pt",
|
||||||
|
padding=True,
|
||||||
|
truncation=True,
|
||||||
|
max_length=max_length,
|
||||||
|
)
|
||||||
|
tokens = {
|
||||||
|
key: val.to(device) for key, val in tokens.items()
|
||||||
|
} # Move tokens to the selected device
|
||||||
|
|
||||||
with torch.no_grad():
|
with torch.no_grad():
|
||||||
output = model(**tokens)
|
output = model(**tokens)
|
||||||
@@ -138,25 +163,31 @@ def load_text_multilabel_classifier():
|
|||||||
|
|
||||||
batch_labels = []
|
batch_labels = []
|
||||||
for prediction in predictions:
|
for prediction in predictions:
|
||||||
labels = [class_mapping[i] for i, value in enumerate(prediction) if value == 1]
|
labels = [
|
||||||
|
class_mapping[i] for i, value in enumerate(prediction) if value == 1
|
||||||
|
]
|
||||||
batch_labels.append(labels)
|
batch_labels.append(labels)
|
||||||
|
|
||||||
return batch_labels
|
return batch_labels
|
||||||
|
|
||||||
return _classifier, device
|
return _classifier, device
|
||||||
|
|
||||||
|
|
||||||
@lru_cache()
|
@lru_cache()
|
||||||
def load_nltk_punkt():
|
def load_nltk_punkt():
|
||||||
import nltk
|
import nltk
|
||||||
|
|
||||||
try:
|
try:
|
||||||
nltk.data.find('tokenizers/punkt')
|
nltk.data.find("tokenizers/punkt")
|
||||||
except LookupError:
|
except LookupError:
|
||||||
nltk.download('punkt')
|
nltk.download("punkt")
|
||||||
return nltk.data.find('tokenizers/punkt')
|
return nltk.data.find("tokenizers/punkt")
|
||||||
|
|
||||||
|
|
||||||
@lru_cache()
|
@lru_cache()
|
||||||
def load_spacy_model():
|
def load_spacy_model():
|
||||||
import spacy
|
import spacy
|
||||||
|
|
||||||
name = "models/reuters"
|
name = "models/reuters"
|
||||||
home_folder = get_home_folder()
|
home_folder = get_home_folder()
|
||||||
model_folder = Path(home_folder) / name
|
model_folder = Path(home_folder) / name
|
||||||
@@ -176,7 +207,9 @@ def load_spacy_model():
|
|||||||
if model_folder.exists():
|
if model_folder.exists():
|
||||||
shutil.rmtree(model_folder)
|
shutil.rmtree(model_folder)
|
||||||
except PermissionError:
|
except PermissionError:
|
||||||
print("[WARNING] Unable to remove existing folders. Please manually delete the following folders and try again:")
|
print(
|
||||||
|
"[WARNING] Unable to remove existing folders. Please manually delete the following folders and try again:"
|
||||||
|
)
|
||||||
print(f"- {repo_folder}")
|
print(f"- {repo_folder}")
|
||||||
print(f"- {model_folder}")
|
print(f"- {model_folder}")
|
||||||
return None
|
return None
|
||||||
@@ -187,7 +220,7 @@ def load_spacy_model():
|
|||||||
["git", "clone", "-b", branch, repo_url, str(repo_folder)],
|
["git", "clone", "-b", branch, repo_url, str(repo_folder)],
|
||||||
stdout=subprocess.DEVNULL,
|
stdout=subprocess.DEVNULL,
|
||||||
stderr=subprocess.DEVNULL,
|
stderr=subprocess.DEVNULL,
|
||||||
check=True
|
check=True,
|
||||||
)
|
)
|
||||||
|
|
||||||
# Create the models directory if it doesn't exist
|
# Create the models directory if it doesn't exist
|
||||||
@@ -215,6 +248,7 @@ def load_spacy_model():
|
|||||||
print(f"Error loading spacy model: {e}")
|
print(f"Error loading spacy model: {e}")
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
||||||
def download_all_models(remove_existing=False):
|
def download_all_models(remove_existing=False):
|
||||||
"""Download all models required for Crawl4AI."""
|
"""Download all models required for Crawl4AI."""
|
||||||
if remove_existing:
|
if remove_existing:
|
||||||
@@ -243,14 +277,20 @@ def download_all_models(remove_existing=False):
|
|||||||
load_nltk_punkt()
|
load_nltk_punkt()
|
||||||
print("[LOG] ✅ All models downloaded successfully.")
|
print("[LOG] ✅ All models downloaded successfully.")
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
print("[LOG] Welcome to the Crawl4AI Model Downloader!")
|
print("[LOG] Welcome to the Crawl4AI Model Downloader!")
|
||||||
print("[LOG] This script will download all the models required for Crawl4AI.")
|
print("[LOG] This script will download all the models required for Crawl4AI.")
|
||||||
parser = argparse.ArgumentParser(description="Crawl4AI Model Downloader")
|
parser = argparse.ArgumentParser(description="Crawl4AI Model Downloader")
|
||||||
parser.add_argument('--remove-existing', action='store_true', help="Remove existing models before downloading")
|
parser.add_argument(
|
||||||
|
"--remove-existing",
|
||||||
|
action="store_true",
|
||||||
|
help="Remove existing models before downloading",
|
||||||
|
)
|
||||||
args = parser.parse_args()
|
args = parser.parse_args()
|
||||||
|
|
||||||
download_all_models(remove_existing=args.remove_existing)
|
download_all_models(remove_existing=args.remove_existing)
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
main()
|
main()
|
||||||
|
|||||||
@@ -1,8 +1,70 @@
|
|||||||
from pydantic import BaseModel, HttpUrl
|
from re import U
|
||||||
|
from pydantic import BaseModel, HttpUrl, PrivateAttr
|
||||||
from typing import List, Dict, Optional, Callable, Awaitable, Union, Any
|
from typing import List, Dict, Optional, Callable, Awaitable, Union, Any
|
||||||
|
from enum import Enum
|
||||||
from dataclasses import dataclass
|
from dataclasses import dataclass
|
||||||
from .ssl_certificate import SSLCertificate
|
from .ssl_certificate import SSLCertificate
|
||||||
|
from datetime import datetime
|
||||||
|
from datetime import timedelta
|
||||||
|
|
||||||
|
|
||||||
|
###############################
|
||||||
|
# Dispatcher Models
|
||||||
|
###############################
|
||||||
|
@dataclass
|
||||||
|
class DomainState:
|
||||||
|
last_request_time: float = 0
|
||||||
|
current_delay: float = 0
|
||||||
|
fail_count: int = 0
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class CrawlerTaskResult:
|
||||||
|
task_id: str
|
||||||
|
url: str
|
||||||
|
result: "CrawlResult"
|
||||||
|
memory_usage: float
|
||||||
|
peak_memory: float
|
||||||
|
start_time: Union[datetime, float]
|
||||||
|
end_time: Union[datetime, float]
|
||||||
|
error_message: str = ""
|
||||||
|
|
||||||
|
|
||||||
|
class CrawlStatus(Enum):
|
||||||
|
QUEUED = "QUEUED"
|
||||||
|
IN_PROGRESS = "IN_PROGRESS"
|
||||||
|
COMPLETED = "COMPLETED"
|
||||||
|
FAILED = "FAILED"
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class CrawlStats:
|
||||||
|
task_id: str
|
||||||
|
url: str
|
||||||
|
status: CrawlStatus
|
||||||
|
start_time: Optional[datetime] = None
|
||||||
|
end_time: Optional[datetime] = None
|
||||||
|
memory_usage: float = 0.0
|
||||||
|
peak_memory: float = 0.0
|
||||||
|
error_message: str = ""
|
||||||
|
|
||||||
|
@property
|
||||||
|
def duration(self) -> str:
|
||||||
|
if not self.start_time:
|
||||||
|
return "0:00"
|
||||||
|
end = self.end_time or datetime.now()
|
||||||
|
duration = end - self.start_time
|
||||||
|
return str(timedelta(seconds=int(duration.total_seconds())))
|
||||||
|
|
||||||
|
|
||||||
|
class DisplayMode(Enum):
|
||||||
|
DETAILED = "DETAILED"
|
||||||
|
AGGREGATED = "AGGREGATED"
|
||||||
|
|
||||||
|
|
||||||
|
###############################
|
||||||
|
# Crawler Models
|
||||||
|
###############################
|
||||||
@dataclass
|
@dataclass
|
||||||
class TokenUsage:
|
class TokenUsage:
|
||||||
completion_tokens: int = 0
|
completion_tokens: int = 0
|
||||||
@@ -16,6 +78,7 @@ class UrlModel(BaseModel):
|
|||||||
url: HttpUrl
|
url: HttpUrl
|
||||||
forced: bool = False
|
forced: bool = False
|
||||||
|
|
||||||
|
|
||||||
class MarkdownGenerationResult(BaseModel):
|
class MarkdownGenerationResult(BaseModel):
|
||||||
raw_markdown: str
|
raw_markdown: str
|
||||||
markdown_with_citations: str
|
markdown_with_citations: str
|
||||||
@@ -23,6 +86,28 @@ class MarkdownGenerationResult(BaseModel):
|
|||||||
fit_markdown: Optional[str] = None
|
fit_markdown: Optional[str] = None
|
||||||
fit_html: Optional[str] = None
|
fit_html: Optional[str] = None
|
||||||
|
|
||||||
|
def __str__(self):
|
||||||
|
return self.raw_markdown
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class TraversalStats:
|
||||||
|
"""Statistics for the traversal process"""
|
||||||
|
|
||||||
|
start_time: datetime = datetime.now()
|
||||||
|
urls_processed: int = 0
|
||||||
|
urls_failed: int = 0
|
||||||
|
urls_skipped: int = 0
|
||||||
|
total_depth_reached: int = 0
|
||||||
|
current_depth: int = 0
|
||||||
|
|
||||||
|
class DispatchResult(BaseModel):
|
||||||
|
task_id: str
|
||||||
|
memory_usage: float
|
||||||
|
peak_memory: float
|
||||||
|
start_time: Union[datetime, float]
|
||||||
|
end_time: Union[datetime, float]
|
||||||
|
error_message: str = ""
|
||||||
|
|
||||||
class CrawlResult(BaseModel):
|
class CrawlResult(BaseModel):
|
||||||
url: str
|
url: str
|
||||||
html: str
|
html: str
|
||||||
@@ -31,12 +116,10 @@ class CrawlResult(BaseModel):
|
|||||||
media: Dict[str, List[Dict]] = {}
|
media: Dict[str, List[Dict]] = {}
|
||||||
links: Dict[str, List[Dict]] = {}
|
links: Dict[str, List[Dict]] = {}
|
||||||
downloaded_files: Optional[List[str]] = None
|
downloaded_files: Optional[List[str]] = None
|
||||||
|
js_execution_result: Optional[Dict[str, Any]] = None
|
||||||
screenshot: Optional[str] = None
|
screenshot: Optional[str] = None
|
||||||
pdf : Optional[bytes] = None
|
pdf: Optional[bytes] = None
|
||||||
markdown: Optional[Union[str, MarkdownGenerationResult]] = None
|
_markdown: Optional[MarkdownGenerationResult] = PrivateAttr(default=None)
|
||||||
markdown_v2: Optional[MarkdownGenerationResult] = None
|
|
||||||
fit_markdown: Optional[str] = None
|
|
||||||
fit_html: Optional[str] = None
|
|
||||||
extracted_content: Optional[str] = None
|
extracted_content: Optional[str] = None
|
||||||
metadata: Optional[dict] = None
|
metadata: Optional[dict] = None
|
||||||
error_message: Optional[str] = None
|
error_message: Optional[str] = None
|
||||||
@@ -44,18 +127,181 @@ class CrawlResult(BaseModel):
|
|||||||
response_headers: Optional[dict] = None
|
response_headers: Optional[dict] = None
|
||||||
status_code: Optional[int] = None
|
status_code: Optional[int] = None
|
||||||
ssl_certificate: Optional[SSLCertificate] = None
|
ssl_certificate: Optional[SSLCertificate] = None
|
||||||
|
dispatch_result: Optional[DispatchResult] = None
|
||||||
|
redirected_url: Optional[str] = None
|
||||||
|
|
||||||
class Config:
|
class Config:
|
||||||
arbitrary_types_allowed = True
|
arbitrary_types_allowed = True
|
||||||
|
|
||||||
|
# NOTE: The StringCompatibleMarkdown class, custom __init__ method, property getters/setters,
|
||||||
|
# and model_dump override all exist to support a smooth transition from markdown as a string
|
||||||
|
# to markdown as a MarkdownGenerationResult object, while maintaining backward compatibility.
|
||||||
|
#
|
||||||
|
# This allows code that expects markdown to be a string to continue working, while also
|
||||||
|
# providing access to the full MarkdownGenerationResult object's properties.
|
||||||
|
#
|
||||||
|
# The markdown_v2 property is deprecated and raises an error directing users to use markdown.
|
||||||
|
#
|
||||||
|
# When backward compatibility is no longer needed in future versions, this entire mechanism
|
||||||
|
# can be simplified to a standard field with no custom accessors or serialization logic.
|
||||||
|
|
||||||
|
def __init__(self, **data):
|
||||||
|
markdown_result = data.pop('markdown', None)
|
||||||
|
super().__init__(**data)
|
||||||
|
if markdown_result is not None:
|
||||||
|
self._markdown = markdown_result
|
||||||
|
|
||||||
|
@property
|
||||||
|
def markdown(self):
|
||||||
|
"""
|
||||||
|
Property that returns a StringCompatibleMarkdown object that behaves like
|
||||||
|
a string but also provides access to MarkdownGenerationResult attributes.
|
||||||
|
|
||||||
|
This approach allows backward compatibility with code that expects 'markdown'
|
||||||
|
to be a string, while providing access to the full MarkdownGenerationResult.
|
||||||
|
"""
|
||||||
|
if self._markdown is None:
|
||||||
|
return None
|
||||||
|
return StringCompatibleMarkdown(self._markdown)
|
||||||
|
|
||||||
|
@markdown.setter
|
||||||
|
def markdown(self, value):
|
||||||
|
"""
|
||||||
|
Setter for the markdown property.
|
||||||
|
"""
|
||||||
|
self._markdown = value
|
||||||
|
|
||||||
|
@property
|
||||||
|
def markdown_v2(self):
|
||||||
|
"""
|
||||||
|
Deprecated property that raises an AttributeError when accessed.
|
||||||
|
|
||||||
|
This property exists to inform users that 'markdown_v2' has been
|
||||||
|
deprecated and they should use 'markdown' instead.
|
||||||
|
"""
|
||||||
|
raise AttributeError(
|
||||||
|
"The 'markdown_v2' attribute is deprecated and has been removed. "
|
||||||
|
"""Please use 'markdown' instead, which now returns a MarkdownGenerationResult, with
|
||||||
|
following properties:
|
||||||
|
- raw_markdown: The raw markdown string
|
||||||
|
- markdown_with_citations: The markdown string with citations
|
||||||
|
- references_markdown: The markdown string with references
|
||||||
|
- fit_markdown: The markdown string with fit text
|
||||||
|
"""
|
||||||
|
)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def fit_markdown(self):
|
||||||
|
"""
|
||||||
|
Deprecated property that raises an AttributeError when accessed.
|
||||||
|
"""
|
||||||
|
raise AttributeError(
|
||||||
|
"The 'fit_markdown' attribute is deprecated and has been removed. "
|
||||||
|
"Please use 'markdown.fit_markdown' instead."
|
||||||
|
)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def fit_html(self):
|
||||||
|
"""
|
||||||
|
Deprecated property that raises an AttributeError when accessed.
|
||||||
|
"""
|
||||||
|
raise AttributeError(
|
||||||
|
"The 'fit_html' attribute is deprecated and has been removed. "
|
||||||
|
"Please use 'markdown.fit_html' instead."
|
||||||
|
)
|
||||||
|
|
||||||
|
def model_dump(self, *args, **kwargs):
|
||||||
|
"""
|
||||||
|
Override model_dump to include the _markdown private attribute in serialization.
|
||||||
|
|
||||||
|
This override is necessary because:
|
||||||
|
1. PrivateAttr fields are excluded from serialization by default
|
||||||
|
2. We need to maintain backward compatibility by including the 'markdown' field
|
||||||
|
in the serialized output
|
||||||
|
3. We're transitioning from 'markdown_v2' to enhancing 'markdown' to hold
|
||||||
|
the same type of data
|
||||||
|
|
||||||
|
Future developers: This method ensures that the markdown content is properly
|
||||||
|
serialized despite being stored in a private attribute. If the serialization
|
||||||
|
requirements change, this is where you would update the logic.
|
||||||
|
"""
|
||||||
|
result = super().model_dump(*args, **kwargs)
|
||||||
|
if self._markdown is not None:
|
||||||
|
result["markdown"] = self._markdown.model_dump()
|
||||||
|
return result
|
||||||
|
|
||||||
|
class StringCompatibleMarkdown(str):
|
||||||
|
"""A string subclass that also provides access to MarkdownGenerationResult attributes"""
|
||||||
|
def __new__(cls, markdown_result):
|
||||||
|
return super().__new__(cls, markdown_result.raw_markdown)
|
||||||
|
|
||||||
|
def __init__(self, markdown_result):
|
||||||
|
self._markdown_result = markdown_result
|
||||||
|
|
||||||
|
def __getattr__(self, name):
|
||||||
|
return getattr(self._markdown_result, name)
|
||||||
|
|
||||||
|
# END of backward compatibility code for markdown/markdown_v2.
|
||||||
|
# When removing this code in the future, make sure to:
|
||||||
|
# 1. Replace the private attribute and property with a standard field
|
||||||
|
# 2. Update any serialization logic that might depend on the current behavior
|
||||||
|
|
||||||
class AsyncCrawlResponse(BaseModel):
|
class AsyncCrawlResponse(BaseModel):
|
||||||
html: str
|
html: str
|
||||||
response_headers: Dict[str, str]
|
response_headers: Dict[str, str]
|
||||||
|
js_execution_result: Optional[Dict[str, Any]] = None
|
||||||
status_code: int
|
status_code: int
|
||||||
screenshot: Optional[str] = None
|
screenshot: Optional[str] = None
|
||||||
pdf_data: Optional[bytes] = None
|
pdf_data: Optional[bytes] = None
|
||||||
get_delayed_content: Optional[Callable[[Optional[float]], Awaitable[str]]] = None
|
get_delayed_content: Optional[Callable[[Optional[float]], Awaitable[str]]] = None
|
||||||
downloaded_files: Optional[List[str]] = None
|
downloaded_files: Optional[List[str]] = None
|
||||||
ssl_certificate: Optional[SSLCertificate] = None
|
ssl_certificate: Optional[SSLCertificate] = None
|
||||||
|
redirected_url: Optional[str] = None
|
||||||
|
|
||||||
class Config:
|
class Config:
|
||||||
arbitrary_types_allowed = True
|
arbitrary_types_allowed = True
|
||||||
|
|
||||||
|
|
||||||
|
###############################
|
||||||
|
# Scraping Models
|
||||||
|
###############################
|
||||||
|
class MediaItem(BaseModel):
    """A single media element (image by default) extracted from a page."""

    src: Optional[str] = ""
    data: Optional[str] = ""
    alt: Optional[str] = ""
    desc: Optional[str] = ""
    score: Optional[int] = 0
    type: str = "image"
    group_id: Optional[int] = 0
    format: Optional[str] = None
    width: Optional[int] = None
|
||||||
|
|
||||||
|
|
||||||
|
class Link(BaseModel):
    """A hyperlink extracted from a page."""

    href: Optional[str] = ""
    text: Optional[str] = ""
    title: Optional[str] = ""
    base_domain: Optional[str] = ""
|
||||||
|
|
||||||
|
|
||||||
|
class Media(BaseModel):
    """Collections of media items discovered while scraping a page."""

    images: List[MediaItem] = []
    # MediaItem is reused for videos and audios for now; dedicated Video/Audio
    # models can be introduced later if richer fields are needed.
    videos: List[MediaItem] = []
    audios: List[MediaItem] = []
|
||||||
|
|
||||||
|
|
||||||
|
class Links(BaseModel):
    """Links found on a page, grouped as internal or external."""

    internal: List[Link] = []
    external: List[Link] = []
|
||||||
|
|
||||||
|
|
||||||
|
class ScrapingResult(BaseModel):
    """Outcome of a content-scraping strategy run."""

    cleaned_html: str
    success: bool
    media: Media = Media()
    links: Links = Links()
    metadata: Dict[str, Any] = {}
|
||||||
|
|||||||
165
crawl4ai/processors/pdf/__init__.py
Normal file
165
crawl4ai/processors/pdf/__init__.py
Normal file
@@ -0,0 +1,165 @@
|
|||||||
|
from pathlib import Path
|
||||||
|
import asyncio
|
||||||
|
from dataclasses import asdict
|
||||||
|
from crawl4ai.async_logger import AsyncLogger
|
||||||
|
from crawl4ai.async_crawler_strategy import AsyncCrawlerStrategy
|
||||||
|
from crawl4ai.models import AsyncCrawlResponse, ScrapingResult
|
||||||
|
from crawl4ai.content_scraping_strategy import ContentScrapingStrategy
|
||||||
|
from .processor import NaivePDFProcessorStrategy # Assuming your current PDF code is in pdf_processor.py
|
||||||
|
|
||||||
|
class PDFCrawlerStrategy(AsyncCrawlerStrategy):
    """Crawler strategy for PDF resources.

    Performs no real fetching: ``crawl`` returns a placeholder response with
    a PDF content type, and the scraping strategy does the actual work.
    """

    def __init__(self, logger: AsyncLogger = None):
        self.logger = logger

    async def crawl(self, url: str, **kwargs) -> AsyncCrawlResponse:
        # Hand the scraper an empty shell; it handles the real processing.
        return AsyncCrawlResponse(
            html="",
            response_headers={"Content-Type": "application/pdf"},
            status_code=200,
        )

    async def close(self):
        # Nothing to release for this strategy.
        pass

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self.close()
|
||||||
|
|
||||||
|
class PDFContentScrapingStrategy(ContentScrapingStrategy):
    """
    A content scraping strategy for PDF files.

    Attributes:
        save_images_locally (bool): Whether to save extracted images to disk.
        extract_images (bool): Whether to extract images from the PDF.
        image_save_dir (str): Directory to save extracted images.
        logger (AsyncLogger): Logger instance for recording events and errors.

    Methods:
        scrap(url: str, html: str, **params) -> ScrapingResult:
            Scrap content from a PDF file.
        ascrap(url: str, html: str, **kwargs) -> ScrapingResult:
            Asynchronous version of scrap.

    Usage:
        strategy = PDFContentScrapingStrategy(
            save_images_locally=False,
            extract_images=False,
            image_save_dir=None,
            logger=logger
        )
    """

    def __init__(self,
                 save_images_locally: bool = False,
                 extract_images: bool = False,
                 image_save_dir: str = None,
                 batch_size: int = 4,
                 logger: AsyncLogger = None):
        self.logger = logger
        self.pdf_processor = NaivePDFProcessorStrategy(
            save_images_locally=save_images_locally,
            extract_images=extract_images,
            image_save_dir=image_save_dir,
            batch_size=batch_size
        )

    def scrap(self, url: str, html: str, **params) -> ScrapingResult:
        """
        Scrap content from a PDF file.

        Args:
            url (str): The URL (or local path / file:// URL) of the PDF file.
            html (str): The HTML content of the page (unused; the PDF is the source).
            **params: Additional parameters (currently unused).

        Returns:
            ScrapingResult: The scraped content (per-page HTML, images, links, metadata).

        Raises:
            RuntimeError: If a remote PDF cannot be downloaded.
        """
        # Download if URL, or use the local path directly.
        pdf_path = self._get_pdf_path(url)
        try:
            # Process pages in parallel batches.
            result = self.pdf_processor.process_batch(Path(pdf_path))

            # Combine per-page HTML into one document, tagging each page.
            cleaned_html = f"""
<html>
<head><meta name="pdf-pages" content="{len(result.pages)}"></head>
<body>
{''.join(f'<div class="pdf-page" data-page="{i+1}">{page.html}</div>'
         for i, page in enumerate(result.pages))}
</body>
</html>
"""

            # Accumulate media and links, annotating each with its page number.
            media = {"images": []}
            links = {"urls": []}

            for page in result.pages:
                for img in page.images:
                    img["page"] = page.page_number
                    media["images"].append(img)

                for link in page.links:
                    links["urls"].append({
                        "url": link,
                        "page": page.page_number
                    })

            # NOTE(review): `links` uses a "urls" key, but the Links model
            # declares `internal`/`external` fields — verify the entries are
            # not silently dropped during model validation.
            return ScrapingResult(
                cleaned_html=cleaned_html,
                success=True,
                media=media,
                links=links,
                metadata=asdict(result.metadata)
            )
        finally:
            # Cleanup the temp file if we downloaded it.
            if url.startswith(("http://", "https://")):
                Path(pdf_path).unlink(missing_ok=True)

    async def ascrap(self, url: str, html: str, **kwargs) -> ScrapingResult:
        # Run the blocking sync implementation off the event loop.
        return await asyncio.to_thread(self.scrap, url, html, **kwargs)

    def _get_pdf_path(self, url: str) -> str:
        """Return a local filesystem path for `url`, downloading it if remote."""
        if url.startswith(("http://", "https://")):
            import tempfile
            import requests

            # Create temp file with .pdf extension.
            temp_file = tempfile.NamedTemporaryFile(suffix='.pdf', delete=False)

            try:
                # BUG FIX: the original request had no timeout, so a stalled
                # server could hang the scrape forever. Stream with a timeout.
                response = requests.get(url, stream=True, timeout=30)
                response.raise_for_status()

                # Write to temp file in chunks.
                with open(temp_file.name, 'wb') as f:
                    for chunk in response.iter_content(chunk_size=8192):
                        f.write(chunk)

                return temp_file.name

            except Exception as e:
                # Clean up temp file if download fails.
                Path(temp_file.name).unlink(missing_ok=True)
                raise RuntimeError(f"Failed to download PDF from {url}: {str(e)}")

        elif url.startswith("file://"):
            return url[7:]  # Strip file:// prefix

        return url  # Assume local path
|
||||||
|
|
||||||
|
|
||||||
|
__all__ = ["PDFCrawlerStrategy", "PDFContentScrapingStrategy"]
|
||||||
487
crawl4ai/processors/pdf/processor.py
Normal file
487
crawl4ai/processors/pdf/processor.py
Normal file
@@ -0,0 +1,487 @@
|
|||||||
|
import logging
|
||||||
|
import re
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from datetime import datetime
|
||||||
|
from pathlib import Path
|
||||||
|
from time import time
|
||||||
|
from dataclasses import dataclass, asdict, field
|
||||||
|
from typing import Dict, List, Optional, Any, Union
|
||||||
|
import base64
|
||||||
|
import tempfile
|
||||||
|
from .utils import *
|
||||||
|
from .utils import (
|
||||||
|
apply_png_predictor,
|
||||||
|
clean_pdf_text,
|
||||||
|
clean_pdf_text_to_html,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Remove direct PyPDF2 imports from the top
|
||||||
|
# import PyPDF2
|
||||||
|
# from PyPDF2 import PdfReader
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
@dataclass
class PDFMetadata:
    """Document-level metadata pulled from a PDF's info dictionary."""

    title: Optional[str] = None
    author: Optional[str] = None
    producer: Optional[str] = None
    created: Optional[datetime] = None
    modified: Optional[datetime] = None
    pages: int = 0
    encrypted: bool = False
    file_size: Optional[int] = None
|
||||||
|
|
||||||
|
@dataclass
class PDFPage:
    """Extracted content of a single PDF page in several renditions."""

    page_number: int
    raw_text: str = ""
    markdown: str = ""
    html: str = ""
    images: List[Dict] = field(default_factory=list)
    links: List[str] = field(default_factory=list)
    layout: List[Dict] = field(default_factory=list)
|
||||||
|
|
||||||
|
@dataclass
class PDFProcessResult:
    """Aggregate result of processing one PDF: metadata plus all pages."""

    metadata: PDFMetadata
    pages: List[PDFPage]
    processing_time: float = 0.0
    version: str = "1.0"
|
||||||
|
|
||||||
|
class PDFProcessorStrategy(ABC):
    """Interface for PDF processors: turn a PDF file into a PDFProcessResult."""

    @abstractmethod
    def process(self, pdf_path: Path) -> PDFProcessResult:
        """Process the PDF at `pdf_path` and return the extracted result."""
        pass
|
||||||
|
|
||||||
|
class NaivePDFProcessorStrategy(PDFProcessorStrategy):
    """PDF processor built on PyPDF2.

    Extracts per-page text (with layout positions), images, links and
    document metadata. Pages can be processed sequentially (``process``)
    or in parallel threads (``process_batch``).
    """

    def __init__(self, image_dpi: int = 144, image_quality: int = 85, extract_images: bool = True,
                 save_images_locally: bool = False, image_save_dir: Optional[Path] = None, batch_size: int = 4):
        # Fail fast at construction time if the optional dependency is absent.
        try:
            import PyPDF2  # noqa: F401
        except ImportError:
            raise ImportError("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")

        self.image_dpi = image_dpi
        self.image_quality = image_quality
        # Kept for backward compatibility; page numbers are now also passed
        # explicitly to the private helpers (see _process_page).
        self.current_page_number = 0
        self.extract_images = extract_images
        self.save_images_locally = save_images_locally
        self.image_save_dir = image_save_dir
        self.batch_size = batch_size
        self._temp_dir = None

    def _prepare_image_dir(self) -> Optional[Path]:
        """Create (or reuse) the directory images are saved into, if any."""
        if not (self.extract_images and self.save_images_locally):
            return None
        if self.image_save_dir:
            image_dir = Path(self.image_save_dir)
            image_dir.mkdir(exist_ok=True, parents=True)
            return image_dir
        self._temp_dir = tempfile.mkdtemp(prefix='pdf_images_')
        return Path(self._temp_dir)

    def _cleanup_temp_dir(self) -> None:
        """Remove the temporary image directory, if one was created."""
        if self._temp_dir and not self.image_save_dir:
            import shutil
            try:
                shutil.rmtree(self._temp_dir)
            except Exception as e:
                logger.error(f"Failed to cleanup temp directory: {str(e)}")

    def process(self, pdf_path: Path) -> PDFProcessResult:
        """Process the PDF sequentially, page by page.

        Raises:
            ImportError: If PyPDF2 is not installed.
            Exception: Re-raises any processing failure after logging it.
        """
        # Import inside the method so the dependency stays optional.
        try:
            from PyPDF2 import PdfReader
        except ImportError:
            raise ImportError("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")

        start_time = time()
        result = PDFProcessResult(
            metadata=PDFMetadata(),
            pages=[],
            version="1.1"
        )

        try:
            with pdf_path.open('rb') as file:
                reader = PdfReader(file)
                result.metadata = self._extract_metadata(pdf_path, reader)

                image_dir = self._prepare_image_dir()

                for page_num, page in enumerate(reader.pages):
                    self.current_page_number = page_num + 1
                    pdf_page = self._process_page(page, image_dir, page_num + 1)
                    result.pages.append(pdf_page)

        except Exception as e:
            logger.error(f"Failed to process PDF: {str(e)}")
            raise
        finally:
            self._cleanup_temp_dir()

        result.processing_time = time() - start_time
        return result

    def process_batch(self, pdf_path: Path) -> PDFProcessResult:
        """Like process() but processes PDF pages in parallel batches."""
        # Import inside the method so the dependency stays optional.
        try:
            from PyPDF2 import PdfReader
        except ImportError:
            raise ImportError("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")

        import concurrent.futures
        import threading

        # Legacy workaround kept from the original implementation;
        # presumably needed by some PyPDF2 versions — TODO confirm.
        if not hasattr(threading.current_thread(), "_children"):
            threading.current_thread()._children = set()

        start_time = time()
        result = PDFProcessResult(
            metadata=PDFMetadata(),
            pages=[],
            version="1.1"
        )

        try:
            # Read metadata and the page count from the main thread.
            with pdf_path.open('rb') as file:
                reader = PdfReader(file)
                result.metadata = self._extract_metadata(pdf_path, reader)
                total_pages = len(reader.pages)

            image_dir = self._prepare_image_dir()

            def process_page_safely(page_num: int):
                # Each worker opens its own file handle; reader objects are
                # not shared across threads.
                with pdf_path.open('rb') as file:
                    thread_reader = PdfReader(file)
                    page = thread_reader.pages[page_num]
                    # BUG FIX: the original mutated self.current_page_number
                    # here, which races between workers and could stamp pages
                    # (and image filenames) with the wrong page number. The
                    # page number is now passed explicitly.
                    return self._process_page(page, image_dir, page_num + 1)

            # Fan pages out to a bounded thread pool.
            with concurrent.futures.ThreadPoolExecutor(max_workers=self.batch_size) as executor:
                futures = []
                for page_num in range(total_pages):
                    future = executor.submit(process_page_safely, page_num)
                    futures.append((page_num + 1, future))

                # Collect results back into page order.
                result.pages = [None] * total_pages
                for page_num, future in futures:
                    try:
                        result.pages[page_num - 1] = future.result()
                    except Exception as e:
                        logger.error(f"Failed to process page {page_num}: {str(e)}")
                        raise

        except Exception as e:
            logger.error(f"Failed to process PDF: {str(e)}")
            raise
        finally:
            self._cleanup_temp_dir()

        result.processing_time = time() - start_time
        return result

    def _process_page(self, page, image_dir: Optional[Path], page_number: Optional[int] = None) -> PDFPage:
        """Extract text, images and links from one page into a PDFPage.

        `page_number` defaults to self.current_page_number for backward
        compatibility with callers that only set the attribute.
        """
        if page_number is None:
            page_number = self.current_page_number

        pdf_page = PDFPage(
            page_number=page_number,
        )

        # Text extraction with coordinates, via PyPDF2's visitor callback.
        def visitor_text(text, cm, tm, font_dict, font_size):
            pdf_page.raw_text += text
            pdf_page.layout.append({
                "type": "text",
                "text": text,
                "x": tm[4],
                "y": tm[5],
            })

        page.extract_text(visitor_text=visitor_text)

        # Image extraction.
        if self.extract_images:
            pdf_page.images = self._extract_images(page, image_dir, page_number)

        # Link extraction.
        pdf_page.links = self._extract_links(page)

        # Rendered variants of the raw text.
        pdf_page.markdown = clean_pdf_text(page_number, pdf_page.raw_text)
        pdf_page.html = clean_pdf_text_to_html(page_number, pdf_page.raw_text)

        return pdf_page

    def _extract_images(self, page, image_dir: Optional[Path], page_number: Optional[int] = None) -> List[Dict]:
        """Extract the page's XObject images, decoding per /Filter type.

        Returns a list of dicts with format/size info and either a local
        "path" (when saving locally) or base64 "data".
        """
        # Import PyPDF2 for type checking only when needed.
        try:
            import PyPDF2
        except ImportError:
            raise ImportError("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")

        if not self.extract_images:
            return []
        if page_number is None:
            page_number = self.current_page_number

        images = []
        try:
            resources = page.get("/Resources")
            if resources:  # Check if resources exist
                resources = resources.get_object()  # Resolve IndirectObject
                if '/XObject' in resources:
                    xobjects = resources['/XObject'].get_object()
                    img_count = 0
                    for obj_name in xobjects:
                        xobj = xobjects[obj_name]
                        if hasattr(xobj, 'get_object') and callable(xobj.get_object):
                            xobj = xobj.get_object()
                        if xobj.get('/Subtype') == '/Image':
                            try:
                                img_count += 1
                                img_filename = f"page_{page_number}_img_{img_count}"
                                data = xobj.get_data()
                                filters = xobj.get('/Filter', [])
                                if not isinstance(filters, list):
                                    filters = [filters]

                                # Resolve IndirectObjects in properties.
                                width = xobj.get('/Width', 0)
                                height = xobj.get('/Height', 0)
                                color_space = xobj.get('/ColorSpace', '/DeviceRGB')
                                if isinstance(color_space, PyPDF2.generic.IndirectObject):
                                    color_space = color_space.get_object()

                                # Handle different image encodings.
                                success = False
                                image_format = 'bin'
                                image_data = None

                                if '/FlateDecode' in filters:
                                    # zlib-compressed raw pixels, possibly PNG-predicted.
                                    try:
                                        decode_parms = xobj.get('/DecodeParms', {})
                                        if isinstance(decode_parms, PyPDF2.generic.IndirectObject):
                                            decode_parms = decode_parms.get_object()

                                        predictor = decode_parms.get('/Predictor', 1)
                                        bits = xobj.get('/BitsPerComponent', 8)
                                        colors = 3 if color_space == '/DeviceRGB' else 1

                                        if predictor >= 10:
                                            data = apply_png_predictor(data, width, bits, colors)

                                        # Create PIL Image from the raw buffer.
                                        from PIL import Image
                                        mode = 'RGB' if color_space == '/DeviceRGB' else 'L'
                                        img = Image.frombytes(mode, (width, height), data)

                                        if self.save_images_locally:
                                            final_path = (image_dir / img_filename).with_suffix('.png')
                                            img.save(final_path)
                                            image_data = str(final_path)
                                        else:
                                            import io
                                            img_byte_arr = io.BytesIO()
                                            img.save(img_byte_arr, format='PNG')
                                            image_data = base64.b64encode(img_byte_arr.getvalue()).decode('utf-8')

                                        success = True
                                        image_format = 'png'
                                    except Exception as e:
                                        logger.error(f"FlateDecode error: {str(e)}")

                                elif '/DCTDecode' in filters:
                                    # JPEG image: the stream bytes are already a JPEG file.
                                    try:
                                        if self.save_images_locally:
                                            final_path = (image_dir / img_filename).with_suffix('.jpg')
                                            with open(final_path, 'wb') as f:
                                                f.write(data)
                                            image_data = str(final_path)
                                        else:
                                            image_data = base64.b64encode(data).decode('utf-8')
                                        success = True
                                        image_format = 'jpeg'
                                    except Exception as e:
                                        logger.error(f"JPEG save error: {str(e)}")

                                elif '/CCITTFaxDecode' in filters:
                                    # Fax-encoded data; wrap in a TIFF header when missing.
                                    try:
                                        if data[:4] != b'II*\x00':
                                            tiff_header = b'II*\x00\x08\x00\x00\x00\x0e\x00\x00\x01\x03\x00\x01\x00\x00\x00' + \
                                                width.to_bytes(4, 'little') + \
                                                b'\x01\x03\x00\x01\x00\x00\x00' + \
                                                height.to_bytes(4, 'little') + \
                                                b'\x01\x12\x00\x03\x00\x00\x00\x01\x00\x01\x00\x00\x01\x17\x00\x04\x00\x00\x00\x01\x00\x00\x00J\x01\x1B\x00\x05\x00\x00\x00\x01\x00\x00\x00R\x01\x28\x00\x03\x00\x00\x00\x01\x00\x02\x00\x00'
                                            data = tiff_header + data

                                        if self.save_images_locally:
                                            final_path = (image_dir / img_filename).with_suffix('.tiff')
                                            with open(final_path, 'wb') as f:
                                                f.write(data)
                                            image_data = str(final_path)
                                        else:
                                            image_data = base64.b64encode(data).decode('utf-8')
                                        success = True
                                        image_format = 'tiff'
                                    except Exception as e:
                                        logger.error(f"CCITT save error: {str(e)}")

                                elif '/JPXDecode' in filters:
                                    # JPEG 2000: stream bytes are a JP2 file.
                                    try:
                                        if self.save_images_locally:
                                            final_path = (image_dir / img_filename).with_suffix('.jp2')
                                            with open(final_path, 'wb') as f:
                                                f.write(data)
                                            image_data = str(final_path)
                                        else:
                                            image_data = base64.b64encode(data).decode('utf-8')
                                        success = True
                                        image_format = 'jpeg2000'
                                    except Exception as e:
                                        logger.error(f"JPEG2000 save error: {str(e)}")

                                if success and image_data:
                                    image_info = {
                                        "format": image_format,
                                        "width": width,
                                        "height": height,
                                        "color_space": str(color_space),
                                        "bits_per_component": xobj.get('/BitsPerComponent', 1)
                                    }

                                    if self.save_images_locally:
                                        image_info["path"] = image_data
                                    else:
                                        image_info["data"] = image_data

                                    images.append(image_info)
                                else:
                                    # Fallback: persist the raw stream bytes.
                                    if self.save_images_locally:
                                        final_path = (image_dir / img_filename).with_suffix('.bin')
                                        with open(final_path, 'wb') as f:
                                            f.write(data)
                                        logger.warning(f"Saved raw image data to {final_path}")
                                    else:
                                        image_data = base64.b64encode(data).decode('utf-8')
                                        images.append({
                                            "format": "bin",
                                            "width": width,
                                            "height": height,
                                            "color_space": str(color_space),
                                            "bits_per_component": xobj.get('/BitsPerComponent', 1),
                                            "data": image_data
                                        })

                            except Exception as e:
                                logger.error(f"Error processing image: {str(e)}")
        except Exception as e:
            logger.error(f"Image extraction error: {str(e)}")

        return images

    def _extract_links(self, page) -> List[str]:
        """Collect URI targets of the page's link annotations."""
        links = []
        if '/Annots' in page:
            try:
                for annot in page['/Annots']:
                    a = annot.get_object()
                    if '/A' in a and '/URI' in a['/A']:
                        links.append(a['/A']['/URI'])
            except Exception as e:
                # CONSISTENCY FIX: use the module logger like the rest of
                # this class instead of print().
                logger.error(f"Link error: {str(e)}")
        return links

    def _extract_metadata(self, pdf_path: Path, reader = None) -> PDFMetadata:
        """Build PDFMetadata from the reader's info dictionary and file stats."""
        # Import inside the method so the dependency stays optional.
        if reader is None:
            try:
                from PyPDF2 import PdfReader
                reader = PdfReader(pdf_path)
            except ImportError:
                raise ImportError("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")

        meta = reader.metadata or {}
        created = self._parse_pdf_date(meta.get('/CreationDate', ''))
        modified = self._parse_pdf_date(meta.get('/ModDate', ''))

        return PDFMetadata(
            title=meta.get('/Title'),
            author=meta.get('/Author'),
            producer=meta.get('/Producer'),
            created=created,
            modified=modified,
            pages=len(reader.pages),
            encrypted=reader.is_encrypted,
            file_size=pdf_path.stat().st_size
        )

    def _parse_pdf_date(self, date_str: str) -> Optional[datetime]:
        """Parse a PDF 'D:YYYYMMDDHHMMSS' date string; None when unparseable."""
        try:
            match = re.match(r'D:(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})', date_str)
            if not match:
                return None

            return datetime(
                year=int(match[1]),
                month=int(match[2]),
                day=int(match[3]),
                hour=int(match[4]),
                minute=int(match[5]),
                second=int(match[6])
            )
        except (TypeError, ValueError):
            # Narrowed from a bare `except:`: only swallow parse failures
            # (non-string input, out-of-range date components).
            return None
|
||||||
|
|
||||||
|
# Usage example
if __name__ == "__main__":
    import json
    from pathlib import Path

    try:
        # Import PyPDF2 only when running the file directly.
        import PyPDF2  # noqa: F401
        from PyPDF2 import PdfReader  # noqa: F401
    except ImportError:
        print("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")
        exit(1)

    current_dir = Path(__file__).resolve().parent
    pdf_path = f'{current_dir}/test.pdf'

    strategy = NaivePDFProcessorStrategy()
    result = strategy.process(Path(pdf_path))

    # Convert to JSON (asdict recursively converts the dataclasses to dicts).
    json_output = asdict(result)
    print(json.dumps(json_output, indent=2, default=str))

    # BUG FIX: result.pages holds PDFPage dataclass instances, not dicts, so
    # the original subscript access (page["page_number"], page['html'],
    # page['raw_text']) raised TypeError. Use attribute access.
    with open(f'{current_dir}/test.html', 'w') as f:
        for page in result.pages:
            f.write(f'<h1>Page {page.page_number}</h1>')
            f.write(page.html)
    with open(f'{current_dir}/test.md', 'w') as f:
        for page in result.pages:
            f.write(f'# Page {page.page_number}\n\n')
            f.write(clean_pdf_text(page.page_number, page.raw_text))
            f.write('\n\n')
|
||||||
350
crawl4ai/processors/pdf/utils.py
Normal file
350
crawl4ai/processors/pdf/utils.py
Normal file
@@ -0,0 +1,350 @@
|
|||||||
|
import re
|
||||||
|
|
||||||
|
def apply_png_predictor(data, width, bits, color_channels):
    """Decode PNG predictor (PDF 1.5+ filter)"""
    # Bytes occupied by one pixel, rounding partial bytes up.
    bpp, remainder = divmod(bits * color_channels, 8)
    if remainder:
        bpp += 1

    stride = width * bpp
    row_len = stride + 1  # each scanline is prefixed with a filter-type byte

    if len(data) % row_len != 0:
        raise ValueError("Invalid scanline structure")

    out = bytearray()
    previous = b'\x00' * stride  # virtual all-zero row above the first line

    for row_start in range(0, len(data), row_len):
        row = data[row_start:row_start + row_len]
        ftype, payload = row[0], row[1:]

        if ftype == 0:  # None
            current = payload
        elif ftype == 1:  # Sub: add the byte bpp positions to the left
            current = bytearray(payload)
            for k in range(bpp, len(current)):
                current[k] = (current[k] + current[k - bpp]) % 256
        elif ftype == 2:  # Up: add the byte directly above
            current = bytearray(
                (payload[k] + previous[k]) % 256 for k in range(len(payload))
            )
        elif ftype == 3:  # Average of left and above
            current = bytearray(payload)
            for k in range(len(current)):
                left = current[k - bpp] if k >= bpp else 0
                current[k] = (current[k] + (left + previous[k]) // 2) % 256
        elif ftype == 4:  # Paeth: nearest of left/above/upper-left
            current = bytearray(payload)
            for k in range(len(current)):
                left = current[k - bpp] if k >= bpp else 0
                above = previous[k]
                upper_left = previous[k - bpp] if k >= bpp else 0
                current[k] = (current[k] + paeth_predictor(left, above, upper_left)) % 256
        else:
            raise ValueError(f"Unsupported filter type: {ftype}")

        out.extend(current)
        previous = current

    return bytes(out)
|
||||||
|
|
||||||
|
def paeth_predictor(a, b, c):
    """Return the PNG Paeth predictor for neighbours (left=a, above=b,
    upper-left=c): the neighbour closest to the estimate a + b - c, with
    ties broken in the order left, above, upper-left."""
    estimate = a + b - c
    dist_a = abs(estimate - a)
    dist_b = abs(estimate - b)
    dist_c = abs(estimate - c)
    if dist_a <= dist_b and dist_a <= dist_c:
        return a
    if dist_b <= dist_c:
        return b
    return c
|
||||||
|
|
||||||
|
import re
|
||||||
|
import html
|
||||||
|
|
||||||
|
def clean_pdf_text_to_html(page_number, text):
    """Convert raw text extracted from one PDF page into simple HTML.

    Heuristically recognises the article title, numbered section headers,
    author lines, affiliations, grouped emails and quotes; everything else
    is re-flowed into ``<div class="paragraph">`` blocks, re-joining words
    hyphenated across line breaks.

    Args:
        page_number: 1-based page index; author detection only runs on page 1.
        text: raw page text, possibly containing literal unicode escapes
            (e.g. ``\\u2020``) — assumed latin-1 encodable; TODO confirm
            against the upstream PDF extractor.

    Returns:
        An HTML string, one block element per line.
    """
    # Decode literal Unicode escapes, then repair surrogate pairs via a
    # UTF-16 round-trip; fall back to the raw text on any failure.
    try:
        decoded = text.encode('latin-1').decode('unicode-escape')
        decoded = decoded.encode('utf-16', 'surrogatepass').decode('utf-16')
    except Exception as e:
        decoded = text  # Fallback if decoding fails

    article_title_detected = False
    # decoded = re.sub(r'\.\n', '.\n\n', decoded)
    # decoded = re.sub(r'\.\n', '<|break|>', decoded)
    lines = decoded.split('\n')
    output = []
    current_paragraph = []
    in_header = False
    email_pattern = re.compile(r'\{.*?\}')  # grouped addresses, e.g. "{a,b}@x.com"
    affiliation_pattern = re.compile(r'^†')
    quote_pattern = re.compile(r'^["“]')
    # Comma/"and"-separated capitalised names, optionally carrying †/*/digit markers.
    author_pattern = re.compile(
        r'^\s*[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?'
        r'(?:,\s*[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?)*'
        r'(?:,\s*(?:and|&)\s+[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?)?\s*$'
    )

    def flush_paragraph():
        # Emit the accumulated lines as one whitespace-normalised paragraph div.
        if current_paragraph:
            para = ' '.join(current_paragraph)
            para = re.sub(r'\s+', ' ', para).strip()
            if para:
                # escaped_para = html.escape(para)
                # NOTE(review): paragraph text is NOT HTML-escaped here
                # (escaping was deliberately disabled above) — confirm the
                # consumer tolerates raw markup in paragraph bodies.
                escaped_para = para
                # escaped_para = re.sub(r'\.\n', '.\n\n', escaped_para)
                # Split escaped_para by <|break|> to avoid HTML escaping
                # NOTE(review): whitespace was collapsed to single spaces just
                # above, so '.\n\n' can never occur — this split is a no-op.
                escaped_para = escaped_para.split('.\n\n')
                # Wrap each part in <p> tag
                escaped_para = [f'<p>{part}</p>' for part in escaped_para]
                output.append(f'<div class="paragraph">{"".join(escaped_para)}</div><hr/>')
            current_paragraph.clear()

    # Branch order below matters: title > numbered header > authors >
    # affiliation > email > section header > quote > paragraph text.
    for i, line in enumerate(lines):
        line = line.strip()

        # Handle empty lines
        if not line:
            flush_paragraph()
            continue

        # Detect article title (first line with reasonable length)
        if not article_title_detected and i == 0 and 3 <= len(line.split()) <= 8 and len(lines) > 1:
            flush_paragraph()
            escaped_line = html.escape(line)
            output.append(f'<h2>{escaped_line}</h2>')
            article_title_detected = True
            continue

        # Detect numbered headers like "2.1 Background"
        # (only when preceded by a blank line; i > 0 avoids lines[-1] wraparound)
        numbered_header = re.match(r'^(\d+(?:\.\d+)*)\s+(.+)$', line)
        if i > 0 and not lines[i-1].strip() and numbered_header:
            flush_paragraph()
            level = numbered_header.group(1).count('.') + 1
            header_text = numbered_header.group(2)
            md_level = min(level + 1, 6)  # "2.1" -> <h3>, capped at <h6>
            escaped_header = html.escape(header_text)
            output.append(f'<h{md_level}>{escaped_header}</h{md_level}>')
            in_header = True
            continue

        # Detect authors (first page only)
        if page_number == 1 and author_pattern.match(line):
            # Strip affiliation daggers and common mojibake bytes.
            authors = re.sub(r'[†â€]', '', line)
            authors = re.split(r', | and ', authors)
            formatted_authors = []
            for author in authors:
                if author.strip():
                    parts = [p for p in author.strip().split() if p]
                    formatted = ' '.join(parts)
                    escaped_author = html.escape(formatted)
                    formatted_authors.append(f'<strong>{escaped_author}</strong>')

            # Join with commas and a final "and".
            if len(formatted_authors) > 1:
                joined = ', '.join(formatted_authors[:-1]) + ' and ' + formatted_authors[-1]
            else:
                joined = formatted_authors[0]

            output.append(f'<p>{joined}</p>')
            continue

        # Detect affiliation lines ("† Some University")
        if affiliation_pattern.match(line):
            escaped_line = html.escape(line)
            output.append(f'<p><em>{escaped_line}</em></p>')
            continue

        # Detect emails
        if email_pattern.match(line):
            escaped_line = html.escape(line)
            output.append(f'<p><code>{escaped_line}</code></p>')
            continue

        # Detect section headers
        if re.match(r'^(Abstract|\d+\s+[A-Z]|References|Appendix|Figure|Table)', line):
            flush_paragraph()
            escaped_line = html.escape(line)
            output.append(f'<h2 class="section-header"><em>{escaped_line}</em></h2>')
            in_header = True
            continue

        # Handle quotes
        if quote_pattern.match(line):
            flush_paragraph()
            escaped_line = html.escape(line)
            output.append(f'<blockquote><p>{escaped_line}</p></blockquote>')
            continue

        # Handle hyphenated words (re-join words split across line breaks)
        if line.endswith('-'):
            current_paragraph.append(line[:-1].strip())
        else:
            current_paragraph.append(line)

        # Handle paragraph breaks after headers: a short non-sentence line
        # right after a header is flushed as its own block.
        if in_header and not line.endswith(('.', '!', '?')):
            flush_paragraph()
            in_header = False

    flush_paragraph()

    # Post-process HTML
    html_output = '\n'.join(output)

    # Fix common citation patterns: "(Smith et al. 2020)" -> <cite>…</cite>
    html_output = re.sub(r'\(([A-Z][a-z]+ et al\. \d{4})\)', r'<cite>\1</cite>', html_output)

    # Fix escaped characters that survived decoding
    html_output = html_output.replace('\\ud835', '').replace('\\u2020', '†')

    # Remove leftover hyphens and fix spacing
    html_output = re.sub(r'\s+-\s+', '', html_output)
    html_output = re.sub(r'\s+([.,!?)])', r'\1', html_output)

    return html_output
|
||||||
|
|
||||||
|
def clean_pdf_text(page_number, text):
    """Convert raw text extracted from one PDF page into lightweight markdown.

    Heuristically recognises the article title, numbered section headers,
    author lines, affiliations, grouped emails and quotes; everything else is
    re-flowed into paragraphs, re-joining words hyphenated across line breaks.

    Args:
        page_number: 1-based page index; author detection only runs on page 1.
        text: raw page text, possibly containing literal unicode escapes
            (e.g. ``\\u2020``) — assumed latin-1 encodable; TODO confirm
            against the upstream PDF extractor.

    Returns:
        A markdown string with blocks separated by blank lines.
    """
    # Decode literal Unicode escapes, then repair surrogate pairs via a
    # UTF-16 round-trip; fall back to the raw text on any failure.
    try:
        decoded = text.encode('latin-1').decode('unicode-escape')
        decoded = decoded.encode('utf-16', 'surrogatepass').decode('utf-16')
    except Exception:
        decoded = text  # Fallback if decoding fails

    article_title_detected = False
    # Turn sentence-final line breaks into paragraph breaks.
    decoded = re.sub(r'\.\n', '.\n\n', decoded)
    lines = decoded.split('\n')
    output = []
    current_paragraph = []
    in_header = False
    email_pattern = re.compile(r'\{.*?\}')  # grouped addresses, e.g. "{a,b}@x.com"
    affiliation_pattern = re.compile(r'^†')
    quote_pattern = re.compile(r'^["“]')
    # Comma/"and"-separated capitalised names, optionally carrying †/*/digit markers.
    author_pattern = re.compile(
        r'^\s*[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?'
        r'(?:,\s*[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?)*'
        r'(?:,\s*(?:and|&)\s+[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?)?\s*$'
    )

    def flush_paragraph():
        # Emit the accumulated lines as one whitespace-normalised paragraph.
        if current_paragraph:
            para = ' '.join(current_paragraph)
            para = re.sub(r'\s+', ' ', para).strip()
            if para:
                output.append(para)
            current_paragraph.clear()

    # Branch order below matters: title > numbered header > authors >
    # affiliation > email > section header > quote > paragraph text.
    for i, line in enumerate(lines):
        line = line.strip()

        # Empty line: paragraph boundary.
        # (The original also had a second `if not line and current_paragraph:`
        # branch further down; it was unreachable after this one and has been
        # removed.)
        if not line:
            flush_paragraph()
            continue

        # Detect headline (first line, reasonable length)
        if not article_title_detected and i == 0 and 3 <= len(line.split()) <= 8 and (len(lines) > 1):
            flush_paragraph()
            output.append(f'## {line}')
            continue

        # Detect numbered headers like "2.1 Background" preceded by a blank line.
        numbered_header = re.match(r'^(\d+(?:\.\d+)*)\s+(.+)$', line)
        # BUGFIX: guard with i > 0 so lines[i-1] never wraps around to
        # lines[-1] for the first line (matches clean_pdf_text_to_html).
        if i > 0 and not lines[i - 1].strip() and numbered_header:
            flush_paragraph()
            level = numbered_header.group(1).count('.') + 1  # Convert 2.1 → level 2
            header_text = numbered_header.group(2)
            md_level = min(level + 1, 6)  # 1 → ##, 2 → ###, 3 → #### etc
            output.append(f'{"#" * md_level} {header_text}')
            in_header = True
            continue

        # Detect authors (first page only)
        if page_number == 1 and author_pattern.match(line):
            # Clean and format author names
            authors = re.sub(r'[†â€]', '', line)  # Remove affiliation markers
            authors = re.split(r', | and ', authors)
            formatted_authors = []
            for author in authors:
                if author.strip():
                    # Handle "First Last" formatting
                    parts = [p for p in author.strip().split() if p]
                    formatted = ' '.join(parts)
                    formatted_authors.append(f'**{formatted}**')

            # Join with commas and "and"
            if len(formatted_authors) > 1:
                joined = ', '.join(formatted_authors[:-1]) + ' and ' + formatted_authors[-1]
            else:
                joined = formatted_authors[0]

            output.append(joined)
            continue

        # Detect affiliation lines ("† Some University")
        if affiliation_pattern.match(line):
            output.append(f'*{line}*')
            continue

        # Detect emails
        if email_pattern.match(line):
            output.append(f'`{line}`')
            continue

        # Detect section headers
        if re.match(r'^(Abstract|\d+\s+[A-Z]|References|Appendix|Figure|Table)', line):
            flush_paragraph()
            output.append(f'_[{line}]_')
            in_header = True
            continue

        # Handle quotes
        if quote_pattern.match(line):
            flush_paragraph()
            output.append(f'> {line}')
            continue

        # Handle hyphenated words (re-join words split across line breaks)
        if line.endswith('-'):
            current_paragraph.append(line[:-1].strip())
        else:
            current_paragraph.append(line)

        # Handle paragraph breaks after headers: a short non-sentence line
        # right after a header is flushed as its own block.
        if in_header and not line.endswith(('.', '!', '?')):
            flush_paragraph()
            in_header = False

    flush_paragraph()

    # Post-processing
    markdown = '\n\n'.join(output)

    # Fix common citation patterns: "(Smith et al. 2020)" -> "[Smith et al. 2020]"
    markdown = re.sub(r'\(([A-Z][a-z]+ et al\. \d{4})\)', r'[\1]', markdown)

    # Fix escaped characters that survived decoding
    markdown = markdown.replace('\\ud835', '').replace('\\u2020', '†')

    # Remove leftover hyphens and fix spacing
    markdown = re.sub(r'\s+-\s+', '', markdown)  # Join hyphenated words
    markdown = re.sub(r'\s+([.,!?)])', r'\1', markdown)  # Fix punctuation spacing

    return markdown
|
||||||
@@ -198,7 +198,804 @@ Avoid Common Mistakes:
|
|||||||
- Do NOT add any comments using "//" or "#" in the JSON output. It causes parsing errors.
|
- Do NOT add any comments using "//" or "#" in the JSON output. It causes parsing errors.
|
||||||
- Make sure the JSON is properly formatted with curly braces, square brackets, and commas in the right places.
|
- Make sure the JSON is properly formatted with curly braces, square brackets, and commas in the right places.
|
||||||
- Do not miss closing </blocks> tag at the end of the JSON output.
|
- Do not miss closing </blocks> tag at the end of the JSON output.
|
||||||
- Do not generate the Python coee show me how to do the task, this is your task to extract the information and return it in JSON format.
|
- Do not generate the Python code show me how to do the task, this is your task to extract the information and return it in JSON format.
|
||||||
|
|
||||||
Result
|
Result
|
||||||
Output the final list of JSON objects, wrapped in <blocks>...</blocks> XML tags. Make sure to close the tag properly."""
|
Output the final list of JSON objects, wrapped in <blocks>...</blocks> XML tags. Make sure to close the tag properly."""
|
||||||
|
|
||||||
|
|
||||||
|
PROMPT_FILTER_CONTENT = """Your task is to filter and convert HTML content into clean, focused markdown that's optimized for use with LLMs and information retrieval systems.
|
||||||
|
|
||||||
|
TASK DETAILS:
|
||||||
|
1. Content Selection
|
||||||
|
- DO: Keep essential information, main content, key details
|
||||||
|
- DO: Preserve hierarchical structure using markdown headers
|
||||||
|
- DO: Keep code blocks, tables, key lists
|
||||||
|
- DON'T: Include navigation menus, ads, footers, cookie notices
|
||||||
|
- DON'T: Keep social media widgets, sidebars, related content
|
||||||
|
|
||||||
|
2. Content Transformation
|
||||||
|
- DO: Use proper markdown syntax (#, ##, **, `, etc)
|
||||||
|
- DO: Convert tables to markdown tables
|
||||||
|
- DO: Preserve code formatting with ```language blocks
|
||||||
|
- DO: Maintain link texts but remove tracking parameters
|
||||||
|
- DON'T: Include HTML tags in output
|
||||||
|
- DON'T: Keep class names, ids, or other HTML attributes
|
||||||
|
|
||||||
|
3. Content Organization
|
||||||
|
- DO: Maintain logical flow of information
|
||||||
|
- DO: Group related content under appropriate headers
|
||||||
|
- DO: Use consistent header levels
|
||||||
|
- DON'T: Fragment related content
|
||||||
|
- DON'T: Duplicate information
|
||||||
|
|
||||||
|
IMPORTANT: If user specific instruction is provided, ignore above guideline and prioritize those requirements over these general guidelines.
|
||||||
|
|
||||||
|
OUTPUT FORMAT:
|
||||||
|
Wrap your response in <content> tags. Use proper markdown throughout.
|
||||||
|
<content>
|
||||||
|
[Your markdown content here]
|
||||||
|
</content>
|
||||||
|
|
||||||
|
Begin filtering now.
|
||||||
|
|
||||||
|
--------------------------------------------
|
||||||
|
|
||||||
|
<|HTML_CONTENT_START|>
|
||||||
|
{HTML}
|
||||||
|
<|HTML_CONTENT_END|>
|
||||||
|
|
||||||
|
<|USER_INSTRUCTION_START|>
|
||||||
|
{REQUEST}
|
||||||
|
<|USER_INSTRUCTION_END|>
|
||||||
|
"""
|
||||||
|
|
||||||
|
JSON_SCHEMA_BUILDER= """
|
||||||
|
# HTML Schema Generation Instructions
|
||||||
|
You are a specialized model designed to analyze HTML patterns and generate extraction schemas. Your primary job is to create structured JSON schemas that can be used to extract data from HTML in a consistent and reliable way. When presented with HTML content, you must analyze its structure and generate a schema that captures all relevant data points.
|
||||||
|
|
||||||
|
## Your Core Responsibilities:
|
||||||
|
1. Analyze HTML structure to identify repeating patterns and important data points
|
||||||
|
2. Generate valid JSON schemas following the specified format
|
||||||
|
3. Create appropriate selectors that will work reliably for data extraction
|
||||||
|
4. Name fields meaningfully based on their content and purpose
|
||||||
|
5. Handle both specific user requests and autonomous pattern detection
|
||||||
|
|
||||||
|
## Available Schema Types You Can Generate:
|
||||||
|
|
||||||
|
<schema_types>
|
||||||
|
1. Basic Single-Level Schema
|
||||||
|
- Use for simple, flat data structures
|
||||||
|
- Example: Product cards, user profiles
|
||||||
|
- Direct field extractions
|
||||||
|
|
||||||
|
2. Nested Object Schema
|
||||||
|
- Use for hierarchical data
|
||||||
|
- Example: Articles with author details
|
||||||
|
- Contains objects within objects
|
||||||
|
|
||||||
|
3. List Schema
|
||||||
|
- Use for repeating elements
|
||||||
|
- Example: Comment sections, product lists
|
||||||
|
- Handles arrays of similar items
|
||||||
|
|
||||||
|
4. Complex Nested Lists
|
||||||
|
- Use for multi-level data
|
||||||
|
- Example: Categories with subcategories
|
||||||
|
- Multiple levels of nesting
|
||||||
|
|
||||||
|
5. Transformation Schema
|
||||||
|
- Use for data requiring processing
|
||||||
|
- Supports regex and text transformations
|
||||||
|
- Special attribute handling
|
||||||
|
</schema_types>
|
||||||
|
|
||||||
|
<schema_structure>
|
||||||
|
Your output must always be a JSON object with this structure:
|
||||||
|
{
|
||||||
|
"name": "Descriptive name of the pattern",
|
||||||
|
"baseSelector": "CSS selector for the repeating element",
|
||||||
|
"fields": [
|
||||||
|
{
|
||||||
|
"name": "field_name",
|
||||||
|
"selector": "CSS selector",
|
||||||
|
"type": "text|attribute|nested|list|regex",
|
||||||
|
"attribute": "attribute_name", // Optional
|
||||||
|
"transform": "transformation_type", // Optional
|
||||||
|
"pattern": "regex_pattern", // Optional
|
||||||
|
"fields": [] // For nested/list types
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
</schema_structure>
|
||||||
|
|
||||||
|
<type_definitions>
|
||||||
|
Available field types:
|
||||||
|
- text: Direct text extraction
|
||||||
|
- attribute: HTML attribute extraction
|
||||||
|
- nested: Object containing other fields
|
||||||
|
- list: Array of similar items
|
||||||
|
- regex: Pattern-based extraction
|
||||||
|
</type_definitions>
|
||||||
|
|
||||||
|
<behavior_rules>
|
||||||
|
1. When given a specific query:
|
||||||
|
- Focus on extracting requested data points
|
||||||
|
- Use most specific selectors possible
|
||||||
|
- Include all fields mentioned in the query
|
||||||
|
|
||||||
|
2. When no query is provided:
|
||||||
|
- Identify main content areas
|
||||||
|
- Extract all meaningful data points
|
||||||
|
- Use semantic structure to determine importance
|
||||||
|
- Include prices, dates, titles, and other common data types
|
||||||
|
|
||||||
|
3. Always:
|
||||||
|
- Use reliable CSS selectors
|
||||||
|
- Handle dynamic class names appropriately
|
||||||
|
- Create descriptive field names
|
||||||
|
- Follow consistent naming conventions
|
||||||
|
</behavior_rules>
|
||||||
|
|
||||||
|
<examples>
|
||||||
|
1. Basic Product Card Example:
|
||||||
|
<html>
|
||||||
|
<div class="product-card" data-cat-id="electronics" data-subcat-id="laptops">
|
||||||
|
<h2 class="product-title">Gaming Laptop</h2>
|
||||||
|
<span class="price">$999.99</span>
|
||||||
|
<img src="laptop.jpg" alt="Gaming Laptop">
|
||||||
|
</div>
|
||||||
|
</html>
|
||||||
|
|
||||||
|
Generated Schema:
|
||||||
|
{
|
||||||
|
"name": "Product Cards",
|
||||||
|
"baseSelector": ".product-card",
|
||||||
|
"baseFields": [
|
||||||
|
{"name": "data_cat_id", "type": "attribute", "attribute": "data-cat-id"},
|
||||||
|
{"name": "data_subcat_id", "type": "attribute", "attribute": "data-subcat-id"}
|
||||||
|
],
|
||||||
|
"fields": [
|
||||||
|
{
|
||||||
|
"name": "title",
|
||||||
|
"selector": ".product-title",
|
||||||
|
"type": "text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "price",
|
||||||
|
"selector": ".price",
|
||||||
|
"type": "text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "image_url",
|
||||||
|
"selector": "img",
|
||||||
|
"type": "attribute",
|
||||||
|
"attribute": "src"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
2. Article with Author Details Example:
|
||||||
|
<html>
|
||||||
|
<article>
|
||||||
|
<h1>The Future of AI</h1>
|
||||||
|
<div class="author-info">
|
||||||
|
<span class="author-name">Dr. Smith</span>
|
||||||
|
<img src="author.jpg" alt="Dr. Smith">
|
||||||
|
</div>
|
||||||
|
</article>
|
||||||
|
</html>
|
||||||
|
|
||||||
|
Generated Schema:
|
||||||
|
{
|
||||||
|
"name": "Article Details",
|
||||||
|
"baseSelector": "article",
|
||||||
|
"fields": [
|
||||||
|
{
|
||||||
|
"name": "title",
|
||||||
|
"selector": "h1",
|
||||||
|
"type": "text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "author",
|
||||||
|
"type": "nested",
|
||||||
|
"selector": ".author-info",
|
||||||
|
"fields": [
|
||||||
|
{
|
||||||
|
"name": "name",
|
||||||
|
"selector": ".author-name",
|
||||||
|
"type": "text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "avatar",
|
||||||
|
"selector": "img",
|
||||||
|
"type": "attribute",
|
||||||
|
"attribute": "src"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
3. Comments Section Example:
|
||||||
|
<html>
|
||||||
|
<div class="comments-container">
|
||||||
|
<div class="comment" data-user-id="123">
|
||||||
|
<div class="user-name">John123</div>
|
||||||
|
<p class="comment-text">Great article!</p>
|
||||||
|
</div>
|
||||||
|
<div class="comment" data-user-id="456">
|
||||||
|
<div class="user-name">Alice456</div>
|
||||||
|
<p class="comment-text">Thanks for sharing.</p>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</html>
|
||||||
|
|
||||||
|
Generated Schema:
|
||||||
|
{
|
||||||
|
"name": "Comment Section",
|
||||||
|
"baseSelector": ".comments-container",
|
||||||
|
"baseFields": [
|
||||||
|
{"name": "data_user_id", "type": "attribute", "attribute": "data-user-id"}
|
||||||
|
],
|
||||||
|
"fields": [
|
||||||
|
{
|
||||||
|
"name": "comments",
|
||||||
|
"type": "list",
|
||||||
|
"selector": ".comment",
|
||||||
|
"fields": [
|
||||||
|
{
|
||||||
|
"name": "user",
|
||||||
|
"selector": ".user-name",
|
||||||
|
"type": "text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "content",
|
||||||
|
"selector": ".comment-text",
|
||||||
|
"type": "text"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
4. E-commerce Categories Example:
|
||||||
|
<html>
|
||||||
|
<div class="category-section" data-category="electronics">
|
||||||
|
<h2>Electronics</h2>
|
||||||
|
<div class="subcategory">
|
||||||
|
<h3>Laptops</h3>
|
||||||
|
<div class="product">
|
||||||
|
<span class="product-name">MacBook Pro</span>
|
||||||
|
<span class="price">$1299</span>
|
||||||
|
</div>
|
||||||
|
<div class="product">
|
||||||
|
<span class="product-name">Dell XPS</span>
|
||||||
|
<span class="price">$999</span>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</html>
|
||||||
|
|
||||||
|
Generated Schema:
|
||||||
|
{
|
||||||
|
"name": "E-commerce Categories",
|
||||||
|
"baseSelector": ".category-section",
|
||||||
|
"baseFields": [
|
||||||
|
{"name": "data_category", "type": "attribute", "attribute": "data-category"}
|
||||||
|
],
|
||||||
|
"fields": [
|
||||||
|
{
|
||||||
|
"name": "category_name",
|
||||||
|
"selector": "h2",
|
||||||
|
"type": "text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "subcategories",
|
||||||
|
"type": "nested_list",
|
||||||
|
"selector": ".subcategory",
|
||||||
|
"fields": [
|
||||||
|
{
|
||||||
|
"name": "name",
|
||||||
|
"selector": "h3",
|
||||||
|
"type": "text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "products",
|
||||||
|
"type": "list",
|
||||||
|
"selector": ".product",
|
||||||
|
"fields": [
|
||||||
|
{
|
||||||
|
"name": "name",
|
||||||
|
"selector": ".product-name",
|
||||||
|
"type": "text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "price",
|
||||||
|
"selector": ".price",
|
||||||
|
"type": "text"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
5. Job Listings with Transformations Example:
|
||||||
|
<html>
|
||||||
|
<div class="job-post">
|
||||||
|
<h3 class="job-title">Senior Developer</h3>
|
||||||
|
<span class="salary-text">Salary: $120,000/year</span>
|
||||||
|
<span class="location"> New York, NY </span>
|
||||||
|
</div>
|
||||||
|
</html>
|
||||||
|
|
||||||
|
Generated Schema:
|
||||||
|
{
|
||||||
|
"name": "Job Listings",
|
||||||
|
"baseSelector": ".job-post",
|
||||||
|
"fields": [
|
||||||
|
{
|
||||||
|
"name": "title",
|
||||||
|
"selector": ".job-title",
|
||||||
|
"type": "text",
|
||||||
|
"transform": "uppercase"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "salary",
|
||||||
|
"selector": ".salary-text",
|
||||||
|
"type": "regex",
|
||||||
|
"pattern": "\\$([\\d,]+)"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "location",
|
||||||
|
"selector": ".location",
|
||||||
|
"type": "text",
|
||||||
|
"transform": "strip"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
6. Skyscanner Place Card Example:
|
||||||
|
<html>
|
||||||
|
<div class="PlaceCard_descriptionContainer__M2NjN" data-testid="description-container">
|
||||||
|
<div class="PlaceCard_nameContainer__ZjZmY" tabindex="0" role="link">
|
||||||
|
<div class="PlaceCard_nameContent__ODUwZ">
|
||||||
|
<span class="BpkText_bpk-text__MjhhY BpkText_bpk-text--heading-4__Y2FlY">Doha</span>
|
||||||
|
</div>
|
||||||
|
<span class="BpkText_bpk-text__MjhhY BpkText_bpk-text--heading-4__Y2FlY PlaceCard_subName__NTVkY">Qatar</span>
|
||||||
|
</div>
|
||||||
|
<span class="PlaceCard_advertLabel__YTM0N">Sunny days and the warmest welcome awaits</span>
|
||||||
|
<a class="BpkLink_bpk-link__MmQwY PlaceCard_descriptionLink__NzYwN" href="/flights/del/doha/" data-testid="flights-link">
|
||||||
|
<div class="PriceDescription_container__NjEzM">
|
||||||
|
<span class="BpkText_bpk-text--heading-5__MTRjZ">₹17,559</span>
|
||||||
|
</div>
|
||||||
|
</a>
|
||||||
|
</div>
|
||||||
|
</html>
|
||||||
|
|
||||||
|
Generated Schema:
|
||||||
|
{
|
||||||
|
"name": "Skyscanner Place Cards",
|
||||||
|
"baseSelector": "div[class^='PlaceCard_descriptionContainer__']",
|
||||||
|
"baseFields": [
|
||||||
|
{"name": "data_testid", "type": "attribute", "attribute": "data-testid"}
|
||||||
|
],
|
||||||
|
"fields": [
|
||||||
|
{
|
||||||
|
"name": "city_name",
|
||||||
|
"selector": "div[class^='PlaceCard_nameContent__'] .BpkText_bpk-text--heading-4__",
|
||||||
|
"type": "text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "country_name",
|
||||||
|
"selector": "span[class*='PlaceCard_subName__']",
|
||||||
|
"type": "text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "description",
|
||||||
|
"selector": "span[class*='PlaceCard_advertLabel__']",
|
||||||
|
"type": "text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "flight_price",
|
||||||
|
"selector": "a[data-testid='flights-link'] .BpkText_bpk-text--heading-5__",
|
||||||
|
"type": "text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "flight_url",
|
||||||
|
"selector": "a[data-testid='flights-link']",
|
||||||
|
"type": "attribute",
|
||||||
|
"attribute": "href"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
</examples>
|
||||||
|
|
||||||
|
|
||||||
|
<output_requirements>
|
||||||
|
Your output must:
|
||||||
|
1. Be valid JSON only
|
||||||
|
2. Include no explanatory text
|
||||||
|
3. Follow the exact schema structure provided
|
||||||
|
4. Use appropriate field types
|
||||||
|
5. Include all required fields
|
||||||
|
6. Use valid CSS selectors
|
||||||
|
</output_requirements>
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
JSON_SCHEMA_BUILDER_XPATH = """
|
||||||
|
# HTML Schema Generation Instructions
|
||||||
|
You are a specialized model designed to analyze HTML patterns and generate extraction schemas. Your primary job is to create structured JSON schemas that can be used to extract data from HTML in a consistent and reliable way. When presented with HTML content, you must analyze its structure and generate a schema that captures all relevant data points.
|
||||||
|
|
||||||
|
## Your Core Responsibilities:
|
||||||
|
1. Analyze HTML structure to identify repeating patterns and important data points
|
||||||
|
2. Generate valid JSON schemas following the specified format
|
||||||
|
3. Create appropriate XPath selectors that will work reliably for data extraction
|
||||||
|
4. Name fields meaningfully based on their content and purpose
|
||||||
|
5. Handle both specific user requests and autonomous pattern detection
|
||||||
|
|
||||||
|
## Available Schema Types You Can Generate:
|
||||||
|
|
||||||
|
<schema_types>
|
||||||
|
1. Basic Single-Level Schema
|
||||||
|
- Use for simple, flat data structures
|
||||||
|
- Example: Product cards, user profiles
|
||||||
|
- Direct field extractions
|
||||||
|
|
||||||
|
2. Nested Object Schema
|
||||||
|
- Use for hierarchical data
|
||||||
|
- Example: Articles with author details
|
||||||
|
- Contains objects within objects
|
||||||
|
|
||||||
|
3. List Schema
|
||||||
|
- Use for repeating elements
|
||||||
|
- Example: Comment sections, product lists
|
||||||
|
- Handles arrays of similar items
|
||||||
|
|
||||||
|
4. Complex Nested Lists
|
||||||
|
- Use for multi-level data
|
||||||
|
- Example: Categories with subcategories
|
||||||
|
- Multiple levels of nesting
|
||||||
|
|
||||||
|
5. Transformation Schema
|
||||||
|
- Use for data requiring processing
|
||||||
|
- Supports regex and text transformations
|
||||||
|
- Special attribute handling
|
||||||
|
</schema_types>
|
||||||
|
|
||||||
|
<schema_structure>
|
||||||
|
Your output must always be a JSON object with this structure:
|
||||||
|
{
|
||||||
|
"name": "Descriptive name of the pattern",
|
||||||
|
"baseSelector": "XPath selector for the repeating element",
|
||||||
|
"fields": [
|
||||||
|
{
|
||||||
|
"name": "field_name",
|
||||||
|
"selector": "XPath selector",
|
||||||
|
"type": "text|attribute|nested|list|regex",
|
||||||
|
"attribute": "attribute_name", // Optional
|
||||||
|
"transform": "transformation_type", // Optional
|
||||||
|
"pattern": "regex_pattern", // Optional
|
||||||
|
"fields": [] // For nested/list types
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
</schema_structure>
|
||||||
|
|
||||||
|
<type_definitions>
|
||||||
|
Available field types:
|
||||||
|
- text: Direct text extraction
|
||||||
|
- attribute: HTML attribute extraction
|
||||||
|
- nested: Object containing other fields
|
||||||
|
- list: Array of similar items
|
||||||
|
- regex: Pattern-based extraction
|
||||||
|
</type_definitions>
|
||||||
|
|
||||||
|
<behavior_rules>
|
||||||
|
1. When given a specific query:
|
||||||
|
- Focus on extracting requested data points
|
||||||
|
- Use most specific selectors possible
|
||||||
|
- Include all fields mentioned in the query
|
||||||
|
|
||||||
|
2. When no query is provided:
|
||||||
|
- Identify main content areas
|
||||||
|
- Extract all meaningful data points
|
||||||
|
- Use semantic structure to determine importance
|
||||||
|
- Include prices, dates, titles, and other common data types
|
||||||
|
|
||||||
|
3. Always:
|
||||||
|
- Use reliable XPath selectors
|
||||||
|
- Handle dynamic element IDs appropriately
|
||||||
|
- Create descriptive field names
|
||||||
|
- Follow consistent naming conventions
|
||||||
|
</behavior_rules>
|
||||||
|
|
||||||
|
<examples>
|
||||||
|
1. Basic Product Card Example:
|
||||||
|
<html>
|
||||||
|
<div class="product-card" data-cat-id="electronics" data-subcat-id="laptops">
|
||||||
|
<h2 class="product-title">Gaming Laptop</h2>
|
||||||
|
<span class="price">$999.99</span>
|
||||||
|
<img src="laptop.jpg" alt="Gaming Laptop">
|
||||||
|
</div>
|
||||||
|
</html>
|
||||||
|
|
||||||
|
Generated Schema:
|
||||||
|
{
|
||||||
|
"name": "Product Cards",
|
||||||
|
"baseSelector": "//div[@class='product-card']",
|
||||||
|
"baseFields": [
|
||||||
|
{"name": "data_cat_id", "type": "attribute", "attribute": "data-cat-id"},
|
||||||
|
{"name": "data_subcat_id", "type": "attribute", "attribute": "data-subcat-id"}
|
||||||
|
],
|
||||||
|
"fields": [
|
||||||
|
{
|
||||||
|
"name": "title",
|
||||||
|
"selector": ".//h2[@class='product-title']",
|
||||||
|
"type": "text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "price",
|
||||||
|
"selector": ".//span[@class='price']",
|
||||||
|
"type": "text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "image_url",
|
||||||
|
"selector": ".//img",
|
||||||
|
"type": "attribute",
|
||||||
|
"attribute": "src"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
2. Article with Author Details Example:
|
||||||
|
<html>
|
||||||
|
<article>
|
||||||
|
<h1>The Future of AI</h1>
|
||||||
|
<div class="author-info">
|
||||||
|
<span class="author-name">Dr. Smith</span>
|
||||||
|
<img src="author.jpg" alt="Dr. Smith">
|
||||||
|
</div>
|
||||||
|
</article>
|
||||||
|
</html>
|
||||||
|
|
||||||
|
Generated Schema:
|
||||||
|
{
|
||||||
|
"name": "Article Details",
|
||||||
|
"baseSelector": "//article",
|
||||||
|
"fields": [
|
||||||
|
{
|
||||||
|
"name": "title",
|
||||||
|
"selector": ".//h1",
|
||||||
|
"type": "text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "author",
|
||||||
|
"type": "nested",
|
||||||
|
"selector": ".//div[@class='author-info']",
|
||||||
|
"fields": [
|
||||||
|
{
|
||||||
|
"name": "name",
|
||||||
|
"selector": ".//span[@class='author-name']",
|
||||||
|
"type": "text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "avatar",
|
||||||
|
"selector": ".//img",
|
||||||
|
"type": "attribute",
|
||||||
|
"attribute": "src"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
3. Comments Section Example:
|
||||||
|
<html>
|
||||||
|
<div class="comments-container">
|
||||||
|
<div class="comment" data-user-id="123">
|
||||||
|
<div class="user-name">John123</div>
|
||||||
|
<p class="comment-text">Great article!</p>
|
||||||
|
</div>
|
||||||
|
<div class="comment" data-user-id="456">
|
||||||
|
<div class="user-name">Alice456</div>
|
||||||
|
<p class="comment-text">Thanks for sharing.</p>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</html>
|
||||||
|
|
||||||
|
Generated Schema:
|
||||||
|
{
|
||||||
|
"name": "Comment Section",
|
||||||
|
"baseSelector": "//div[@class='comments-container']",
|
||||||
|
"fields": [
|
||||||
|
{
|
||||||
|
"name": "comments",
|
||||||
|
"type": "list",
|
||||||
|
"selector": ".//div[@class='comment']",
|
||||||
|
"baseFields": [
|
||||||
|
{"name": "data_user_id", "type": "attribute", "attribute": "data-user-id"}
|
||||||
|
],
|
||||||
|
"fields": [
|
||||||
|
{
|
||||||
|
"name": "user",
|
||||||
|
"selector": ".//div[@class='user-name']",
|
||||||
|
"type": "text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "content",
|
||||||
|
"selector": ".//p[@class='comment-text']",
|
||||||
|
"type": "text"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
4. E-commerce Categories Example:
|
||||||
|
<html>
|
||||||
|
<div class="category-section" data-category="electronics">
|
||||||
|
<h2>Electronics</h2>
|
||||||
|
<div class="subcategory">
|
||||||
|
<h3>Laptops</h3>
|
||||||
|
<div class="product">
|
||||||
|
<span class="product-name">MacBook Pro</span>
|
||||||
|
<span class="price">$1299</span>
|
||||||
|
</div>
|
||||||
|
<div class="product">
|
||||||
|
<span class="product-name">Dell XPS</span>
|
||||||
|
<span class="price">$999</span>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</html>
|
||||||
|
|
||||||
|
Generated Schema:
|
||||||
|
{
|
||||||
|
"name": "E-commerce Categories",
|
||||||
|
"baseSelector": "//div[@class='category-section']",
|
||||||
|
"baseFields": [
|
||||||
|
{"name": "data_category", "type": "attribute", "attribute": "data-category"}
|
||||||
|
],
|
||||||
|
"fields": [
|
||||||
|
{
|
||||||
|
"name": "category_name",
|
||||||
|
"selector": ".//h2",
|
||||||
|
"type": "text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "subcategories",
|
||||||
|
"type": "nested_list",
|
||||||
|
"selector": ".//div[@class='subcategory']",
|
||||||
|
"fields": [
|
||||||
|
{
|
||||||
|
"name": "name",
|
||||||
|
"selector": ".//h3",
|
||||||
|
"type": "text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "products",
|
||||||
|
"type": "list",
|
||||||
|
"selector": ".//div[@class='product']",
|
||||||
|
"fields": [
|
||||||
|
{
|
||||||
|
"name": "name",
|
||||||
|
"selector": ".//span[@class='product-name']",
|
||||||
|
"type": "text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "price",
|
||||||
|
"selector": ".//span[@class='price']",
|
||||||
|
"type": "text"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
5. Job Listings with Transformations Example:
|
||||||
|
<html>
|
||||||
|
<div class="job-post">
|
||||||
|
<h3 class="job-title">Senior Developer</h3>
|
||||||
|
<span class="salary-text">Salary: $120,000/year</span>
|
||||||
|
<span class="location"> New York, NY </span>
|
||||||
|
</div>
|
||||||
|
</html>
|
||||||
|
|
||||||
|
Generated Schema:
|
||||||
|
{
|
||||||
|
"name": "Job Listings",
|
||||||
|
"baseSelector": "//div[@class='job-post']",
|
||||||
|
"fields": [
|
||||||
|
{
|
||||||
|
"name": "title",
|
||||||
|
"selector": ".//h3[@class='job-title']",
|
||||||
|
"type": "text",
|
||||||
|
"transform": "uppercase"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "salary",
|
||||||
|
"selector": ".//span[@class='salary-text']",
|
||||||
|
"type": "regex",
|
||||||
|
"pattern": "\\$([\\d,]+)"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "location",
|
||||||
|
"selector": ".//span[@class='location']",
|
||||||
|
"type": "text",
|
||||||
|
"transform": "strip"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
6. Skyscanner Place Card Example:
|
||||||
|
<html>
|
||||||
|
<div class="PlaceCard_descriptionContainer__M2NjN" data-testid="description-container">
|
||||||
|
<div class="PlaceCard_nameContainer__ZjZmY" tabindex="0" role="link">
|
||||||
|
<div class="PlaceCard_nameContent__ODUwZ">
|
||||||
|
<span class="BpkText_bpk-text__MjhhY BpkText_bpk-text--heading-4__Y2FlY">Doha</span>
|
||||||
|
</div>
|
||||||
|
<span class="BpkText_bpk-text__MjhhY BpkText_bpk-text--heading-4__Y2FlY PlaceCard_subName__NTVkY">Qatar</span>
|
||||||
|
</div>
|
||||||
|
<span class="PlaceCard_advertLabel__YTM0N">Sunny days and the warmest welcome awaits</span>
|
||||||
|
<a class="BpkLink_bpk-link__MmQwY PlaceCard_descriptionLink__NzYwN" href="/flights/del/doha/" data-testid="flights-link">
|
||||||
|
<div class="PriceDescription_container__NjEzM">
|
||||||
|
<span class="BpkText_bpk-text--heading-5__MTRjZ">₹17,559</span>
|
||||||
|
</div>
|
||||||
|
</a>
|
||||||
|
</div>
|
||||||
|
</html>
|
||||||
|
|
||||||
|
Generated Schema:
|
||||||
|
{
|
||||||
|
"name": "Skyscanner Place Cards",
|
||||||
|
"baseSelector": "//div[contains(@class, 'PlaceCard_descriptionContainer__')]",
|
||||||
|
"baseFields": [
|
||||||
|
{"name": "data_testid", "type": "attribute", "attribute": "data-testid"}
|
||||||
|
],
|
||||||
|
"fields": [
|
||||||
|
{
|
||||||
|
"name": "city_name",
|
||||||
|
"selector": ".//div[contains(@class, 'PlaceCard_nameContent__')]//span[contains(@class, 'BpkText_bpk-text--heading-4__')]",
|
||||||
|
"type": "text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "country_name",
|
||||||
|
"selector": ".//span[contains(@class, 'PlaceCard_subName__')]",
|
||||||
|
"type": "text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "description",
|
||||||
|
"selector": ".//span[contains(@class, 'PlaceCard_advertLabel__')]",
|
||||||
|
"type": "text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "flight_price",
|
||||||
|
"selector": ".//a[@data-testid='flights-link']//span[contains(@class, 'BpkText_bpk-text--heading-5__')]",
|
||||||
|
"type": "text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "flight_url",
|
||||||
|
"selector": ".//a[@data-testid='flights-link']",
|
||||||
|
"type": "attribute",
|
||||||
|
"attribute": "href"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
</examples>
|
||||||
|
|
||||||
|
<output_requirements>
|
||||||
|
Your output must:
|
||||||
|
1. Be valid JSON only
|
||||||
|
2. Include no explanatory text
|
||||||
|
3. Follow the exact schema structure provided
|
||||||
|
4. Use appropriate field types
|
||||||
|
5. Include all required fields
|
||||||
|
6. Use valid XPath selectors
|
||||||
|
</output_requirements>
|
||||||
|
"""
|
||||||
44
crawl4ai/proxy_strategy.py
Normal file
44
crawl4ai/proxy_strategy.py
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
from typing import List, Dict, Optional
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from itertools import cycle
|
||||||
|
|
||||||
|
from crawl4ai.configs import ProxyConfig
|
||||||
|
|
||||||
|
class ProxyRotationStrategy(ABC):
|
||||||
|
"""Base abstract class for proxy rotation strategies"""
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def get_next_proxy(self) -> Optional[Dict]:
|
||||||
|
"""Get next proxy configuration from the strategy"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def add_proxies(self, proxies: List[Dict]):
|
||||||
|
"""Add proxy configurations to the strategy"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
class RoundRobinProxyStrategy:
|
||||||
|
"""Simple round-robin proxy rotation strategy using ProxyConfig objects"""
|
||||||
|
|
||||||
|
def __init__(self, proxies: List[ProxyConfig] = None):
|
||||||
|
"""
|
||||||
|
Initialize with optional list of proxy configurations
|
||||||
|
|
||||||
|
Args:
|
||||||
|
proxies: List of ProxyConfig objects
|
||||||
|
"""
|
||||||
|
self._proxies = []
|
||||||
|
self._proxy_cycle = None
|
||||||
|
if proxies:
|
||||||
|
self.add_proxies(proxies)
|
||||||
|
|
||||||
|
def add_proxies(self, proxies: List[ProxyConfig]):
|
||||||
|
"""Add new proxies to the rotation pool"""
|
||||||
|
self._proxies.extend(proxies)
|
||||||
|
self._proxy_cycle = cycle(self._proxies)
|
||||||
|
|
||||||
|
async def get_next_proxy(self) -> Optional[ProxyConfig]:
|
||||||
|
"""Get next proxy in round-robin fashion"""
|
||||||
|
if not self._proxy_cycle:
|
||||||
|
return None
|
||||||
|
return next(self._proxy_cycle)
|
||||||
@@ -26,11 +26,12 @@ class SSLCertificate:
|
|||||||
export_as_json() -> Dict[str, Any]: Export the certificate as JSON format.
|
export_as_json() -> Dict[str, Any]: Export the certificate as JSON format.
|
||||||
export_as_text() -> str: Export the certificate as text format.
|
export_as_text() -> str: Export the certificate as text format.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, cert_info: Dict[str, Any]):
|
def __init__(self, cert_info: Dict[str, Any]):
|
||||||
self._cert_info = self._decode_cert_data(cert_info)
|
self._cert_info = self._decode_cert_data(cert_info)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def from_url(url: str, timeout: int = 10) -> Optional['SSLCertificate']:
|
def from_url(url: str, timeout: int = 10) -> Optional["SSLCertificate"]:
|
||||||
"""
|
"""
|
||||||
Create SSLCertificate instance from a URL.
|
Create SSLCertificate instance from a URL.
|
||||||
|
|
||||||
@@ -43,14 +44,16 @@ class SSLCertificate:
|
|||||||
"""
|
"""
|
||||||
try:
|
try:
|
||||||
hostname = urlparse(url).netloc
|
hostname = urlparse(url).netloc
|
||||||
if ':' in hostname:
|
if ":" in hostname:
|
||||||
hostname = hostname.split(':')[0]
|
hostname = hostname.split(":")[0]
|
||||||
|
|
||||||
context = ssl.create_default_context()
|
context = ssl.create_default_context()
|
||||||
with socket.create_connection((hostname, 443), timeout=timeout) as sock:
|
with socket.create_connection((hostname, 443), timeout=timeout) as sock:
|
||||||
with context.wrap_socket(sock, server_hostname=hostname) as ssock:
|
with context.wrap_socket(sock, server_hostname=hostname) as ssock:
|
||||||
cert_binary = ssock.getpeercert(binary_form=True)
|
cert_binary = ssock.getpeercert(binary_form=True)
|
||||||
x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_ASN1, cert_binary)
|
x509 = OpenSSL.crypto.load_certificate(
|
||||||
|
OpenSSL.crypto.FILETYPE_ASN1, cert_binary
|
||||||
|
)
|
||||||
|
|
||||||
cert_info = {
|
cert_info = {
|
||||||
"subject": dict(x509.get_subject().get_components()),
|
"subject": dict(x509.get_subject().get_components()),
|
||||||
@@ -61,32 +64,33 @@ class SSLCertificate:
|
|||||||
"not_after": x509.get_notAfter(),
|
"not_after": x509.get_notAfter(),
|
||||||
"fingerprint": x509.digest("sha256").hex(),
|
"fingerprint": x509.digest("sha256").hex(),
|
||||||
"signature_algorithm": x509.get_signature_algorithm(),
|
"signature_algorithm": x509.get_signature_algorithm(),
|
||||||
"raw_cert": base64.b64encode(cert_binary)
|
"raw_cert": base64.b64encode(cert_binary),
|
||||||
}
|
}
|
||||||
|
|
||||||
# Add extensions
|
# Add extensions
|
||||||
extensions = []
|
extensions = []
|
||||||
for i in range(x509.get_extension_count()):
|
for i in range(x509.get_extension_count()):
|
||||||
ext = x509.get_extension(i)
|
ext = x509.get_extension(i)
|
||||||
extensions.append({
|
extensions.append(
|
||||||
"name": ext.get_short_name(),
|
{"name": ext.get_short_name(), "value": str(ext)}
|
||||||
"value": str(ext)
|
)
|
||||||
})
|
|
||||||
cert_info["extensions"] = extensions
|
cert_info["extensions"] = extensions
|
||||||
|
|
||||||
return SSLCertificate(cert_info)
|
return SSLCertificate(cert_info)
|
||||||
|
|
||||||
except Exception as e:
|
except Exception:
|
||||||
return None
|
return None
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def _decode_cert_data(data: Any) -> Any:
|
def _decode_cert_data(data: Any) -> Any:
|
||||||
"""Helper method to decode bytes in certificate data."""
|
"""Helper method to decode bytes in certificate data."""
|
||||||
if isinstance(data, bytes):
|
if isinstance(data, bytes):
|
||||||
return data.decode('utf-8')
|
return data.decode("utf-8")
|
||||||
elif isinstance(data, dict):
|
elif isinstance(data, dict):
|
||||||
return {
|
return {
|
||||||
(k.decode('utf-8') if isinstance(k, bytes) else k): SSLCertificate._decode_cert_data(v)
|
(
|
||||||
|
k.decode("utf-8") if isinstance(k, bytes) else k
|
||||||
|
): SSLCertificate._decode_cert_data(v)
|
||||||
for k, v in data.items()
|
for k, v in data.items()
|
||||||
}
|
}
|
||||||
elif isinstance(data, list):
|
elif isinstance(data, list):
|
||||||
@@ -105,7 +109,7 @@ class SSLCertificate:
|
|||||||
"""
|
"""
|
||||||
json_str = json.dumps(self._cert_info, indent=2, ensure_ascii=False)
|
json_str = json.dumps(self._cert_info, indent=2, ensure_ascii=False)
|
||||||
if filepath:
|
if filepath:
|
||||||
Path(filepath).write_text(json_str, encoding='utf-8')
|
Path(filepath).write_text(json_str, encoding="utf-8")
|
||||||
return None
|
return None
|
||||||
return json_str
|
return json_str
|
||||||
|
|
||||||
@@ -122,18 +126,17 @@ class SSLCertificate:
|
|||||||
try:
|
try:
|
||||||
x509 = OpenSSL.crypto.load_certificate(
|
x509 = OpenSSL.crypto.load_certificate(
|
||||||
OpenSSL.crypto.FILETYPE_ASN1,
|
OpenSSL.crypto.FILETYPE_ASN1,
|
||||||
base64.b64decode(self._cert_info['raw_cert'])
|
base64.b64decode(self._cert_info["raw_cert"]),
|
||||||
)
|
)
|
||||||
pem_data = OpenSSL.crypto.dump_certificate(
|
pem_data = OpenSSL.crypto.dump_certificate(
|
||||||
OpenSSL.crypto.FILETYPE_PEM,
|
OpenSSL.crypto.FILETYPE_PEM, x509
|
||||||
x509
|
).decode("utf-8")
|
||||||
).decode('utf-8')
|
|
||||||
|
|
||||||
if filepath:
|
if filepath:
|
||||||
Path(filepath).write_text(pem_data, encoding='utf-8')
|
Path(filepath).write_text(pem_data, encoding="utf-8")
|
||||||
return None
|
return None
|
||||||
return pem_data
|
return pem_data
|
||||||
except Exception as e:
|
except Exception:
|
||||||
return None
|
return None
|
||||||
|
|
||||||
def to_der(self, filepath: Optional[str] = None) -> Optional[bytes]:
|
def to_der(self, filepath: Optional[str] = None) -> Optional[bytes]:
|
||||||
@@ -147,7 +150,7 @@ class SSLCertificate:
|
|||||||
Optional[bytes]: DER bytes if successful, None otherwise.
|
Optional[bytes]: DER bytes if successful, None otherwise.
|
||||||
"""
|
"""
|
||||||
try:
|
try:
|
||||||
der_data = base64.b64decode(self._cert_info['raw_cert'])
|
der_data = base64.b64decode(self._cert_info["raw_cert"])
|
||||||
if filepath:
|
if filepath:
|
||||||
Path(filepath).write_bytes(der_data)
|
Path(filepath).write_bytes(der_data)
|
||||||
return None
|
return None
|
||||||
@@ -158,24 +161,24 @@ class SSLCertificate:
|
|||||||
@property
|
@property
|
||||||
def issuer(self) -> Dict[str, str]:
|
def issuer(self) -> Dict[str, str]:
|
||||||
"""Get certificate issuer information."""
|
"""Get certificate issuer information."""
|
||||||
return self._cert_info.get('issuer', {})
|
return self._cert_info.get("issuer", {})
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def subject(self) -> Dict[str, str]:
|
def subject(self) -> Dict[str, str]:
|
||||||
"""Get certificate subject information."""
|
"""Get certificate subject information."""
|
||||||
return self._cert_info.get('subject', {})
|
return self._cert_info.get("subject", {})
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def valid_from(self) -> str:
|
def valid_from(self) -> str:
|
||||||
"""Get certificate validity start date."""
|
"""Get certificate validity start date."""
|
||||||
return self._cert_info.get('not_before', '')
|
return self._cert_info.get("not_before", "")
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def valid_until(self) -> str:
|
def valid_until(self) -> str:
|
||||||
"""Get certificate validity end date."""
|
"""Get certificate validity end date."""
|
||||||
return self._cert_info.get('not_after', '')
|
return self._cert_info.get("not_after", "")
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def fingerprint(self) -> str:
|
def fingerprint(self) -> str:
|
||||||
"""Get certificate fingerprint."""
|
"""Get certificate fingerprint."""
|
||||||
return self._cert_info.get('fingerprint', '')
|
return self._cert_info.get("fingerprint", "")
|
||||||
|
|||||||
14
crawl4ai/types.py
Normal file
14
crawl4ai/types.py
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
from typing import TYPE_CHECKING, Union
|
||||||
|
|
||||||
|
AsyncWebCrawler = Union['AsyncWebCrawlerType'] # Note the string literal
|
||||||
|
CrawlerRunConfig = Union['CrawlerRunConfigType']
|
||||||
|
CrawlResult = Union['CrawlResultType']
|
||||||
|
RunManyReturn = Union['RunManyReturnType']
|
||||||
|
|
||||||
|
if TYPE_CHECKING:
|
||||||
|
from . import (
|
||||||
|
AsyncWebCrawler as AsyncWebCrawlerType,
|
||||||
|
CrawlerRunConfig as CrawlerRunConfigType,
|
||||||
|
CrawlResult as CrawlResultType,
|
||||||
|
RunManyReturn as RunManyReturnType,
|
||||||
|
)
|
||||||
@@ -2,8 +2,145 @@ import random
|
|||||||
from typing import Optional, Literal, List, Dict, Tuple
|
from typing import Optional, Literal, List, Dict, Tuple
|
||||||
import re
|
import re
|
||||||
|
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from fake_useragent import UserAgent
|
||||||
|
import requests
|
||||||
|
from lxml import html
|
||||||
|
import json
|
||||||
|
from typing import Union
|
||||||
|
|
||||||
class UserAgentGenerator:
|
class UAGen(ABC):
|
||||||
|
@abstractmethod
|
||||||
|
def generate(self,
|
||||||
|
browsers: Optional[List[str]] = None,
|
||||||
|
os: Optional[Union[str, List[str]]] = None,
|
||||||
|
min_version: float = 0.0,
|
||||||
|
platforms: Optional[Union[str, List[str]]] = None,
|
||||||
|
pct_threshold: Optional[float] = None,
|
||||||
|
fallback: str = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 Chrome/116.0.0.0 Safari/537.36") -> Union[str, Dict]:
|
||||||
|
pass
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def generate_client_hints( user_agent: str) -> str:
|
||||||
|
"""Generate Sec-CH-UA header value based on user agent string"""
|
||||||
|
def _parse_user_agent(user_agent: str) -> Dict[str, str]:
|
||||||
|
"""Parse a user agent string to extract browser and version information"""
|
||||||
|
browsers = {
|
||||||
|
"chrome": r"Chrome/(\d+)",
|
||||||
|
"edge": r"Edg/(\d+)",
|
||||||
|
"safari": r"Version/(\d+)",
|
||||||
|
"firefox": r"Firefox/(\d+)",
|
||||||
|
}
|
||||||
|
|
||||||
|
result = {}
|
||||||
|
for browser, pattern in browsers.items():
|
||||||
|
match = re.search(pattern, user_agent)
|
||||||
|
if match:
|
||||||
|
result[browser] = match.group(1)
|
||||||
|
|
||||||
|
return result
|
||||||
|
browsers = _parse_user_agent(user_agent)
|
||||||
|
|
||||||
|
# Client hints components
|
||||||
|
hints = []
|
||||||
|
|
||||||
|
# Handle different browser combinations
|
||||||
|
if "chrome" in browsers:
|
||||||
|
hints.append(f'"Chromium";v="{browsers["chrome"]}"')
|
||||||
|
hints.append('"Not_A Brand";v="8"')
|
||||||
|
|
||||||
|
if "edge" in browsers:
|
||||||
|
hints.append(f'"Microsoft Edge";v="{browsers["edge"]}"')
|
||||||
|
else:
|
||||||
|
hints.append(f'"Google Chrome";v="{browsers["chrome"]}"')
|
||||||
|
|
||||||
|
elif "firefox" in browsers:
|
||||||
|
# Firefox doesn't typically send Sec-CH-UA
|
||||||
|
return '""'
|
||||||
|
|
||||||
|
elif "safari" in browsers:
|
||||||
|
# Safari's format for client hints
|
||||||
|
hints.append(f'"Safari";v="{browsers["safari"]}"')
|
||||||
|
hints.append('"Not_A Brand";v="8"')
|
||||||
|
|
||||||
|
return ", ".join(hints)
|
||||||
|
|
||||||
|
class ValidUAGenerator(UAGen):
|
||||||
|
def __init__(self):
|
||||||
|
self.ua = UserAgent()
|
||||||
|
|
||||||
|
def generate(self,
|
||||||
|
browsers: Optional[List[str]] = None,
|
||||||
|
os: Optional[Union[str, List[str]]] = None,
|
||||||
|
min_version: float = 0.0,
|
||||||
|
platforms: Optional[Union[str, List[str]]] = None,
|
||||||
|
pct_threshold: Optional[float] = None,
|
||||||
|
fallback: str = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 Chrome/116.0.0.0 Safari/537.36") -> str:
|
||||||
|
|
||||||
|
self.ua = UserAgent(
|
||||||
|
browsers=browsers or ['Chrome', 'Firefox', 'Edge'],
|
||||||
|
os=os or ['Windows', 'Mac OS X'],
|
||||||
|
min_version=min_version,
|
||||||
|
platforms=platforms or ['desktop'],
|
||||||
|
fallback=fallback
|
||||||
|
)
|
||||||
|
return self.ua.random
|
||||||
|
|
||||||
|
class OnlineUAGenerator(UAGen):
|
||||||
|
def __init__(self):
|
||||||
|
self.agents = []
|
||||||
|
self._fetch_agents()
|
||||||
|
|
||||||
|
def _fetch_agents(self):
|
||||||
|
try:
|
||||||
|
response = requests.get(
|
||||||
|
'https://www.useragents.me/',
|
||||||
|
timeout=5,
|
||||||
|
headers={'Accept': 'text/html,application/xhtml+xml'}
|
||||||
|
)
|
||||||
|
response.raise_for_status()
|
||||||
|
|
||||||
|
tree = html.fromstring(response.content)
|
||||||
|
json_text = tree.cssselect('#most-common-desktop-useragents-json-csv > div:nth-child(1) > textarea')[0].text
|
||||||
|
self.agents = json.loads(json_text)
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Error fetching agents: {e}")
|
||||||
|
|
||||||
|
def generate(self,
|
||||||
|
browsers: Optional[List[str]] = None,
|
||||||
|
os: Optional[Union[str, List[str]]] = None,
|
||||||
|
min_version: float = 0.0,
|
||||||
|
platforms: Optional[Union[str, List[str]]] = None,
|
||||||
|
pct_threshold: Optional[float] = None,
|
||||||
|
fallback: str = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 Chrome/116.0.0.0 Safari/537.36") -> Dict:
|
||||||
|
|
||||||
|
if not self.agents:
|
||||||
|
self._fetch_agents()
|
||||||
|
|
||||||
|
filtered_agents = self.agents
|
||||||
|
|
||||||
|
if pct_threshold:
|
||||||
|
filtered_agents = [a for a in filtered_agents if a['pct'] >= pct_threshold]
|
||||||
|
|
||||||
|
if browsers:
|
||||||
|
filtered_agents = [a for a in filtered_agents
|
||||||
|
if any(b.lower() in a['ua'].lower() for b in browsers)]
|
||||||
|
|
||||||
|
if os:
|
||||||
|
os_list = [os] if isinstance(os, str) else os
|
||||||
|
filtered_agents = [a for a in filtered_agents
|
||||||
|
if any(o.lower() in a['ua'].lower() for o in os_list)]
|
||||||
|
|
||||||
|
if platforms:
|
||||||
|
platform_list = [platforms] if isinstance(platforms, str) else platforms
|
||||||
|
filtered_agents = [a for a in filtered_agents
|
||||||
|
if any(p.lower() in a['ua'].lower() for p in platform_list)]
|
||||||
|
|
||||||
|
return filtered_agents[0] if filtered_agents else {'ua': fallback, 'pct': 0}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class UserAgentGenerator():
|
||||||
"""
|
"""
|
||||||
Generate random user agents with specified constraints.
|
Generate random user agents with specified constraints.
|
||||||
|
|
||||||
@@ -32,6 +169,7 @@ class UserAgentGenerator:
|
|||||||
android_version: Optional[str] = None
|
android_version: Optional[str] = None
|
||||||
): Generates a random user agent string based on the specified parameters.
|
): Generates a random user agent string based on the specified parameters.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
# Previous platform definitions remain the same...
|
# Previous platform definitions remain the same...
|
||||||
self.desktop_platforms = {
|
self.desktop_platforms = {
|
||||||
@@ -47,7 +185,7 @@ class UserAgentGenerator:
|
|||||||
"generic": "(X11; Linux x86_64)",
|
"generic": "(X11; Linux x86_64)",
|
||||||
"ubuntu": "(X11; Ubuntu; Linux x86_64)",
|
"ubuntu": "(X11; Ubuntu; Linux x86_64)",
|
||||||
"chrome_os": "(X11; CrOS x86_64 14541.0.0)",
|
"chrome_os": "(X11; CrOS x86_64 14541.0.0)",
|
||||||
}
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
self.mobile_platforms = {
|
self.mobile_platforms = {
|
||||||
@@ -60,26 +198,14 @@ class UserAgentGenerator:
|
|||||||
"ios": {
|
"ios": {
|
||||||
"iphone": "(iPhone; CPU iPhone OS 16_5 like Mac OS X)",
|
"iphone": "(iPhone; CPU iPhone OS 16_5 like Mac OS X)",
|
||||||
"ipad": "(iPad; CPU OS 16_5 like Mac OS X)",
|
"ipad": "(iPad; CPU OS 16_5 like Mac OS X)",
|
||||||
}
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
# Browser Combinations
|
# Browser Combinations
|
||||||
self.browser_combinations = {
|
self.browser_combinations = {
|
||||||
1: [
|
1: [["chrome"], ["firefox"], ["safari"], ["edge"]],
|
||||||
["chrome"],
|
2: [["gecko", "firefox"], ["chrome", "safari"], ["webkit", "safari"]],
|
||||||
["firefox"],
|
3: [["chrome", "safari", "edge"], ["webkit", "chrome", "safari"]],
|
||||||
["safari"],
|
|
||||||
["edge"]
|
|
||||||
],
|
|
||||||
2: [
|
|
||||||
["gecko", "firefox"],
|
|
||||||
["chrome", "safari"],
|
|
||||||
["webkit", "safari"]
|
|
||||||
],
|
|
||||||
3: [
|
|
||||||
["chrome", "safari", "edge"],
|
|
||||||
["webkit", "chrome", "safari"]
|
|
||||||
]
|
|
||||||
}
|
}
|
||||||
|
|
||||||
# Rendering Engines with versions
|
# Rendering Engines with versions
|
||||||
@@ -90,7 +216,7 @@ class UserAgentGenerator:
|
|||||||
"Gecko/20100101",
|
"Gecko/20100101",
|
||||||
"Gecko/20100101", # Firefox usually uses this constant version
|
"Gecko/20100101", # Firefox usually uses this constant version
|
||||||
"Gecko/2010010",
|
"Gecko/2010010",
|
||||||
]
|
],
|
||||||
}
|
}
|
||||||
|
|
||||||
# Browser Versions
|
# Browser Versions
|
||||||
@@ -170,12 +296,14 @@ class UserAgentGenerator:
|
|||||||
|
|
||||||
return browser_stack
|
return browser_stack
|
||||||
|
|
||||||
def generate(self,
|
def generate(
|
||||||
device_type: Optional[Literal['desktop', 'mobile']] = None,
|
self,
|
||||||
os_type: Optional[str] = None,
|
device_type: Optional[Literal["desktop", "mobile"]] = None,
|
||||||
device_brand: Optional[str] = None,
|
os_type: Optional[str] = None,
|
||||||
browser_type: Optional[Literal['chrome', 'edge', 'safari', 'firefox']] = None,
|
device_brand: Optional[str] = None,
|
||||||
num_browsers: int = 3) -> str:
|
browser_type: Optional[Literal["chrome", "edge", "safari", "firefox"]] = None,
|
||||||
|
num_browsers: int = 3,
|
||||||
|
) -> str:
|
||||||
"""
|
"""
|
||||||
Generate a random user agent with specified constraints.
|
Generate a random user agent with specified constraints.
|
||||||
|
|
||||||
@@ -196,9 +324,15 @@ class UserAgentGenerator:
|
|||||||
browser_stack = self.get_browser_stack(num_browsers)
|
browser_stack = self.get_browser_stack(num_browsers)
|
||||||
|
|
||||||
# Add appropriate legacy token based on browser stack
|
# Add appropriate legacy token based on browser stack
|
||||||
if "Firefox" in str(browser_stack):
|
if "Firefox" in str(browser_stack) or browser_type == "firefox":
|
||||||
components.append(random.choice(self.rendering_engines["gecko"]))
|
components.append(random.choice(self.rendering_engines["gecko"]))
|
||||||
elif "Chrome" in str(browser_stack) or "Safari" in str(browser_stack):
|
elif "Chrome" in str(browser_stack) or "Safari" in str(browser_stack) or browser_type == "chrome":
|
||||||
|
components.append(self.rendering_engines["chrome_webkit"])
|
||||||
|
components.append("(KHTML, like Gecko)")
|
||||||
|
elif "Edge" in str(browser_stack) or browser_type == "edge":
|
||||||
|
components.append(self.rendering_engines["safari_webkit"])
|
||||||
|
components.append("(KHTML, like Gecko)")
|
||||||
|
elif "Safari" in str(browser_stack) or browser_type == "safari":
|
||||||
components.append(self.rendering_engines["chrome_webkit"])
|
components.append(self.rendering_engines["chrome_webkit"])
|
||||||
components.append("(KHTML, like Gecko)")
|
components.append("(KHTML, like Gecko)")
|
||||||
|
|
||||||
@@ -215,9 +349,13 @@ class UserAgentGenerator:
|
|||||||
|
|
||||||
def get_random_platform(self, device_type, os_type, device_brand):
|
def get_random_platform(self, device_type, os_type, device_brand):
|
||||||
"""Helper method to get random platform based on constraints"""
|
"""Helper method to get random platform based on constraints"""
|
||||||
platforms = self.desktop_platforms if device_type == 'desktop' else \
|
platforms = (
|
||||||
self.mobile_platforms if device_type == 'mobile' else \
|
self.desktop_platforms
|
||||||
{**self.desktop_platforms, **self.mobile_platforms}
|
if device_type == "desktop"
|
||||||
|
else self.mobile_platforms
|
||||||
|
if device_type == "mobile"
|
||||||
|
else {**self.desktop_platforms, **self.mobile_platforms}
|
||||||
|
)
|
||||||
|
|
||||||
if os_type:
|
if os_type:
|
||||||
for platform_group in [self.desktop_platforms, self.mobile_platforms]:
|
for platform_group in [self.desktop_platforms, self.mobile_platforms]:
|
||||||
@@ -233,10 +371,10 @@ class UserAgentGenerator:
|
|||||||
def parse_user_agent(self, user_agent: str) -> Dict[str, str]:
|
def parse_user_agent(self, user_agent: str) -> Dict[str, str]:
|
||||||
"""Parse a user agent string to extract browser and version information"""
|
"""Parse a user agent string to extract browser and version information"""
|
||||||
browsers = {
|
browsers = {
|
||||||
'chrome': r'Chrome/(\d+)',
|
"chrome": r"Chrome/(\d+)",
|
||||||
'edge': r'Edg/(\d+)',
|
"edge": r"Edg/(\d+)",
|
||||||
'safari': r'Version/(\d+)',
|
"safari": r"Version/(\d+)",
|
||||||
'firefox': r'Firefox/(\d+)'
|
"firefox": r"Firefox/(\d+)",
|
||||||
}
|
}
|
||||||
|
|
||||||
result = {}
|
result = {}
|
||||||
@@ -255,51 +393,36 @@ class UserAgentGenerator:
|
|||||||
hints = []
|
hints = []
|
||||||
|
|
||||||
# Handle different browser combinations
|
# Handle different browser combinations
|
||||||
if 'chrome' in browsers:
|
if "chrome" in browsers:
|
||||||
hints.append(f'"Chromium";v="{browsers["chrome"]}"')
|
hints.append(f'"Chromium";v="{browsers["chrome"]}"')
|
||||||
hints.append('"Not_A Brand";v="8"')
|
hints.append('"Not_A Brand";v="8"')
|
||||||
|
|
||||||
if 'edge' in browsers:
|
if "edge" in browsers:
|
||||||
hints.append(f'"Microsoft Edge";v="{browsers["edge"]}"')
|
hints.append(f'"Microsoft Edge";v="{browsers["edge"]}"')
|
||||||
else:
|
else:
|
||||||
hints.append(f'"Google Chrome";v="{browsers["chrome"]}"')
|
hints.append(f'"Google Chrome";v="{browsers["chrome"]}"')
|
||||||
|
|
||||||
elif 'firefox' in browsers:
|
elif "firefox" in browsers:
|
||||||
# Firefox doesn't typically send Sec-CH-UA
|
# Firefox doesn't typically send Sec-CH-UA
|
||||||
return '""'
|
return '""'
|
||||||
|
|
||||||
elif 'safari' in browsers:
|
elif "safari" in browsers:
|
||||||
# Safari's format for client hints
|
# Safari's format for client hints
|
||||||
hints.append(f'"Safari";v="{browsers["safari"]}"')
|
hints.append(f'"Safari";v="{browsers["safari"]}"')
|
||||||
hints.append('"Not_A Brand";v="8"')
|
hints.append('"Not_A Brand";v="8"')
|
||||||
|
|
||||||
return ', '.join(hints)
|
return ", ".join(hints)
|
||||||
|
|
||||||
|
|
||||||
# Example usage:
|
# Example usage:
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
generator = UserAgentGenerator()
|
|
||||||
print(generator.generate())
|
|
||||||
|
|
||||||
print("\nSingle browser (Chrome):")
|
# Usage example:
|
||||||
print(generator.generate(num_browsers=1, browser_type='chrome'))
|
generator = ValidUAGenerator()
|
||||||
|
ua = generator.generate()
|
||||||
|
print(ua)
|
||||||
|
|
||||||
print("\nTwo browsers (Gecko/Firefox):")
|
generator = OnlineUAGenerator()
|
||||||
print(generator.generate(num_browsers=2))
|
ua = generator.generate()
|
||||||
|
print(ua)
|
||||||
|
|
||||||
print("\nThree browsers (Chrome/Safari/Edge):")
|
|
||||||
print(generator.generate(num_browsers=3))
|
|
||||||
|
|
||||||
print("\nFirefox on Linux:")
|
|
||||||
print(generator.generate(
|
|
||||||
device_type='desktop',
|
|
||||||
os_type='linux',
|
|
||||||
browser_type='firefox',
|
|
||||||
num_browsers=2
|
|
||||||
))
|
|
||||||
|
|
||||||
print("\nChrome/Safari/Edge on Windows:")
|
|
||||||
print(generator.generate(
|
|
||||||
device_type='desktop',
|
|
||||||
os_type='windows',
|
|
||||||
num_browsers=3
|
|
||||||
))
|
|
||||||
1691
crawl4ai/utils.py
1691
crawl4ai/utils.py
File diff suppressed because it is too large
Load Diff
@@ -1,253 +0,0 @@
|
|||||||
import os, time
|
|
||||||
os.environ["TOKENIZERS_PARALLELISM"] = "false"
|
|
||||||
from pathlib import Path
|
|
||||||
|
|
||||||
from .models import UrlModel, CrawlResult
|
|
||||||
from .database import init_db, get_cached_url, cache_url, DB_PATH, flush_db
|
|
||||||
from .utils import *
|
|
||||||
from .chunking_strategy import *
|
|
||||||
from .extraction_strategy import *
|
|
||||||
from .crawler_strategy import *
|
|
||||||
from typing import List
|
|
||||||
from concurrent.futures import ThreadPoolExecutor
|
|
||||||
from .content_scraping_strategy import WebScrapingStrategy
|
|
||||||
from .config import *
|
|
||||||
import warnings
|
|
||||||
import json
|
|
||||||
warnings.filterwarnings("ignore", message='Field "model_name" has conflict with protected namespace "model_".')
|
|
||||||
|
|
||||||
|
|
||||||
class WebCrawler:
|
|
||||||
def __init__(self, crawler_strategy: CrawlerStrategy = None, always_by_pass_cache: bool = False, verbose: bool = False):
|
|
||||||
self.crawler_strategy = crawler_strategy or LocalSeleniumCrawlerStrategy(verbose=verbose)
|
|
||||||
self.always_by_pass_cache = always_by_pass_cache
|
|
||||||
self.crawl4ai_folder = os.path.join(os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".crawl4ai")
|
|
||||||
os.makedirs(self.crawl4ai_folder, exist_ok=True)
|
|
||||||
os.makedirs(f"{self.crawl4ai_folder}/cache", exist_ok=True)
|
|
||||||
init_db()
|
|
||||||
self.ready = False
|
|
||||||
|
|
||||||
def warmup(self):
|
|
||||||
print("[LOG] 🌤️ Warming up the WebCrawler")
|
|
||||||
self.run(
|
|
||||||
url='https://google.com/',
|
|
||||||
word_count_threshold=5,
|
|
||||||
extraction_strategy=NoExtractionStrategy(),
|
|
||||||
bypass_cache=False,
|
|
||||||
verbose=False
|
|
||||||
)
|
|
||||||
self.ready = True
|
|
||||||
print("[LOG] 🌞 WebCrawler is ready to crawl")
|
|
||||||
|
|
||||||
def fetch_page(
|
|
||||||
self,
|
|
||||||
url_model: UrlModel,
|
|
||||||
provider: str = DEFAULT_PROVIDER,
|
|
||||||
api_token: str = None,
|
|
||||||
extract_blocks_flag: bool = True,
|
|
||||||
word_count_threshold=MIN_WORD_THRESHOLD,
|
|
||||||
css_selector: str = None,
|
|
||||||
screenshot: bool = False,
|
|
||||||
use_cached_html: bool = False,
|
|
||||||
extraction_strategy: ExtractionStrategy = None,
|
|
||||||
chunking_strategy: ChunkingStrategy = RegexChunking(),
|
|
||||||
**kwargs,
|
|
||||||
) -> CrawlResult:
|
|
||||||
return self.run(
|
|
||||||
url_model.url,
|
|
||||||
word_count_threshold,
|
|
||||||
extraction_strategy or NoExtractionStrategy(),
|
|
||||||
chunking_strategy,
|
|
||||||
bypass_cache=url_model.forced,
|
|
||||||
css_selector=css_selector,
|
|
||||||
screenshot=screenshot,
|
|
||||||
**kwargs,
|
|
||||||
)
|
|
||||||
pass
|
|
||||||
|
|
||||||
def fetch_pages(
|
|
||||||
self,
|
|
||||||
url_models: List[UrlModel],
|
|
||||||
provider: str = DEFAULT_PROVIDER,
|
|
||||||
api_token: str = None,
|
|
||||||
extract_blocks_flag: bool = True,
|
|
||||||
word_count_threshold=MIN_WORD_THRESHOLD,
|
|
||||||
use_cached_html: bool = False,
|
|
||||||
css_selector: str = None,
|
|
||||||
screenshot: bool = False,
|
|
||||||
extraction_strategy: ExtractionStrategy = None,
|
|
||||||
chunking_strategy: ChunkingStrategy = RegexChunking(),
|
|
||||||
**kwargs,
|
|
||||||
) -> List[CrawlResult]:
|
|
||||||
extraction_strategy = extraction_strategy or NoExtractionStrategy()
|
|
||||||
def fetch_page_wrapper(url_model, *args, **kwargs):
|
|
||||||
return self.fetch_page(url_model, *args, **kwargs)
|
|
||||||
|
|
||||||
with ThreadPoolExecutor() as executor:
|
|
||||||
results = list(
|
|
||||||
executor.map(
|
|
||||||
fetch_page_wrapper,
|
|
||||||
url_models,
|
|
||||||
[provider] * len(url_models),
|
|
||||||
[api_token] * len(url_models),
|
|
||||||
[extract_blocks_flag] * len(url_models),
|
|
||||||
[word_count_threshold] * len(url_models),
|
|
||||||
[css_selector] * len(url_models),
|
|
||||||
[screenshot] * len(url_models),
|
|
||||||
[use_cached_html] * len(url_models),
|
|
||||||
[extraction_strategy] * len(url_models),
|
|
||||||
[chunking_strategy] * len(url_models),
|
|
||||||
*[kwargs] * len(url_models),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
return results
|
|
||||||
|
|
||||||
def run(
|
|
||||||
self,
|
|
||||||
url: str,
|
|
||||||
word_count_threshold=MIN_WORD_THRESHOLD,
|
|
||||||
extraction_strategy: ExtractionStrategy = None,
|
|
||||||
chunking_strategy: ChunkingStrategy = RegexChunking(),
|
|
||||||
bypass_cache: bool = False,
|
|
||||||
css_selector: str = None,
|
|
||||||
screenshot: bool = False,
|
|
||||||
user_agent: str = None,
|
|
||||||
verbose=True,
|
|
||||||
**kwargs,
|
|
||||||
) -> CrawlResult:
|
|
||||||
try:
|
|
||||||
extraction_strategy = extraction_strategy or NoExtractionStrategy()
|
|
||||||
extraction_strategy.verbose = verbose
|
|
||||||
if not isinstance(extraction_strategy, ExtractionStrategy):
|
|
||||||
raise ValueError("Unsupported extraction strategy")
|
|
||||||
if not isinstance(chunking_strategy, ChunkingStrategy):
|
|
||||||
raise ValueError("Unsupported chunking strategy")
|
|
||||||
|
|
||||||
word_count_threshold = max(word_count_threshold, MIN_WORD_THRESHOLD)
|
|
||||||
|
|
||||||
cached = None
|
|
||||||
screenshot_data = None
|
|
||||||
extracted_content = None
|
|
||||||
if not bypass_cache and not self.always_by_pass_cache:
|
|
||||||
cached = get_cached_url(url)
|
|
||||||
|
|
||||||
if kwargs.get("warmup", True) and not self.ready:
|
|
||||||
return None
|
|
||||||
|
|
||||||
if cached:
|
|
||||||
html = sanitize_input_encode(cached[1])
|
|
||||||
extracted_content = sanitize_input_encode(cached[4])
|
|
||||||
if screenshot:
|
|
||||||
screenshot_data = cached[9]
|
|
||||||
if not screenshot_data:
|
|
||||||
cached = None
|
|
||||||
|
|
||||||
if not cached or not html:
|
|
||||||
if user_agent:
|
|
||||||
self.crawler_strategy.update_user_agent(user_agent)
|
|
||||||
t1 = time.time()
|
|
||||||
html = sanitize_input_encode(self.crawler_strategy.crawl(url, **kwargs))
|
|
||||||
t2 = time.time()
|
|
||||||
if verbose:
|
|
||||||
print(f"[LOG] 🚀 Crawling done for {url}, success: {bool(html)}, time taken: {t2 - t1:.2f} seconds")
|
|
||||||
if screenshot:
|
|
||||||
screenshot_data = self.crawler_strategy.take_screenshot()
|
|
||||||
|
|
||||||
|
|
||||||
crawl_result = self.process_html(url, html, extracted_content, word_count_threshold, extraction_strategy, chunking_strategy, css_selector, screenshot_data, verbose, bool(cached), **kwargs)
|
|
||||||
crawl_result.success = bool(html)
|
|
||||||
return crawl_result
|
|
||||||
except Exception as e:
|
|
||||||
if not hasattr(e, "msg"):
|
|
||||||
e.msg = str(e)
|
|
||||||
print(f"[ERROR] 🚫 Failed to crawl {url}, error: {e.msg}")
|
|
||||||
return CrawlResult(url=url, html="", success=False, error_message=e.msg)
|
|
||||||
|
|
||||||
def process_html(
|
|
||||||
self,
|
|
||||||
url: str,
|
|
||||||
html: str,
|
|
||||||
extracted_content: str,
|
|
||||||
word_count_threshold: int,
|
|
||||||
extraction_strategy: ExtractionStrategy,
|
|
||||||
chunking_strategy: ChunkingStrategy,
|
|
||||||
css_selector: str,
|
|
||||||
screenshot: bool,
|
|
||||||
verbose: bool,
|
|
||||||
is_cached: bool,
|
|
||||||
**kwargs,
|
|
||||||
) -> CrawlResult:
|
|
||||||
t = time.time()
|
|
||||||
# Extract content from HTML
|
|
||||||
try:
|
|
||||||
t1 = time.time()
|
|
||||||
scrapping_strategy = WebScrapingStrategy()
|
|
||||||
extra_params = {k: v for k, v in kwargs.items() if k not in ["only_text", "image_description_min_word_threshold"]}
|
|
||||||
result = scrapping_strategy.scrap(
|
|
||||||
url,
|
|
||||||
html,
|
|
||||||
word_count_threshold=word_count_threshold,
|
|
||||||
css_selector=css_selector,
|
|
||||||
only_text=kwargs.get("only_text", False),
|
|
||||||
image_description_min_word_threshold=kwargs.get(
|
|
||||||
"image_description_min_word_threshold", IMAGE_DESCRIPTION_MIN_WORD_THRESHOLD
|
|
||||||
),
|
|
||||||
**extra_params,
|
|
||||||
)
|
|
||||||
|
|
||||||
# result = get_content_of_website_optimized(url, html, word_count_threshold, css_selector=css_selector, only_text=kwargs.get("only_text", False))
|
|
||||||
if verbose:
|
|
||||||
print(f"[LOG] 🚀 Content extracted for {url}, success: True, time taken: {time.time() - t1:.2f} seconds")
|
|
||||||
|
|
||||||
if result is None:
|
|
||||||
raise ValueError(f"Failed to extract content from the website: {url}")
|
|
||||||
except InvalidCSSSelectorError as e:
|
|
||||||
raise ValueError(str(e))
|
|
||||||
|
|
||||||
cleaned_html = sanitize_input_encode(result.get("cleaned_html", ""))
|
|
||||||
markdown = sanitize_input_encode(result.get("markdown", ""))
|
|
||||||
media = result.get("media", [])
|
|
||||||
links = result.get("links", [])
|
|
||||||
metadata = result.get("metadata", {})
|
|
||||||
|
|
||||||
if extracted_content is None:
|
|
||||||
if verbose:
|
|
||||||
print(f"[LOG] 🔥 Extracting semantic blocks for {url}, Strategy: {extraction_strategy.name}")
|
|
||||||
|
|
||||||
sections = chunking_strategy.chunk(markdown)
|
|
||||||
extracted_content = extraction_strategy.run(url, sections)
|
|
||||||
extracted_content = json.dumps(extracted_content, indent=4, default=str, ensure_ascii=False)
|
|
||||||
|
|
||||||
if verbose:
|
|
||||||
print(f"[LOG] 🚀 Extraction done for {url}, time taken: {time.time() - t:.2f} seconds.")
|
|
||||||
|
|
||||||
screenshot = None if not screenshot else screenshot
|
|
||||||
|
|
||||||
if not is_cached:
|
|
||||||
cache_url(
|
|
||||||
url,
|
|
||||||
html,
|
|
||||||
cleaned_html,
|
|
||||||
markdown,
|
|
||||||
extracted_content,
|
|
||||||
True,
|
|
||||||
json.dumps(media),
|
|
||||||
json.dumps(links),
|
|
||||||
json.dumps(metadata),
|
|
||||||
screenshot=screenshot,
|
|
||||||
)
|
|
||||||
|
|
||||||
return CrawlResult(
|
|
||||||
url=url,
|
|
||||||
html=html,
|
|
||||||
cleaned_html=format_html(cleaned_html),
|
|
||||||
markdown=markdown,
|
|
||||||
media=media,
|
|
||||||
links=links,
|
|
||||||
metadata=metadata,
|
|
||||||
screenshot=screenshot,
|
|
||||||
extracted_content=extracted_content,
|
|
||||||
success=True,
|
|
||||||
error_message="",
|
|
||||||
)
|
|
||||||
31
deploy/docker/.dockerignore
Normal file
31
deploy/docker/.dockerignore
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
# .dockerignore
|
||||||
|
*
|
||||||
|
|
||||||
|
# Allow specific files and directories when using local installation
|
||||||
|
!crawl4ai/
|
||||||
|
!docs/
|
||||||
|
!deploy/docker/
|
||||||
|
!setup.py
|
||||||
|
!pyproject.toml
|
||||||
|
!README.md
|
||||||
|
!LICENSE
|
||||||
|
!MANIFEST.in
|
||||||
|
!setup.cfg
|
||||||
|
!mkdocs.yml
|
||||||
|
|
||||||
|
.git/
|
||||||
|
__pycache__/
|
||||||
|
*.pyc
|
||||||
|
*.pyo
|
||||||
|
*.pyd
|
||||||
|
.DS_Store
|
||||||
|
.env
|
||||||
|
.venv
|
||||||
|
venv/
|
||||||
|
tests/
|
||||||
|
coverage.xml
|
||||||
|
*.log
|
||||||
|
*.swp
|
||||||
|
*.egg-info/
|
||||||
|
dist/
|
||||||
|
build/
|
||||||
8
deploy/docker/.llm.env.example
Normal file
8
deploy/docker/.llm.env.example
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
# LLM Provider Keys
|
||||||
|
OPENAI_API_KEY=your_openai_key_here
|
||||||
|
DEEPSEEK_API_KEY=your_deepseek_key_here
|
||||||
|
ANTHROPIC_API_KEY=your_anthropic_key_here
|
||||||
|
GROQ_API_KEY=your_groq_key_here
|
||||||
|
TOGETHER_API_KEY=your_together_key_here
|
||||||
|
MISTRAL_API_KEY=your_mistral_key_here
|
||||||
|
GEMINI_API_TOKEN=your_gemini_key_here
|
||||||
830
deploy/docker/README.md
Normal file
830
deploy/docker/README.md
Normal file
@@ -0,0 +1,830 @@
|
|||||||
|
# Crawl4AI Docker Guide 🐳
|
||||||
|
|
||||||
|
## Table of Contents
|
||||||
|
- [Prerequisites](#prerequisites)
|
||||||
|
- [Installation](#installation)
|
||||||
|
- [Local Build](#local-build)
|
||||||
|
- [Docker Hub](#docker-hub)
|
||||||
|
- [Dockerfile Parameters](#dockerfile-parameters)
|
||||||
|
- [Using the API](#using-the-api)
|
||||||
|
- [Understanding Request Schema](#understanding-request-schema)
|
||||||
|
- [REST API Examples](#rest-api-examples)
|
||||||
|
- [Python SDK](#python-sdk)
|
||||||
|
- [Metrics & Monitoring](#metrics--monitoring)
|
||||||
|
- [Deployment Scenarios](#deployment-scenarios)
|
||||||
|
- [Complete Examples](#complete-examples)
|
||||||
|
- [Getting Help](#getting-help)
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
Before we dive in, make sure you have:
|
||||||
|
- Docker installed and running (version 20.10.0 or higher)
|
||||||
|
- At least 4GB of RAM available for the container
|
||||||
|
- Python 3.10+ (if using the Python SDK)
|
||||||
|
- Node.js 16+ (if using the Node.js examples)
|
||||||
|
|
||||||
|
> 💡 **Pro tip**: Run `docker info` to check your Docker installation and available resources.
|
||||||
|
|
||||||
|
## Installation
|
||||||
|
|
||||||
|
### Local Build
|
||||||
|
|
||||||
|
Let's get your local environment set up step by step!
|
||||||
|
|
||||||
|
#### 1. Building the Image
|
||||||
|
|
||||||
|
First, clone the repository and build the Docker image:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Clone the repository
|
||||||
|
git clone https://github.com/unclecode/crawl4ai.git
|
||||||
|
cd crawl4ai/deploy
|
||||||
|
|
||||||
|
# Build the Docker image
|
||||||
|
docker build --platform=linux/amd64 --no-cache -t crawl4ai .
|
||||||
|
|
||||||
|
# Or build for arm64
|
||||||
|
docker build --platform=linux/arm64 --no-cache -t crawl4ai .
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 2. Environment Setup
|
||||||
|
|
||||||
|
If you plan to use LLMs (Language Models), you'll need to set up your API keys. Create a `.llm.env` file:
|
||||||
|
|
||||||
|
```env
|
||||||
|
# OpenAI
|
||||||
|
OPENAI_API_KEY=sk-your-key
|
||||||
|
|
||||||
|
# Anthropic
|
||||||
|
ANTHROPIC_API_KEY=your-anthropic-key
|
||||||
|
|
||||||
|
# DeepSeek
|
||||||
|
DEEPSEEK_API_KEY=your-deepseek-key
|
||||||
|
|
||||||
|
# Check out https://docs.litellm.ai/docs/providers for more providers!
|
||||||
|
```
|
||||||
|
|
||||||
|
> 🔑 **Note**: Keep your API keys secure! Never commit them to version control.
|
||||||
|
|
||||||
|
#### 3. Running the Container
|
||||||
|
|
||||||
|
You have several options for running the container:
|
||||||
|
|
||||||
|
Basic run (no LLM support):
|
||||||
|
```bash
|
||||||
|
docker run -d -p 8000:8000 --name crawl4ai crawl4ai
|
||||||
|
```
|
||||||
|
|
||||||
|
With LLM support:
|
||||||
|
```bash
|
||||||
|
docker run -d -p 8000:8000 \
|
||||||
|
--env-file .llm.env \
|
||||||
|
--name crawl4ai \
|
||||||
|
crawl4ai
|
||||||
|
```
|
||||||
|
|
||||||
|
Using host environment variables (Not a good practice, but works for local testing):
|
||||||
|
```bash
|
||||||
|
docker run -d -p 8000:8000 \
|
||||||
|
--env-file .llm.env \
|
||||||
|
--env "$(env)" \
|
||||||
|
--name crawl4ai \
|
||||||
|
crawl4ai
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Multi-Platform Build
|
||||||
|
For distributing your image across different architectures, use `buildx`:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Set up buildx builder
|
||||||
|
docker buildx create --use
|
||||||
|
|
||||||
|
# Build for multiple platforms
|
||||||
|
docker buildx build \
|
||||||
|
--platform linux/amd64,linux/arm64 \
|
||||||
|
-t crawl4ai \
|
||||||
|
--push \
|
||||||
|
.
|
||||||
|
```
|
||||||
|
|
||||||
|
> 💡 **Note**: Multi-platform builds require Docker Buildx and need to be pushed to a registry.
|
||||||
|
|
||||||
|
#### Development Build
|
||||||
|
For development, you might want to enable all features:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker build -t crawl4ai
|
||||||
|
--build-arg INSTALL_TYPE=all \
|
||||||
|
--build-arg PYTHON_VERSION=3.10 \
|
||||||
|
--build-arg ENABLE_GPU=true \
|
||||||
|
.
|
||||||
|
```
|
||||||
|
|
||||||
|
#### GPU-Enabled Build
|
||||||
|
If you plan to use GPU acceleration:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker build -t crawl4ai
|
||||||
|
--build-arg ENABLE_GPU=true \
|
||||||
|
deploy/docker/
|
||||||
|
```
|
||||||
|
|
||||||
|
### Build Arguments Explained
|
||||||
|
|
||||||
|
| Argument | Description | Default | Options |
|
||||||
|
|----------|-------------|---------|----------|
|
||||||
|
| PYTHON_VERSION | Python version | 3.10 | 3.8, 3.9, 3.10 |
|
||||||
|
| INSTALL_TYPE | Feature set | default | default, all, torch, transformer |
|
||||||
|
| ENABLE_GPU | GPU support | false | true, false |
|
||||||
|
| APP_HOME | Install path | /app | any valid path |
|
||||||
|
|
||||||
|
### Build Best Practices
|
||||||
|
|
||||||
|
1. **Choose the Right Install Type**
|
||||||
|
- `default`: Basic installation, smallest image, to be honest, I use this most of the time.
|
||||||
|
- `all`: Full features, larger image (include transformer, and nltk, make sure you really need them)
|
||||||
|
|
||||||
|
2. **Platform Considerations**
|
||||||
|
- Let Docker auto-detect platform unless you need cross-compilation
|
||||||
|
- Use --platform for specific architecture requirements
|
||||||
|
- Consider buildx for multi-architecture distribution
|
||||||
|
|
||||||
|
3. **Performance Optimization**
|
||||||
|
- The image automatically includes platform-specific optimizations
|
||||||
|
- AMD64 gets OpenMP optimizations
|
||||||
|
- ARM64 gets OpenBLAS optimizations
|
||||||
|
|
||||||
|
### Docker Hub
|
||||||
|
|
||||||
|
> 🚧 Coming soon! The image will be available at `crawl4ai`. Stay tuned!
|
||||||
|
|
||||||
|
## Using the API
|
||||||
|
|
||||||
|
In the following sections, we discuss two ways to communicate with the Docker server. One option is to use the client SDK that I developed for Python, and I will soon develop one for Node.js. I highly recommend this approach to avoid mistakes. Alternatively, you can take a more technical route by using the JSON structure and passing it to all the URLs, which I will explain in detail.
|
||||||
|
|
||||||
|
### Python SDK
|
||||||
|
|
||||||
|
The SDK makes things easier! Here's how to use it:
|
||||||
|
|
||||||
|
```python
|
||||||
|
from crawl4ai.docker_client import Crawl4aiDockerClient
|
||||||
|
from crawl4ai import BrowserConfig, CrawlerRunConfig
|
||||||
|
|
||||||
|
async def main():
|
||||||
|
async with Crawl4aiDockerClient(base_url="http://localhost:8000", verbose=True) as client:
|
||||||
|
# If JWT is enabled, you can authenticate like this: (more on this later)
|
||||||
|
# await client.authenticate("test@example.com")
|
||||||
|
|
||||||
|
# Non-streaming crawl
|
||||||
|
results = await client.crawl(
|
||||||
|
["https://example.com", "https://python.org"],
|
||||||
|
browser_config=BrowserConfig(headless=True),
|
||||||
|
crawler_config=CrawlerRunConfig()
|
||||||
|
)
|
||||||
|
print(f"Non-streaming results: {results}")
|
||||||
|
|
||||||
|
# Streaming crawl
|
||||||
|
crawler_config = CrawlerRunConfig(stream=True)
|
||||||
|
async for result in await client.crawl(
|
||||||
|
["https://example.com", "https://python.org"],
|
||||||
|
browser_config=BrowserConfig(headless=True),
|
||||||
|
crawler_config=crawler_config
|
||||||
|
):
|
||||||
|
print(f"Streamed result: {result}")
|
||||||
|
|
||||||
|
# Get schema
|
||||||
|
schema = await client.get_schema()
|
||||||
|
print(f"Schema: {schema}")
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
asyncio.run(main())
|
||||||
|
```
|
||||||
|
|
||||||
|
`Crawl4aiDockerClient` is an async context manager that handles the connection for you. You can pass in optional parameters for more control:
|
||||||
|
|
||||||
|
- `base_url` (str): Base URL of the Crawl4AI Docker server
|
||||||
|
- `timeout` (float): Default timeout for requests in seconds
|
||||||
|
- `verify_ssl` (bool): Whether to verify SSL certificates
|
||||||
|
- `verbose` (bool): Whether to show logging output
|
||||||
|
- `log_file` (str, optional): Path to log file if file logging is desired
|
||||||
|
|
||||||
|
This client SDK generates a properly structured JSON request for the server's HTTP API.
|
||||||
|
|
||||||
|
## Second Approach: Direct API Calls
|
||||||
|
|
||||||
|
This is super important! The API expects a specific structure that matches our Python classes. Let me show you how it works.
|
||||||
|
|
||||||
|
### Understanding Configuration Structure
|
||||||
|
|
||||||
|
Let's dive deep into how configurations work in Crawl4AI. Every configuration object follows a consistent pattern of `type` and `params`. This structure enables complex, nested configurations while maintaining clarity.
|
||||||
|
|
||||||
|
#### The Basic Pattern
|
||||||
|
|
||||||
|
Try this in Python to understand the structure:
|
||||||
|
```python
|
||||||
|
from crawl4ai import BrowserConfig
|
||||||
|
|
||||||
|
# Create a config and see its structure
|
||||||
|
config = BrowserConfig(headless=True)
|
||||||
|
print(config.dump())
|
||||||
|
```
|
||||||
|
|
||||||
|
This outputs:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"type": "BrowserConfig",
|
||||||
|
"params": {
|
||||||
|
"headless": true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Simple vs Complex Values
|
||||||
|
|
||||||
|
The structure follows these rules:
|
||||||
|
- Simple values (strings, numbers, booleans, lists) are passed directly
|
||||||
|
- Complex values (classes, dictionaries) use the type-params pattern
|
||||||
|
|
||||||
|
For example, with dictionaries:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"browser_config": {
|
||||||
|
"type": "BrowserConfig",
|
||||||
|
"params": {
|
||||||
|
"headless": true, // Simple boolean - direct value
|
||||||
|
"viewport": { // Complex dictionary - needs type-params
|
||||||
|
"type": "dict",
|
||||||
|
"value": {
|
||||||
|
"width": 1200,
|
||||||
|
"height": 800
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Strategy Pattern and Nesting
|
||||||
|
|
||||||
|
Strategies (like chunking or content filtering) demonstrate why we need this structure. Consider this chunking configuration:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"crawler_config": {
|
||||||
|
"type": "CrawlerRunConfig",
|
||||||
|
"params": {
|
||||||
|
"chunking_strategy": {
|
||||||
|
"type": "RegexChunking", // Strategy implementation
|
||||||
|
"params": {
|
||||||
|
"patterns": ["\n\n", "\\.\\s+"]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Here, `chunking_strategy` accepts any chunking implementation. The `type` field tells the system which strategy to use, and `params` configures that specific strategy.
|
||||||
|
|
||||||
|
#### Complex Nested Example
|
||||||
|
|
||||||
|
Let's look at a more complex example with content filtering:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"crawler_config": {
|
||||||
|
"type": "CrawlerRunConfig",
|
||||||
|
"params": {
|
||||||
|
"markdown_generator": {
|
||||||
|
"type": "DefaultMarkdownGenerator",
|
||||||
|
"params": {
|
||||||
|
"content_filter": {
|
||||||
|
"type": "PruningContentFilter",
|
||||||
|
"params": {
|
||||||
|
"threshold": 0.48,
|
||||||
|
"threshold_type": "fixed"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
This shows how deeply configurations can nest while maintaining a consistent structure.
|
||||||
|
|
||||||
|
#### Quick Grammar Overview
|
||||||
|
```
|
||||||
|
config := {
|
||||||
|
"type": string,
|
||||||
|
"params": {
|
||||||
|
key: simple_value | complex_value
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
simple_value := string | number | boolean | [simple_value]
|
||||||
|
complex_value := config | dict_value
|
||||||
|
|
||||||
|
dict_value := {
|
||||||
|
"type": "dict",
|
||||||
|
"value": object
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Important Rules 🚨
|
||||||
|
|
||||||
|
- Always use the type-params pattern for class instances
|
||||||
|
- Use direct values for primitives (numbers, strings, booleans)
|
||||||
|
- Wrap dictionaries with {"type": "dict", "value": {...}}
|
||||||
|
- Arrays/lists are passed directly without type-params
|
||||||
|
- All parameters are optional unless specifically required
|
||||||
|
|
||||||
|
#### Pro Tip 💡
|
||||||
|
|
||||||
|
The easiest way to get the correct structure is to:
|
||||||
|
1. Create configuration objects in Python
|
||||||
|
2. Use the `dump()` method to see their JSON representation
|
||||||
|
3. Use that JSON in your API calls
|
||||||
|
|
||||||
|
Example:
|
||||||
|
```python
|
||||||
|
from crawl4ai import CrawlerRunConfig, PruningContentFilter
|
||||||
|
|
||||||
|
config = CrawlerRunConfig(
|
||||||
|
content_filter=PruningContentFilter(threshold=0.48)
|
||||||
|
)
|
||||||
|
print(config.dump()) # Use this JSON in your API calls
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
#### More Examples
|
||||||
|
|
||||||
|
**Advanced Crawler Configuration**
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"urls": ["https://example.com"],
|
||||||
|
"crawler_config": {
|
||||||
|
"type": "CrawlerRunConfig",
|
||||||
|
"params": {
|
||||||
|
"cache_mode": "bypass",
|
||||||
|
"markdown_generator": {
|
||||||
|
"type": "DefaultMarkdownGenerator",
|
||||||
|
"params": {
|
||||||
|
"content_filter": {
|
||||||
|
"type": "PruningContentFilter",
|
||||||
|
"params": {
|
||||||
|
"threshold": 0.48,
|
||||||
|
"threshold_type": "fixed",
|
||||||
|
"min_word_threshold": 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Extraction Strategy**:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"crawler_config": {
|
||||||
|
"type": "CrawlerRunConfig",
|
||||||
|
"params": {
|
||||||
|
"extraction_strategy": {
|
||||||
|
"type": "JsonCssExtractionStrategy",
|
||||||
|
"params": {
|
||||||
|
"schema": {
|
||||||
|
"baseSelector": "article.post",
|
||||||
|
"fields": [
|
||||||
|
{"name": "title", "selector": "h1", "type": "text"},
|
||||||
|
{"name": "content", "selector": ".content", "type": "html"}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**LLM Extraction Strategy**
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"crawler_config": {
|
||||||
|
"type": "CrawlerRunConfig",
|
||||||
|
"params": {
|
||||||
|
"extraction_strategy": {
|
||||||
|
"type": "LLMExtractionStrategy",
|
||||||
|
"params": {
|
||||||
|
"instruction": "Extract article title, author, publication date and main content",
|
||||||
|
"provider": "openai/gpt-4",
|
||||||
|
"api_token": "your-api-token",
|
||||||
|
"schema": {
|
||||||
|
"type": "dict",
|
||||||
|
"value": {
|
||||||
|
"title": "Article Schema",
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"title": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "The article's headline"
|
||||||
|
},
|
||||||
|
"author": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "The author's name"
|
||||||
|
},
|
||||||
|
"published_date": {
|
||||||
|
"type": "string",
|
||||||
|
"format": "date-time",
|
||||||
|
"description": "Publication date and time"
|
||||||
|
},
|
||||||
|
"content": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "The main article content"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required": ["title", "content"]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Deep Crawler Example**
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"crawler_config": {
|
||||||
|
"type": "CrawlerRunConfig",
|
||||||
|
"params": {
|
||||||
|
"deep_crawl_strategy": {
|
||||||
|
"type": "BFSDeepCrawlStrategy",
|
||||||
|
"params": {
|
||||||
|
"max_depth": 3,
|
||||||
|
"filter_chain": {
|
||||||
|
"type": "FilterChain",
|
||||||
|
"params": {
|
||||||
|
"filters": [
|
||||||
|
{
|
||||||
|
"type": "ContentTypeFilter",
|
||||||
|
"params": {
|
||||||
|
"allowed_types": ["text/html", "application/xhtml+xml"]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "DomainFilter",
|
||||||
|
"params": {
|
||||||
|
"allowed_domains": ["blog.*", "docs.*"],
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"url_scorer": {
|
||||||
|
"type": "CompositeScorer",
|
||||||
|
"params": {
|
||||||
|
"scorers": [
|
||||||
|
{
|
||||||
|
"type": "KeywordRelevanceScorer",
|
||||||
|
"params": {
|
||||||
|
"keywords": ["tutorial", "guide", "documentation"],
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "PathDepthScorer",
|
||||||
|
"params": {
|
||||||
|
"weight": 0.5,
|
||||||
|
"optimal_depth": 3
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### REST API Examples
|
||||||
|
|
||||||
|
Let's look at some practical examples:
|
||||||
|
|
||||||
|
#### Simple Crawl
|
||||||
|
|
||||||
|
```python
|
||||||
|
import requests
|
||||||
|
|
||||||
|
crawl_payload = {
|
||||||
|
"urls": ["https://example.com"],
|
||||||
|
"browser_config": {"headless": True},
|
||||||
|
"crawler_config": {"stream": False}
|
||||||
|
}
|
||||||
|
response = requests.post(
|
||||||
|
"http://localhost:8000/crawl",
|
||||||
|
# headers={"Authorization": f"Bearer {token}"}, # If JWT is enabled, more on this later
|
||||||
|
json=crawl_payload
|
||||||
|
)
|
||||||
|
print(response.json()) # Print the response for debugging
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Streaming Results
|
||||||
|
|
||||||
|
```python
|
||||||
|
async def test_stream_crawl(session, token: str):
|
||||||
|
"""Test the /crawl/stream endpoint with multiple URLs."""
|
||||||
|
url = "http://localhost:8000/crawl/stream"
|
||||||
|
payload = {
|
||||||
|
"urls": [
|
||||||
|
"https://example.com",
|
||||||
|
"https://example.com/page1",
|
||||||
|
"https://example.com/page2",
|
||||||
|
"https://example.com/page3",
|
||||||
|
],
|
||||||
|
"browser_config": {"headless": True, "viewport": {"width": 1200}},
|
||||||
|
"crawler_config": {"stream": True, "cache_mode": "aggressive"}
|
||||||
|
}
|
||||||
|
|
||||||
|
# headers = {"Authorization": f"Bearer {token}"} # If JWT is enabled, more on this later
|
||||||
|
|
||||||
|
try:
|
||||||
|
async with session.post(url, json=payload, headers=headers) as response:
|
||||||
|
status = response.status
|
||||||
|
print(f"Status: {status} (Expected: 200)")
|
||||||
|
assert status == 200, f"Expected 200, got {status}"
|
||||||
|
|
||||||
|
# Read streaming response line-by-line (NDJSON)
|
||||||
|
async for line in response.content:
|
||||||
|
if line:
|
||||||
|
data = json.loads(line.decode('utf-8').strip())
|
||||||
|
print(f"Streamed Result: {json.dumps(data, indent=2)}")
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Error in streaming crawl test: {str(e)}")
|
||||||
|
```
|
||||||
|
|
||||||
|
## Metrics & Monitoring
|
||||||
|
|
||||||
|
Keep an eye on your crawler with these endpoints:
|
||||||
|
|
||||||
|
- `/health` - Quick health check
|
||||||
|
- `/metrics` - Detailed Prometheus metrics
|
||||||
|
- `/schema` - Full API schema
|
||||||
|
|
||||||
|
Example health check:
|
||||||
|
```bash
|
||||||
|
curl http://localhost:8000/health
|
||||||
|
```
|
||||||
|
|
||||||
|
## Deployment Scenarios
|
||||||
|
|
||||||
|
> 🚧 Coming soon! We'll cover:
|
||||||
|
> - Kubernetes deployment
|
||||||
|
> - Cloud provider setups (AWS, GCP, Azure)
|
||||||
|
> - High-availability configurations
|
||||||
|
> - Load balancing strategies
|
||||||
|
|
||||||
|
## Complete Examples
|
||||||
|
|
||||||
|
Check out the `examples` folder in our repository for full working examples! Here are two to get you started:
|
||||||
|
[Using Client SDK](https://github.com/unclecode/crawl4ai/blob/main/docs/examples/docker_python_sdk_example.py)
|
||||||
|
[Using REST API](https://github.com/unclecode/crawl4ai/blob/main/docs/examples/docker_python_rest_api_example.py)
|
||||||
|
|
||||||
|
## Server Configuration
|
||||||
|
|
||||||
|
The server's behavior can be customized through the `config.yml` file. Let's explore how to configure your Crawl4AI server for optimal performance and security.
|
||||||
|
|
||||||
|
### Understanding config.yml
|
||||||
|
|
||||||
|
The configuration file is located at `deploy/docker/config.yml`. You can either modify this file before building the image or mount a custom configuration when running the container.
|
||||||
|
|
||||||
|
Here's a detailed breakdown of the configuration options:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# Application Configuration
|
||||||
|
app:
|
||||||
|
title: "Crawl4AI API" # Server title in OpenAPI docs
|
||||||
|
version: "1.0.0" # API version
|
||||||
|
host: "0.0.0.0" # Listen on all interfaces
|
||||||
|
port: 8000 # Server port
|
||||||
|
reload: True # Enable hot reloading (development only)
|
||||||
|
timeout_keep_alive: 300 # Keep-alive timeout in seconds
|
||||||
|
|
||||||
|
# Rate Limiting Configuration
|
||||||
|
rate_limiting:
|
||||||
|
enabled: True # Enable/disable rate limiting
|
||||||
|
default_limit: "100/minute" # Rate limit format: "number/timeunit"
|
||||||
|
trusted_proxies: [] # List of trusted proxy IPs
|
||||||
|
storage_uri: "memory://" # Use "redis://localhost:6379" for production
|
||||||
|
|
||||||
|
# Security Configuration
|
||||||
|
security:
|
||||||
|
enabled: false # Master toggle for security features
|
||||||
|
jwt_enabled: true # Enable JWT authentication
|
||||||
|
https_redirect: True # Force HTTPS
|
||||||
|
trusted_hosts: ["*"] # Allowed hosts (use specific domains in production)
|
||||||
|
headers: # Security headers
|
||||||
|
x_content_type_options: "nosniff"
|
||||||
|
x_frame_options: "DENY"
|
||||||
|
content_security_policy: "default-src 'self'"
|
||||||
|
strict_transport_security: "max-age=63072000; includeSubDomains"
|
||||||
|
|
||||||
|
# Crawler Configuration
|
||||||
|
crawler:
|
||||||
|
memory_threshold_percent: 95.0 # Memory usage threshold
|
||||||
|
rate_limiter:
|
||||||
|
base_delay: [1.0, 2.0] # Min and max delay between requests
|
||||||
|
timeouts:
|
||||||
|
stream_init: 30.0 # Stream initialization timeout
|
||||||
|
batch_process: 300.0 # Batch processing timeout
|
||||||
|
|
||||||
|
# Logging Configuration
|
||||||
|
logging:
|
||||||
|
level: "INFO" # Log level (DEBUG, INFO, WARNING, ERROR)
|
||||||
|
format: "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
|
||||||
|
|
||||||
|
# Observability Configuration
|
||||||
|
observability:
|
||||||
|
prometheus:
|
||||||
|
enabled: True # Enable Prometheus metrics
|
||||||
|
endpoint: "/metrics" # Metrics endpoint
|
||||||
|
health_check:
|
||||||
|
endpoint: "/health" # Health check endpoint
|
||||||
|
```
|
||||||
|
|
||||||
|
### JWT Authentication
|
||||||
|
|
||||||
|
When `security.jwt_enabled` is set to `true` in your config.yml, all endpoints require JWT authentication via bearer tokens. Here's how it works:
|
||||||
|
|
||||||
|
#### Getting a Token
|
||||||
|
```http
|
||||||
|
POST /token
|
||||||
|
Content-Type: application/json
|
||||||
|
|
||||||
|
{
|
||||||
|
"email": "user@example.com"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
The endpoint returns:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"email": "user@example.com",
|
||||||
|
"access_token": "eyJ0eXAiOiJKV1QiLCJhbGciOi...",
|
||||||
|
"token_type": "bearer"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Using the Token
|
||||||
|
Add the token to your requests:
|
||||||
|
```bash
|
||||||
|
curl -H "Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGci..." http://localhost:8000/crawl
|
||||||
|
```
|
||||||
|
|
||||||
|
Using the Python SDK:
|
||||||
|
```python
|
||||||
|
from crawl4ai.docker_client import Crawl4aiDockerClient
|
||||||
|
|
||||||
|
async with Crawl4aiDockerClient() as client:
|
||||||
|
# Authenticate first
|
||||||
|
await client.authenticate("user@example.com")
|
||||||
|
|
||||||
|
# Now all requests will include the token automatically
|
||||||
|
result = await client.crawl(urls=["https://example.com"])
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Production Considerations 💡
|
||||||
|
The default implementation uses a simple email verification. For production use, consider:
|
||||||
|
- Email verification via OTP/magic links
|
||||||
|
- OAuth2 integration
|
||||||
|
- Rate limiting token generation
|
||||||
|
- Token expiration and refresh mechanisms
|
||||||
|
- IP-based restrictions
|
||||||
|
|
||||||
|
### Configuration Tips and Best Practices
|
||||||
|
|
||||||
|
1. **Production Settings** 🏭
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
app:
|
||||||
|
reload: False # Disable reload in production
|
||||||
|
timeout_keep_alive: 120 # Lower timeout for better resource management
|
||||||
|
|
||||||
|
rate_limiting:
|
||||||
|
storage_uri: "redis://redis:6379" # Use Redis for distributed rate limiting
|
||||||
|
default_limit: "50/minute" # More conservative rate limit
|
||||||
|
|
||||||
|
security:
|
||||||
|
enabled: true # Enable all security features
|
||||||
|
trusted_hosts: ["your-domain.com"] # Restrict to your domain
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Development Settings** 🛠️
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
app:
|
||||||
|
reload: True # Enable hot reloading
|
||||||
|
timeout_keep_alive: 300 # Longer timeout for debugging
|
||||||
|
|
||||||
|
logging:
|
||||||
|
level: "DEBUG" # More verbose logging
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **High-Traffic Settings** 🚦
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
crawler:
|
||||||
|
memory_threshold_percent: 85.0 # More conservative memory limit
|
||||||
|
rate_limiter:
|
||||||
|
base_delay: [2.0, 4.0] # More aggressive rate limiting
|
||||||
|
```
|
||||||
|
|
||||||
|
### Customizing Your Configuration
|
||||||
|
|
||||||
|
#### Method 1: Pre-build Configuration
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Copy and modify config before building
|
||||||
|
cd crawl4ai/deploy
|
||||||
|
vim custom-config.yml # Or use any editor
|
||||||
|
|
||||||
|
# Build with custom config
|
||||||
|
docker build --platform=linux/amd64 --no-cache -t crawl4ai:latest .
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Method 2: Build-time Configuration
|
||||||
|
|
||||||
|
Use a custom config during build:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Build with custom config
|
||||||
|
docker build --platform=linux/amd64 --no-cache \
|
||||||
|
--build-arg CONFIG_PATH=/path/to/custom-config.yml \
|
||||||
|
-t crawl4ai:latest .
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Method 3: Runtime Configuration
|
||||||
|
```bash
|
||||||
|
# Mount custom config at runtime
|
||||||
|
docker run -d -p 8000:8000 \
|
||||||
|
-v $(pwd)/custom-config.yml:/app/config.yml \
|
||||||
|
crawl4ai-server:prod
|
||||||
|
```
|
||||||
|
|
||||||
|
> 💡 Note: When using Method 2, `/path/to/custom-config.yml` is relative to deploy directory.
|
||||||
|
> 💡 Note: When using Method 3, ensure your custom config file has all required fields as the container will use this instead of the built-in config.
|
||||||
|
|
||||||
|
### Configuration Recommendations
|
||||||
|
|
||||||
|
1. **Security First** 🔒
|
||||||
|
- Always enable security in production
|
||||||
|
- Use specific trusted_hosts instead of wildcards
|
||||||
|
- Set up proper rate limiting to protect your server
|
||||||
|
- Consider your environment before enabling HTTPS redirect
|
||||||
|
|
||||||
|
2. **Resource Management** 💻
|
||||||
|
- Adjust memory_threshold_percent based on available RAM
|
||||||
|
- Set timeouts according to your content size and network conditions
|
||||||
|
- Use Redis for rate limiting in multi-container setups
|
||||||
|
|
||||||
|
3. **Monitoring** 📊
|
||||||
|
- Enable Prometheus if you need metrics
|
||||||
|
- Set DEBUG logging in development, INFO in production
|
||||||
|
- Regular health check monitoring is crucial
|
||||||
|
|
||||||
|
4. **Performance Tuning** ⚡
|
||||||
|
- Start with conservative rate limiter delays
|
||||||
|
- Increase batch_process timeout for large content
|
||||||
|
- Adjust stream_init timeout based on initial response times
|
||||||
|
|
||||||
|
## Getting Help
|
||||||
|
|
||||||
|
We're here to help you succeed with Crawl4AI! Here's how to get support:
|
||||||
|
|
||||||
|
- 📖 Check our [full documentation](https://docs.crawl4ai.com)
|
||||||
|
- 🐛 Found a bug? [Open an issue](https://github.com/unclecode/crawl4ai/issues)
|
||||||
|
- 💬 Join our [Discord community](https://discord.gg/crawl4ai)
|
||||||
|
- ⭐ Star us on GitHub to show support!
|
||||||
|
|
||||||
|
## Summary
|
||||||
|
|
||||||
|
In this guide, we've covered everything you need to get started with Crawl4AI's Docker deployment:
|
||||||
|
- Building and running the Docker container
|
||||||
|
- Configuring the environment
|
||||||
|
- Making API requests with proper typing
|
||||||
|
- Using the Python SDK
|
||||||
|
- Monitoring your deployment
|
||||||
|
|
||||||
|
Remember, the examples in the `examples` folder are your friends - they show real-world usage patterns that you can adapt for your needs.
|
||||||
|
|
||||||
|
Keep exploring, and don't hesitate to reach out if you need help! We're building something amazing together. 🚀
|
||||||
|
|
||||||
|
Happy crawling! 🕷️
|
||||||
442
deploy/docker/api.py
Normal file
442
deploy/docker/api.py
Normal file
@@ -0,0 +1,442 @@
|
|||||||
|
import os
|
||||||
|
import json
|
||||||
|
import asyncio
|
||||||
|
from typing import List, Tuple
|
||||||
|
|
||||||
|
import logging
|
||||||
|
from typing import Optional, AsyncGenerator
|
||||||
|
from urllib.parse import unquote
|
||||||
|
from fastapi import HTTPException, Request, status
|
||||||
|
from fastapi.background import BackgroundTasks
|
||||||
|
from fastapi.responses import JSONResponse
|
||||||
|
from redis import asyncio as aioredis
|
||||||
|
|
||||||
|
from crawl4ai import (
|
||||||
|
AsyncWebCrawler,
|
||||||
|
CrawlerRunConfig,
|
||||||
|
LLMExtractionStrategy,
|
||||||
|
CacheMode,
|
||||||
|
BrowserConfig,
|
||||||
|
MemoryAdaptiveDispatcher,
|
||||||
|
RateLimiter
|
||||||
|
)
|
||||||
|
from crawl4ai.utils import perform_completion_with_backoff
|
||||||
|
from crawl4ai.content_filter_strategy import (
|
||||||
|
PruningContentFilter,
|
||||||
|
BM25ContentFilter,
|
||||||
|
LLMContentFilter
|
||||||
|
)
|
||||||
|
from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator
|
||||||
|
from crawl4ai.content_scraping_strategy import LXMLWebScrapingStrategy
|
||||||
|
|
||||||
|
from utils import (
|
||||||
|
TaskStatus,
|
||||||
|
FilterType,
|
||||||
|
get_base_url,
|
||||||
|
is_task_id,
|
||||||
|
should_cleanup_task,
|
||||||
|
decode_redis_hash
|
||||||
|
)
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
async def handle_llm_qa(
    url: str,
    query: str,
    config: dict
) -> str:
    """Answer a question about a page by crawling it and feeding the markdown to an LLM.

    Args:
        url: Target page URL; a trailing "?q=..." segment (appended by the
            /llm route) is stripped before crawling.
        query: The question to answer from the page content.
        config: Server configuration; reads config["llm"] for provider and
            API-key settings.

    Returns:
        The LLM's answer as a plain string.

    Raises:
        HTTPException: 500 when the crawl fails or any unexpected error occurs.
    """
    try:
        # The /llm route appends the question as "?q=..."; strip the last
        # occurrence so the remainder is the real page URL.
        last_q_index = url.rfind('?q=')
        if last_q_index != -1:
            url = url[:last_q_index]

        # Crawl the page and use its filtered markdown as LLM context.
        async with AsyncWebCrawler() as crawler:
            result = await crawler.arun(url)
            if not result.success:
                raise HTTPException(
                    status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                    detail=result.error_message
                )
            content = result.markdown.fit_markdown

        # Resolve the API key the same way process_llm_extraction does:
        # a directly configured "api_key" wins over the env-var indirection.
        # Default the env-var *name* to "" — os.environ.get(None) raises TypeError.
        if "api_key" in config["llm"]:
            api_token = config["llm"]["api_key"]
        else:
            api_token = os.environ.get(config["llm"].get("api_key_env", ""), "")

        # Create prompt and get LLM response.
        prompt = f"""Use the following content as context to answer the question.
Content:
{content}

Question: {query}

Answer:"""

        response = perform_completion_with_backoff(
            provider=config["llm"]["provider"],
            prompt_with_variables=prompt,
            api_token=api_token
        )

        return response.choices[0].message.content
    except Exception as e:
        logger.error(f"QA processing error: {str(e)}", exc_info=True)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=str(e)
        )
|
||||||
|
|
||||||
|
async def process_llm_extraction(
    redis: aioredis.Redis,
    config: dict,
    task_id: str,
    url: str,
    instruction: str,
    schema: Optional[str] = None,
    cache: str = "0"
) -> None:
    """Run an LLM extraction in the background and persist the outcome in Redis.

    Updates the hash "task:{task_id}" to COMPLETED (with a JSON "result"
    field) or FAILED (with an "error" field); nothing is returned.

    Args:
        redis: Async Redis client used to store task state.
        config: Server configuration; reads config["llm"] for provider/key.
        task_id: Identifier of the task hash to update.
        url: Page to crawl and extract from.
        instruction: Natural-language extraction instruction for the LLM.
        schema: Optional JSON schema (as a string) constraining the output.
        cache: "1" enables cache reads (ENABLED); anything else is write-only.
    """
    try:
        # A directly configured api_key takes precedence over api_key_env.
        if "api_key" in config["llm"]:
            api_key = config["llm"]["api_key"]
        else:
            # Default the env-var *name* to "" rather than None:
            # os.environ.get(None) raises TypeError (keys must be str),
            # which would mask the real configuration problem.
            api_key = os.environ.get(config["llm"].get("api_key_env", ""), "")
        llm_strategy = LLMExtractionStrategy(
            provider=config["llm"]["provider"],
            api_token=api_key,
            instruction=instruction,
            schema=json.loads(schema) if schema else None,
        )

        cache_mode = CacheMode.ENABLED if cache == "1" else CacheMode.WRITE_ONLY

        async with AsyncWebCrawler() as crawler:
            result = await crawler.arun(
                url=url,
                config=CrawlerRunConfig(
                    extraction_strategy=llm_strategy,
                    scraping_strategy=LXMLWebScrapingStrategy(),
                    cache_mode=cache_mode
                )
            )

            if not result.success:
                await redis.hset(f"task:{task_id}", mapping={
                    "status": TaskStatus.FAILED,
                    "error": result.error_message
                })
                return

            # extracted_content is usually JSON, but fall back to the raw
            # string when it is not parseable.
            try:
                content = json.loads(result.extracted_content)
            except json.JSONDecodeError:
                content = result.extracted_content
            await redis.hset(f"task:{task_id}", mapping={
                "status": TaskStatus.COMPLETED,
                "result": json.dumps(content)
            })

    except Exception as e:
        logger.error(f"LLM extraction error: {str(e)}", exc_info=True)
        await redis.hset(f"task:{task_id}", mapping={
            "status": TaskStatus.FAILED,
            "error": str(e)
        })
|
||||||
|
|
||||||
|
async def handle_markdown_request(
    url: str,
    filter_type: FilterType,
    query: Optional[str] = None,
    cache: str = "0",
    config: Optional[dict] = None
) -> str:
    """Crawl a URL and return its markdown, filtered per *filter_type*.

    Args:
        url: Target URL (scheme optional; https:// is assumed when missing).
        filter_type: RAW for unfiltered markdown, or FIT / BM25 / LLM to
            apply the corresponding content filter.
        query: User query for BM25 ranking, or instruction for the LLM filter.
        cache: "1" enables cache reads (ENABLED); anything else is write-only.
        config: Server configuration; only required for the LLM filter.

    Returns:
        raw_markdown for RAW requests, otherwise the filter's fit_markdown.

    Raises:
        HTTPException: 500 on crawl failure or any unexpected error.
    """
    try:
        decoded_url = unquote(url)
        if not decoded_url.startswith(('http://', 'https://')):
            decoded_url = 'https://' + decoded_url

        if filter_type == FilterType.RAW:
            md_generator = DefaultMarkdownGenerator()
        else:
            # Build only the requested filter. The previous dict literal
            # constructed every filter eagerly, so a FIT/BM25 request still
            # instantiated LLMContentFilter and crashed with a TypeError
            # whenever config was None (its default).
            if filter_type == FilterType.FIT:
                content_filter = PruningContentFilter()
            elif filter_type == FilterType.BM25:
                content_filter = BM25ContentFilter(user_query=query or "")
            else:  # FilterType.LLM
                content_filter = LLMContentFilter(
                    provider=config["llm"]["provider"],
                    # Default the env-var name to "" — os.environ.get(None) raises.
                    api_token=os.environ.get(config["llm"].get("api_key_env", ""), ""),
                    instruction=query or "Extract main content"
                )
            md_generator = DefaultMarkdownGenerator(content_filter=content_filter)

        cache_mode = CacheMode.ENABLED if cache == "1" else CacheMode.WRITE_ONLY

        async with AsyncWebCrawler() as crawler:
            result = await crawler.arun(
                url=decoded_url,
                config=CrawlerRunConfig(
                    markdown_generator=md_generator,
                    scraping_strategy=LXMLWebScrapingStrategy(),
                    cache_mode=cache_mode
                )
            )

            if not result.success:
                raise HTTPException(
                    status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                    detail=result.error_message
                )

            return (result.markdown.raw_markdown
                    if filter_type == FilterType.RAW
                    else result.markdown.fit_markdown)

    except Exception as e:
        logger.error(f"Markdown error: {str(e)}", exc_info=True)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=str(e)
        )
|
||||||
|
|
||||||
|
async def handle_llm_request(
    redis: aioredis.Redis,
    background_tasks: BackgroundTasks,
    request: Request,
    input_path: str,
    query: Optional[str] = None,
    schema: Optional[str] = None,
    cache: str = "0",
    config: Optional[dict] = None
) -> JSONResponse:
    """Handle LLM extraction requests.

    Dispatches on *input_path*: a task id returns that task's status;
    otherwise a new extraction task is created for the given URL. A missing
    *query* yields a usage hint with an example link instead.
    """
    base_url = get_base_url(request)

    try:
        # Status poll for an already-created task.
        if is_task_id(input_path):
            return await handle_task_status(redis, input_path, base_url)

        # No instruction supplied: answer with a HATEOAS-style usage hint.
        if not query:
            hint = {
                "message": "Please provide an instruction",
                "_links": {
                    "example": {
                        "href": f"{base_url}/llm/{input_path}?q=Extract+main+content",
                        "title": "Try this example"
                    }
                }
            }
            return JSONResponse(hint)

        # Otherwise kick off a fresh background extraction.
        return await create_new_task(
            redis,
            background_tasks,
            input_path,
            query,
            schema,
            cache,
            base_url,
            config
        )

    except Exception as e:
        logger.error(f"LLM endpoint error: {str(e)}", exc_info=True)
        error_body = {
            "error": str(e),
            "_links": {
                "retry": {"href": str(request.url)}
            }
        }
        return JSONResponse(error_body, status_code=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
||||||
|
|
||||||
|
async def handle_task_status(
    redis: aioredis.Redis,
    task_id: str,
    base_url: str
) -> JSONResponse:
    """Return the current state of a task, pruning finished tasks past their TTL.

    Raises:
        HTTPException: 404 when no hash exists for *task_id*.
    """
    raw = await redis.hgetall(f"task:{task_id}")
    if not raw:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Task not found"
        )

    task = decode_redis_hash(raw)
    response = create_task_response(task, task_id, base_url)

    # Terminal tasks are deleted once old enough; the payload built above is
    # still returned to this caller.
    if task["status"] in (TaskStatus.COMPLETED, TaskStatus.FAILED):
        if should_cleanup_task(task["created_at"]):
            await redis.delete(f"task:{task_id}")

    return JSONResponse(response)
|
||||||
|
|
||||||
|
async def create_new_task(
    redis: aioredis.Redis,
    background_tasks: BackgroundTasks,
    input_path: str,
    query: str,
    schema: Optional[str],
    cache: str,
    base_url: str,
    config: dict
) -> JSONResponse:
    """Create and initialize a new task.

    Normalizes the URL, records a PROCESSING entry in Redis, schedules the
    LLM extraction in the background, and returns the task descriptor.
    """
    from datetime import datetime

    # Default to https:// when the caller omitted a scheme.
    decoded_url = unquote(input_path)
    if not decoded_url.startswith(('http://', 'https://')):
        decoded_url = 'https://' + decoded_url

    # NOTE(review): uniqueness relies on timestamp + object id; collisions
    # look unlikely but possible under heavy concurrency — confirm acceptable.
    task_id = f"llm_{int(datetime.now().timestamp())}_{id(background_tasks)}"

    await redis.hset(f"task:{task_id}", mapping={
        "status": TaskStatus.PROCESSING,
        "created_at": datetime.now().isoformat(),
        "url": decoded_url
    })

    # The extraction itself runs after this response is sent.
    background_tasks.add_task(
        process_llm_extraction,
        redis, config, task_id, decoded_url, query, schema, cache
    )

    return JSONResponse({
        "task_id": task_id,
        "status": TaskStatus.PROCESSING,
        "url": decoded_url,
        "_links": {
            "self": {"href": f"{base_url}/llm/{task_id}"},
            "status": {"href": f"{base_url}/llm/{task_id}"}
        }
    })
|
||||||
|
|
||||||
|
def create_task_response(task: dict, task_id: str, base_url: str) -> dict:
    """Build the JSON-serializable status payload for a task hash.

    Adds a decoded "result" for completed tasks or an "error" for failed
    ones, on top of the common status fields and HATEOAS links.
    """
    task_href = f"{base_url}/llm/{task_id}"
    response = {
        "task_id": task_id,
        "status": task["status"],
        "created_at": task["created_at"],
        "url": task["url"],
        "_links": {
            "self": {"href": task_href},
            "refresh": {"href": task_href}
        }
    }

    current = task["status"]
    if current == TaskStatus.COMPLETED:
        # "result" is stored as JSON text; decode it for the response body.
        response["result"] = json.loads(task["result"])
    elif current == TaskStatus.FAILED:
        response["error"] = task["error"]

    return response
|
||||||
|
|
||||||
|
async def stream_results(crawler: AsyncWebCrawler, results_gen: AsyncGenerator) -> AsyncGenerator[bytes, None]:
    """Stream crawl results as NDJSON, then a completion marker; always close the crawler.

    Each result is serialized onto its own line. A per-result serialization
    failure is reported inline as an {"error": ..., "url": ...} line so one
    bad result does not abort the whole stream.
    """
    # Local import: datetime_handler lives in the deploy-local utils module.
    from utils import datetime_handler

    try:
        async for result in results_gen:
            try:
                result_dict = result.model_dump()
                logger.info(f"Streaming result for {result_dict.get('url', 'unknown')}")
                data = json.dumps(result_dict, default=datetime_handler) + "\n"
                yield data.encode('utf-8')
            except Exception as e:
                logger.error(f"Serialization error: {e}")
                error_response = {"error": str(e), "url": getattr(result, 'url', 'unknown')}
                yield (json.dumps(error_response) + "\n").encode('utf-8')

        # Append the trailing newline the original omitted, so the completion
        # marker is itself a complete NDJSON line for line-oriented readers.
        yield (json.dumps({"status": "completed"}) + "\n").encode('utf-8')

    except asyncio.CancelledError:
        logger.warning("Client disconnected during streaming")
    finally:
        # Always release browser resources, even on cancellation.
        try:
            await crawler.close()
        except Exception as e:
            logger.error(f"Crawler cleanup error: {e}")
|
||||||
|
|
||||||
|
async def handle_crawl_request(
    urls: List[str],
    browser_config: dict,
    crawler_config: dict,
    config: dict
) -> dict:
    """Handle non-streaming crawl requests.

    Crawls every URL with a memory-adaptive dispatcher and returns all
    results at once as {"success": True, "results": [...]}.

    Raises:
        HTTPException: 500 on any failure.
    """
    try:
        browser_cfg = BrowserConfig.load(browser_config)
        run_cfg = CrawlerRunConfig.load(crawler_config)

        # Throttle and back off according to the server-level crawler settings.
        crawler_section = config["crawler"]
        dispatcher = MemoryAdaptiveDispatcher(
            memory_threshold_percent=crawler_section["memory_threshold_percent"],
            rate_limiter=RateLimiter(
                base_delay=tuple(crawler_section["rate_limiter"]["base_delay"])
            )
        )

        async with AsyncWebCrawler(config=browser_cfg) as crawler:
            results = await crawler.arun_many(
                urls=urls,
                config=run_cfg,
                dispatcher=dispatcher
            )

            return {
                "success": True,
                "results": [r.model_dump() for r in results]
            }

    except Exception as e:
        logger.error(f"Crawl error: {str(e)}", exc_info=True)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=str(e)
        )
|
||||||
|
|
||||||
|
async def handle_stream_crawl_request(
    urls: List[str],
    browser_config: dict,
    crawler_config: dict,
    config: dict
) -> Tuple[AsyncWebCrawler, AsyncGenerator]:
    """Handle streaming crawl requests.

    Starts a crawler (deliberately left open — stream_results closes it once
    the stream finishes) and returns it with the async results generator.

    Raises:
        HTTPException: 500 on any failure; a partially started crawler is
            closed before re-raising.
    """
    crawler = None
    try:
        browser_cfg = BrowserConfig.load(browser_config)
        browser_cfg.verbose = True
        run_cfg = CrawlerRunConfig.load(crawler_config)
        run_cfg.scraping_strategy = LXMLWebScrapingStrategy()

        dispatcher = MemoryAdaptiveDispatcher(
            memory_threshold_percent=config["crawler"]["memory_threshold_percent"],
            rate_limiter=RateLimiter(
                base_delay=tuple(config["crawler"]["rate_limiter"]["base_delay"])
            )
        )

        crawler = AsyncWebCrawler(config=browser_cfg)
        await crawler.start()

        results_gen = await crawler.arun_many(
            urls=urls,
            config=run_cfg,
            dispatcher=dispatcher
        )

        return crawler, results_gen

    except Exception as e:
        # Tear down the crawler if it was created before the failure.
        if crawler is not None:
            await crawler.close()
        logger.error(f"Stream crawl error: {str(e)}", exc_info=True)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=str(e)
        )
|
||||||
46
deploy/docker/auth.py
Normal file
46
deploy/docker/auth.py
Normal file
@@ -0,0 +1,46 @@
|
|||||||
|
import os
|
||||||
|
from datetime import datetime, timedelta, timezone
|
||||||
|
from typing import Dict, Optional
|
||||||
|
from jwt import JWT, jwk_from_dict
|
||||||
|
from jwt.utils import get_int_from_datetime
|
||||||
|
from fastapi import Depends, HTTPException
|
||||||
|
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
|
||||||
|
from pydantic import EmailStr
|
||||||
|
from pydantic.main import BaseModel
|
||||||
|
import base64
|
||||||
|
|
||||||
|
instance = JWT()
|
||||||
|
security = HTTPBearer()
|
||||||
|
SECRET_KEY = os.environ.get("SECRET_KEY", "mysecret")
|
||||||
|
ACCESS_TOKEN_EXPIRE_MINUTES = 60
|
||||||
|
|
||||||
|
def get_jwk_from_secret(secret: str):
    """Wrap a shared-secret string as a symmetric ("oct") JWK.

    The secret is base64url-encoded with the trailing padding stripped, as
    expected for the JWK "k" member.
    """
    encoded = base64.urlsafe_b64encode(secret.encode('utf-8'))
    k_value = encoded.rstrip(b'=').decode('utf-8')
    return jwk_from_dict({"kty": "oct", "k": k_value})
|
||||||
|
|
||||||
|
def create_access_token(data: dict, expires_delta: Optional[timedelta] = None) -> str:
    """Create a signed HS256 JWT containing *data* plus an "exp" claim.

    Args:
        data: Claims to embed (copied, never mutated).
        expires_delta: Optional lifetime; falls back to
            ACCESS_TOKEN_EXPIRE_MINUTES when absent.

    Returns:
        The encoded JWT string.
    """
    lifetime = expires_delta or timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
    expire_at = datetime.now(timezone.utc) + lifetime
    claims = dict(data)
    claims["exp"] = get_int_from_datetime(expire_at)
    signing_key = get_jwk_from_secret(SECRET_KEY)
    return instance.encode(claims, signing_key, alg='HS256')
|
||||||
|
|
||||||
|
def verify_token(credentials: HTTPAuthorizationCredentials = Depends(security)) -> Dict:
    """FastAPI dependency: decode and validate the bearer JWT.

    Returns the token payload on success; every decode/expiry failure is
    surfaced uniformly as HTTP 401 so callers cannot distinguish causes.
    """
    verifying_key = get_jwk_from_secret(SECRET_KEY)
    try:
        return instance.decode(
            credentials.credentials,
            verifying_key,
            do_time_check=True,
            algorithms='HS256'
        )
    except Exception:
        raise HTTPException(status_code=401, detail="Invalid or expired token")
|
||||||
|
|
||||||
|
def get_token_dependency(config: Dict):
    """Return the token dependency if JWT is enabled, else None.

    NOTE(review): callers must handle the None case themselves (e.g. skip
    wrapping it in Depends) — confirm the server wiring does so, since
    passing None straight into Depends() changes FastAPI's behavior.
    """
    return verify_token if config.get("security", {}).get("jwt_enabled", False) else None
|
||||||
|
|
||||||
|
class TokenRequest(BaseModel):
    """Request body for the /token endpoint."""
    # Syntactic validation only (via EmailStr); presumably further domain
    # checks happen in the endpoint (see verify_email_domain in utils) — confirm.
    email: EmailStr
|
||||||
71
deploy/docker/config.yml
Normal file
71
deploy/docker/config.yml
Normal file
@@ -0,0 +1,71 @@
|
|||||||
|
# Application Configuration
|
||||||
|
app:
|
||||||
|
title: "Crawl4AI API"
|
||||||
|
version: "1.0.0"
|
||||||
|
host: "0.0.0.0"
|
||||||
|
port: 8000
|
||||||
|
reload: True
|
||||||
|
timeout_keep_alive: 300
|
||||||
|
|
||||||
|
# Default LLM Configuration
|
||||||
|
llm:
|
||||||
|
provider: "openai/gpt-4o-mini"
|
||||||
|
api_key_env: "OPENAI_API_KEY"
|
||||||
|
# api_key: sk-... # If you pass the API key directly then api_key_env will be ignored
|
||||||
|
|
||||||
|
# Redis Configuration
redis:
  host: "localhost"
  port: 6379
  db: 0
  password: ""
  ssl: False
  # Use YAML null (not the bare word "None", which parses as the *string*
  # "None") so disabled TLS options reach the client as Python None.
  # The four ssl_* keys below were previously duplicated, which is invalid
  # under YAML 1.2; each key now appears exactly once.
  ssl_cert_reqs: null
  ssl_ca_certs: null
  ssl_certfile: null
  ssl_keyfile: null
|
||||||
|
|
||||||
|
# Rate Limiting Configuration
|
||||||
|
rate_limiting:
|
||||||
|
enabled: True
|
||||||
|
default_limit: "1000/minute"
|
||||||
|
trusted_proxies: []
|
||||||
|
storage_uri: "memory://" # Use "redis://localhost:6379" for production
|
||||||
|
|
||||||
|
# Security Configuration
|
||||||
|
security:
|
||||||
|
enabled: false
|
||||||
|
jwt_enabled: false
|
||||||
|
https_redirect: false
|
||||||
|
trusted_hosts: ["*"]
|
||||||
|
headers:
|
||||||
|
x_content_type_options: "nosniff"
|
||||||
|
x_frame_options: "DENY"
|
||||||
|
content_security_policy: "default-src 'self'"
|
||||||
|
strict_transport_security: "max-age=63072000; includeSubDomains"
|
||||||
|
|
||||||
|
# Crawler Configuration
|
||||||
|
crawler:
|
||||||
|
memory_threshold_percent: 95.0
|
||||||
|
rate_limiter:
|
||||||
|
base_delay: [1.0, 2.0]
|
||||||
|
timeouts:
|
||||||
|
stream_init: 30.0 # Timeout for stream initialization
|
||||||
|
batch_process: 300.0 # Timeout for batch processing
|
||||||
|
|
||||||
|
# Logging Configuration
|
||||||
|
logging:
|
||||||
|
level: "INFO"
|
||||||
|
format: "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
|
||||||
|
|
||||||
|
# Observability Configuration
|
||||||
|
observability:
|
||||||
|
prometheus:
|
||||||
|
enabled: True
|
||||||
|
endpoint: "/metrics"
|
||||||
|
health_check:
|
||||||
|
endpoint: "/health"
|
||||||
10
deploy/docker/requirements.txt
Normal file
10
deploy/docker/requirements.txt
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
crawl4ai
|
||||||
|
fastapi
|
||||||
|
uvicorn
|
||||||
|
gunicorn>=23.0.0
|
||||||
|
slowapi>=0.1.9
|
||||||
|
prometheus-fastapi-instrumentator>=7.0.2
|
||||||
|
redis>=5.2.1
|
||||||
|
jwt>=1.3.1
|
||||||
|
dnspython>=2.7.0
|
||||||
|
email-validator>=2.2.0
|
||||||
181
deploy/docker/server.py
Normal file
181
deploy/docker/server.py
Normal file
@@ -0,0 +1,181 @@
|
|||||||
|
import os
|
||||||
|
import sys
|
||||||
|
import time
|
||||||
|
from typing import List, Optional, Dict
|
||||||
|
from fastapi import FastAPI, HTTPException, Request, Query, Path, Depends
|
||||||
|
from fastapi.responses import StreamingResponse, RedirectResponse, PlainTextResponse, JSONResponse
|
||||||
|
from fastapi.middleware.httpsredirect import HTTPSRedirectMiddleware
|
||||||
|
from fastapi.middleware.trustedhost import TrustedHostMiddleware
|
||||||
|
from pydantic import BaseModel, Field
|
||||||
|
from slowapi import Limiter
|
||||||
|
from slowapi.util import get_remote_address
|
||||||
|
from prometheus_fastapi_instrumentator import Instrumentator
|
||||||
|
from redis import asyncio as aioredis
|
||||||
|
|
||||||
|
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
|
||||||
|
from utils import FilterType, load_config, setup_logging, verify_email_domain
|
||||||
|
from api import (
|
||||||
|
handle_markdown_request,
|
||||||
|
handle_llm_qa,
|
||||||
|
handle_stream_crawl_request,
|
||||||
|
handle_crawl_request,
|
||||||
|
stream_results
|
||||||
|
)
|
||||||
|
from auth import create_access_token, get_token_dependency, TokenRequest # Import from auth.py
|
||||||
|
|
||||||
|
__version__ = "0.2.6"
|
||||||
|
|
||||||
|
class CrawlRequest(BaseModel):
    """Request body for the /crawl and /crawl/stream endpoints."""
    # Between 1 and 100 target URLs per request (enforced by the Field bounds).
    urls: List[str] = Field(min_length=1, max_length=100)
    # Raw option dicts forwarded untouched to the crawler layer; presumably
    # converted into BrowserConfig / CrawlerRunConfig there — see handle_crawl_request.
    browser_config: Optional[Dict] = Field(default_factory=dict)
    crawler_config: Optional[Dict] = Field(default_factory=dict)
|
||||||
|
|
||||||
|
# Load configuration and setup
config = load_config()  # parsed config.yml (see utils.load_config)
setup_logging(config)

# Initialize Redis
# NOTE(review): only the "uri" key has a fallback; a missing "redis" section
# in config.yml would raise KeyError here — confirm the shipped config.yml
# always defines it.
redis = aioredis.from_url(config["redis"].get("uri", "redis://localhost"))

# Initialize rate limiter
limiter = Limiter(
    key_func=get_remote_address,  # rate-limit buckets keyed by client address
    default_limits=[config["rate_limiting"]["default_limit"]],
    storage_uri=config["rate_limiting"]["storage_uri"]
)

app = FastAPI(
    title=config["app"]["title"],
    version=config["app"]["version"]
)
|
||||||
|
|
||||||
|
# Configure middleware
|
||||||
|
def setup_security_middleware(app, config):
    """Attach security middleware to *app* from the ``security`` config section.

    Adds HTTPS redirection and/or trusted-host filtering only when security
    is enabled. Mutates *app* in place and returns None (interface unchanged).
    """
    sec_config = config.get("security", {})
    if not sec_config.get("enabled", False):
        return
    if sec_config.get("https_redirect", False):
        app.add_middleware(HTTPSRedirectMiddleware)
    # Bug fix: the original defaulted to [] here, so a missing "trusted_hosts"
    # key passed the != ["*"] check and then raised KeyError on the direct
    # sec_config["trusted_hosts"] lookup. Default to ["*"] (allow all) and
    # reuse the fetched value.
    trusted_hosts = sec_config.get("trusted_hosts", ["*"])
    if trusted_hosts != ["*"]:
        app.add_middleware(TrustedHostMiddleware, allowed_hosts=trusted_hosts)
|
||||||
|
|
||||||
|
setup_security_middleware(app, config)
|
||||||
|
|
||||||
|
# Prometheus instrumentation
if config["observability"]["prometheus"]["enabled"]:
    # Instrument all routes and mount the metrics exposition endpoint.
    Instrumentator().instrument(app).expose(app)

# Get token dependency based on config
# Presumably resolves to a no-op dependency when JWT auth is disabled —
# see auth.get_token_dependency to confirm.
token_dependency = get_token_dependency(config)
|
||||||
|
|
||||||
|
# Middleware for security headers
|
||||||
|
@app.middleware("http")
async def add_security_headers(request: Request, call_next):
    """Inject the configured security headers into every HTTP response."""
    response = await call_next(request)
    if config["security"]["enabled"]:
        # config["security"]["headers"] maps header name -> value (config.yml).
        response.headers.update(config["security"]["headers"])
    return response
|
||||||
|
|
||||||
|
# Token endpoint (always available, but usage depends on config)
|
||||||
|
@app.post("/token")
|
||||||
|
async def get_token(request_data: TokenRequest):
|
||||||
|
if not verify_email_domain(request_data.email):
|
||||||
|
raise HTTPException(status_code=400, detail="Invalid email domain")
|
||||||
|
token = create_access_token({"sub": request_data.email})
|
||||||
|
return {"email": request_data.email, "access_token": token, "token_type": "bearer"}
|
||||||
|
|
||||||
|
# Endpoints with conditional auth
|
||||||
|
@app.get("/md/{url:path}")
|
||||||
|
@limiter.limit(config["rate_limiting"]["default_limit"])
|
||||||
|
async def get_markdown(
|
||||||
|
request: Request,
|
||||||
|
url: str,
|
||||||
|
f: FilterType = FilterType.FIT,
|
||||||
|
q: Optional[str] = None,
|
||||||
|
c: Optional[str] = "0",
|
||||||
|
token_data: Optional[Dict] = Depends(token_dependency)
|
||||||
|
):
|
||||||
|
result = await handle_markdown_request(url, f, q, c, config)
|
||||||
|
return PlainTextResponse(result)
|
||||||
|
|
||||||
|
@app.get("/llm/{url:path}", description="URL should be without http/https prefix")
|
||||||
|
async def llm_endpoint(
|
||||||
|
request: Request,
|
||||||
|
url: str = Path(...),
|
||||||
|
q: Optional[str] = Query(None),
|
||||||
|
token_data: Optional[Dict] = Depends(token_dependency)
|
||||||
|
):
|
||||||
|
if not q:
|
||||||
|
raise HTTPException(status_code=400, detail="Query parameter 'q' is required")
|
||||||
|
if not url.startswith(('http://', 'https://')):
|
||||||
|
url = 'https://' + url
|
||||||
|
try:
|
||||||
|
answer = await handle_llm_qa(url, q, config)
|
||||||
|
return JSONResponse({"answer": answer})
|
||||||
|
except Exception as e:
|
||||||
|
raise HTTPException(status_code=500, detail=str(e))
|
||||||
|
|
||||||
|
@app.get("/schema")
|
||||||
|
async def get_schema():
|
||||||
|
from crawl4ai import BrowserConfig, CrawlerRunConfig
|
||||||
|
return {"browser": BrowserConfig().dump(), "crawler": CrawlerRunConfig().dump()}
|
||||||
|
|
||||||
|
@app.get(config["observability"]["health_check"]["endpoint"])
async def health():
    """Liveness probe: current status, epoch timestamp, and server version."""
    return {
        "status": "ok",
        "timestamp": time.time(),
        "version": __version__,
    }
|
||||||
|
|
||||||
|
@app.get(config["observability"]["prometheus"]["endpoint"])
async def metrics():
    # NOTE(review): this redirects to the very endpoint it is mounted on.
    # When prometheus is enabled, Instrumentator().expose(app) has already
    # registered the real exposition route first, so this handler is shadowed;
    # if prometheus were disabled it would redirect to itself in a loop.
    # Confirm intent — likely meant as a placeholder only.
    return RedirectResponse(url=config["observability"]["prometheus"]["endpoint"])
|
||||||
|
|
||||||
|
@app.post("/crawl")
|
||||||
|
@limiter.limit(config["rate_limiting"]["default_limit"])
|
||||||
|
async def crawl(
|
||||||
|
request: Request,
|
||||||
|
crawl_request: CrawlRequest,
|
||||||
|
token_data: Optional[Dict] = Depends(token_dependency)
|
||||||
|
):
|
||||||
|
if not crawl_request.urls:
|
||||||
|
raise HTTPException(status_code=400, detail="At least one URL required")
|
||||||
|
|
||||||
|
results = await handle_crawl_request(
|
||||||
|
urls=crawl_request.urls,
|
||||||
|
browser_config=crawl_request.browser_config,
|
||||||
|
crawler_config=crawl_request.crawler_config,
|
||||||
|
config=config
|
||||||
|
)
|
||||||
|
|
||||||
|
return JSONResponse(results)
|
||||||
|
|
||||||
|
|
||||||
|
@app.post("/crawl/stream")
|
||||||
|
@limiter.limit(config["rate_limiting"]["default_limit"])
|
||||||
|
async def crawl_stream(
|
||||||
|
request: Request,
|
||||||
|
crawl_request: CrawlRequest,
|
||||||
|
token_data: Optional[Dict] = Depends(token_dependency)
|
||||||
|
):
|
||||||
|
if not crawl_request.urls:
|
||||||
|
raise HTTPException(status_code=400, detail="At least one URL required")
|
||||||
|
|
||||||
|
crawler, results_gen = await handle_stream_crawl_request(
|
||||||
|
urls=crawl_request.urls,
|
||||||
|
browser_config=crawl_request.browser_config,
|
||||||
|
crawler_config=crawl_request.crawler_config,
|
||||||
|
config=config
|
||||||
|
)
|
||||||
|
|
||||||
|
return StreamingResponse(
|
||||||
|
stream_results(crawler, results_gen),
|
||||||
|
media_type='application/x-ndjson',
|
||||||
|
headers={'Cache-Control': 'no-cache', 'Connection': 'keep-alive', 'X-Stream-Status': 'active'}
|
||||||
|
)
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    # Dev entry point; production runs under gunicorn (see supervisord.conf).
    import uvicorn
    uvicorn.run(
        "server:app",
        host=config["app"]["host"],
        port=config["app"]["port"],
        reload=config["app"]["reload"],  # hot-reload toggle for development
        timeout_keep_alive=config["app"]["timeout_keep_alive"]
    )
|
||||||
12
deploy/docker/supervisord.conf
Normal file
12
deploy/docker/supervisord.conf
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
[supervisord]
|
||||||
|
nodaemon=true
|
||||||
|
|
||||||
|
[program:redis]
|
||||||
|
command=redis-server
|
||||||
|
autorestart=true
|
||||||
|
priority=10
|
||||||
|
|
||||||
|
[program:gunicorn]
|
||||||
|
command=gunicorn --bind 0.0.0.0:8000 --workers 4 --threads 2 --timeout 300 --graceful-timeout 60 --keep-alive 65 --log-level debug --worker-class uvicorn.workers.UvicornWorker --max-requests 1000 --max-requests-jitter 50 server:app
|
||||||
|
autorestart=true
|
||||||
|
priority=20
|
||||||
66
deploy/docker/utils.py
Normal file
66
deploy/docker/utils.py
Normal file
@@ -0,0 +1,66 @@
|
|||||||
|
import dns.resolver
|
||||||
|
import logging
|
||||||
|
import yaml
|
||||||
|
from datetime import datetime
|
||||||
|
from enum import Enum
|
||||||
|
from pathlib import Path
|
||||||
|
from fastapi import Request
|
||||||
|
from typing import Dict, Optional
|
||||||
|
|
||||||
|
class TaskStatus(str, Enum):
    """Lifecycle states of a background task.

    The ``str`` mixin makes members JSON-serializable and comparable to
    their plain-string values.
    """
    PROCESSING = "processing"
    FAILED = "failed"
    COMPLETED = "completed"
|
||||||
|
|
||||||
|
class FilterType(str, Enum):
    """Content-filter strategies selectable via the /md endpoint's ``f`` param."""
    RAW = "raw"
    FIT = "fit"  # default in get_markdown
    BM25 = "bm25"
    LLM = "llm"
|
||||||
|
|
||||||
|
def load_config() -> Dict:
    """Load and return the application configuration.

    Parses the ``config.yml`` that sits next to this module, using
    ``yaml.safe_load`` (never ``yaml.load``, which can execute arbitrary tags).
    """
    config_path = Path(__file__).parent / "config.yml"
    with config_path.open("r") as config_file:
        return yaml.safe_load(config_file)
|
||||||
|
|
||||||
|
def setup_logging(config: Dict) -> None:
    """Configure root logging from the ``logging`` config section."""
    log_settings = config["logging"]
    logging.basicConfig(
        level=log_settings["level"],
        format=log_settings["format"],
    )
|
||||||
|
|
||||||
|
def get_base_url(request: Request) -> str:
    """Get base URL including scheme and host."""
    url = request.url
    return "{}://{}".format(url.scheme, url.netloc)
|
||||||
|
|
||||||
|
def is_task_id(value: str) -> bool:
    """Check whether *value* matches the task-ID pattern."""
    # "llm_" itself contains an underscore, so the prefix test alone is
    # logically sufficient; the explicit separator check is kept for parity
    # with the original contract.
    has_prefix = value.startswith("llm_")
    has_separator = "_" in value
    return has_prefix and has_separator
|
||||||
|
|
||||||
|
def datetime_handler(obj: object) -> str:
    """JSON ``default=`` hook that serializes datetime-like objects.

    Fixes the original annotations: ``obj: any`` referenced the builtin
    ``any`` function (not a type), and ``Optional[str]`` was wrong because
    the function never returns None.

    Args:
        obj: A value ``json.dumps`` could not serialize natively.

    Returns:
        The ISO-8601 string for any object exposing ``isoformat()``.

    Raises:
        TypeError: If *obj* has no ``isoformat`` method.
    """
    if hasattr(obj, 'isoformat'):
        return obj.isoformat()
    raise TypeError(f"Object of type {type(obj)} is not JSON serializable")
|
||||||
|
|
||||||
|
def should_cleanup_task(created_at: str, ttl_seconds: float = 3600) -> bool:
    """Return True when a task is older than its time-to-live.

    Args:
        created_at: ISO-8601 timestamp of task creation (naive, same clock
            as ``datetime.now()``).
        ttl_seconds: Age threshold in seconds. Defaults to the original
            hard-coded one hour, so existing callers are unaffected.

    Returns:
        True if the task's age exceeds *ttl_seconds*.
    """
    created = datetime.fromisoformat(created_at)
    return (datetime.now() - created).total_seconds() > ttl_seconds
|
||||||
|
|
||||||
|
def decode_redis_hash(hash_data: Dict[bytes, bytes]) -> Dict[str, str]:
    """Convert a raw Redis hash (bytes keys and values) into a str->str dict."""
    decoded = {}
    for raw_key, raw_value in hash_data.items():
        decoded[raw_key.decode('utf-8')] = raw_value.decode('utf-8')
    return decoded
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def verify_email_domain(email: str) -> bool:
    """Best-effort check that *email*'s domain publishes MX records.

    Returns False for malformed addresses or on any DNS failure (timeout,
    NXDOMAIN, resolver unavailable, ...). The broad except is deliberate:
    this is an advisory filter used before issuing tokens, and it must
    never raise.
    """
    try:
        domain = email.split('@')[1]
        # An MX answer means the domain is set up to receive mail.
        records = dns.resolver.resolve(domain, 'MX')
        # Original used `True if records else False`; bool() is the idiom.
        return bool(records)
    except Exception:
        # Original bound the exception to an unused name `e`; dropped.
        return False
|
||||||
@@ -1,3 +1,30 @@
|
|||||||
|
# Base configuration (not a service, just a reusable config block)
|
||||||
|
x-base-config: &base-config
|
||||||
|
ports:
|
||||||
|
- "11235:11235"
|
||||||
|
- "8000:8000"
|
||||||
|
- "9222:9222"
|
||||||
|
- "8080:8080"
|
||||||
|
environment:
|
||||||
|
- CRAWL4AI_API_TOKEN=${CRAWL4AI_API_TOKEN:-}
|
||||||
|
- OPENAI_API_KEY=${OPENAI_API_KEY:-}
|
||||||
|
- CLAUDE_API_KEY=${CLAUDE_API_KEY:-}
|
||||||
|
volumes:
|
||||||
|
- /dev/shm:/dev/shm
|
||||||
|
deploy:
|
||||||
|
resources:
|
||||||
|
limits:
|
||||||
|
memory: 4G
|
||||||
|
reservations:
|
||||||
|
memory: 1G
|
||||||
|
restart: unless-stopped
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD", "curl", "-f", "http://localhost:11235/health"]
|
||||||
|
interval: 30s
|
||||||
|
timeout: 10s
|
||||||
|
retries: 3
|
||||||
|
start_period: 40s
|
||||||
|
|
||||||
services:
|
services:
|
||||||
# Local build services for different platforms
|
# Local build services for different platforms
|
||||||
crawl4ai-amd64:
|
crawl4ai-amd64:
|
||||||
@@ -11,9 +38,7 @@ services:
|
|||||||
platforms:
|
platforms:
|
||||||
- linux/amd64
|
- linux/amd64
|
||||||
profiles: ["local-amd64"]
|
profiles: ["local-amd64"]
|
||||||
extends: &base-config
|
<<: *base-config # extends yerine doğrudan yapılandırmayı dahil ettik
|
||||||
file: docker-compose.yml
|
|
||||||
service: base-config
|
|
||||||
|
|
||||||
crawl4ai-arm64:
|
crawl4ai-arm64:
|
||||||
build:
|
build:
|
||||||
@@ -26,42 +51,15 @@ services:
|
|||||||
platforms:
|
platforms:
|
||||||
- linux/arm64
|
- linux/arm64
|
||||||
profiles: ["local-arm64"]
|
profiles: ["local-arm64"]
|
||||||
extends: *base-config
|
<<: *base-config
|
||||||
|
|
||||||
# Hub services for different platforms and versions
|
# Hub services for different platforms and versions
|
||||||
crawl4ai-hub-amd64:
|
crawl4ai-hub-amd64:
|
||||||
image: unclecode/crawl4ai:${VERSION:-basic}-amd64
|
image: unclecode/crawl4ai:${VERSION:-basic}-amd64
|
||||||
profiles: ["hub-amd64"]
|
profiles: ["hub-amd64"]
|
||||||
extends: *base-config
|
<<: *base-config
|
||||||
|
|
||||||
crawl4ai-hub-arm64:
|
crawl4ai-hub-arm64:
|
||||||
image: unclecode/crawl4ai:${VERSION:-basic}-arm64
|
image: unclecode/crawl4ai:${VERSION:-basic}-arm64
|
||||||
profiles: ["hub-arm64"]
|
profiles: ["hub-arm64"]
|
||||||
extends: *base-config
|
<<: *base-config
|
||||||
|
|
||||||
# Base configuration to be extended
|
|
||||||
base-config:
|
|
||||||
ports:
|
|
||||||
- "11235:11235"
|
|
||||||
- "8000:8000"
|
|
||||||
- "9222:9222"
|
|
||||||
- "8080:8080"
|
|
||||||
environment:
|
|
||||||
- CRAWL4AI_API_TOKEN=${CRAWL4AI_API_TOKEN:-}
|
|
||||||
- OPENAI_API_KEY=${OPENAI_API_KEY:-}
|
|
||||||
- CLAUDE_API_KEY=${CLAUDE_API_KEY:-}
|
|
||||||
volumes:
|
|
||||||
- /dev/shm:/dev/shm
|
|
||||||
deploy:
|
|
||||||
resources:
|
|
||||||
limits:
|
|
||||||
memory: 4G
|
|
||||||
reservations:
|
|
||||||
memory: 1G
|
|
||||||
restart: unless-stopped
|
|
||||||
healthcheck:
|
|
||||||
test: ["CMD", "curl", "-f", "http://localhost:11235/health"]
|
|
||||||
interval: 30s
|
|
||||||
timeout: 10s
|
|
||||||
retries: 3
|
|
||||||
start_period: 40s
|
|
||||||
25
docs/assets/powered-by-dark.svg
Normal file
25
docs/assets/powered-by-dark.svg
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
<svg xmlns="http://www.w3.org/2000/svg" width="120" height="35" viewBox="0 0 120 35">
|
||||||
|
<!-- Dark Theme -->
|
||||||
|
<g>
|
||||||
|
<defs>
|
||||||
|
<pattern id="halftoneDark" width="4" height="4" patternUnits="userSpaceOnUse">
|
||||||
|
<circle cx="2" cy="2" r="1" fill="#eee" opacity="0.1"/>
|
||||||
|
</pattern>
|
||||||
|
<pattern id="halftoneTextDark" width="3" height="3" patternUnits="userSpaceOnUse">
|
||||||
|
<circle cx="1.5" cy="1.5" r="2" fill="#aaa" opacity="0.2"/>
|
||||||
|
</pattern>
|
||||||
|
</defs>
|
||||||
|
<!-- White border - added as outer rectangle -->
|
||||||
|
<rect width="120" height="35" rx="5" fill="#111"/>
|
||||||
|
<!-- Dark background slightly smaller to show thicker border -->
|
||||||
|
<rect x="2" y="2" width="116" height="31" rx="4" fill="#1a1a1a"/>
|
||||||
|
<rect x="2" y="2" width="116" height="31" rx="4" fill="url(#halftoneDark)"/>
|
||||||
|
|
||||||
|
<!-- Logo with halftone -->
|
||||||
|
<path d="M30 17.5 a7.5 7.5 0 1 1 -15 0 a7.5 7.5 0 1 1 15 0" fill="none" stroke="#eee" stroke-width="2"/>
|
||||||
|
<path d="M18 17.5 L27 17.5" stroke="#eee" stroke-width="2"/>
|
||||||
|
<circle cx="22.5" cy="17.5" r="2" fill="#eee"/>
|
||||||
|
|
||||||
|
<text x="40" y="23" fill="#eee" font-family="Arial, sans-serif" font-weight="500" font-size="14">Crawl4AI</text>
|
||||||
|
</g>
|
||||||
|
</svg>
|
||||||
|
After Width: | Height: | Size: 1.2 KiB |
64
docs/assets/powered-by-disco.svg
Normal file
64
docs/assets/powered-by-disco.svg
Normal file
@@ -0,0 +1,64 @@
|
|||||||
|
<svg xmlns="http://www.w3.org/2000/svg" width="120" height="35" viewBox="0 0 120 35">
|
||||||
|
<g>
|
||||||
|
<defs>
|
||||||
|
<pattern id="cyberdots" width="4" height="4" patternUnits="userSpaceOnUse">
|
||||||
|
<circle cx="2" cy="2" r="1">
|
||||||
|
<animate attributeName="fill"
|
||||||
|
values="#FF2EC4;#8B5CF6;#0BC5EA;#FF2EC4"
|
||||||
|
dur="6s"
|
||||||
|
repeatCount="indefinite"/>
|
||||||
|
<animate attributeName="opacity"
|
||||||
|
values="0.2;0.4;0.2"
|
||||||
|
dur="4s"
|
||||||
|
repeatCount="indefinite"/>
|
||||||
|
</circle>
|
||||||
|
</pattern>
|
||||||
|
<filter id="neonGlow" x="-20%" y="-20%" width="140%" height="140%">
|
||||||
|
<feGaussianBlur stdDeviation="1" result="blur"/>
|
||||||
|
<feFlood flood-color="#FF2EC4" flood-opacity="0.2">
|
||||||
|
<animate attributeName="flood-color"
|
||||||
|
values="#FF2EC4;#8B5CF6;#0BC5EA;#FF2EC4"
|
||||||
|
dur="8s"
|
||||||
|
repeatCount="indefinite"/>
|
||||||
|
</feFlood>
|
||||||
|
<feComposite in2="blur" operator="in"/>
|
||||||
|
<feMerge>
|
||||||
|
<feMergeNode/>
|
||||||
|
<feMergeNode in="SourceGraphic"/>
|
||||||
|
</feMerge>
|
||||||
|
</filter>
|
||||||
|
</defs>
|
||||||
|
|
||||||
|
<rect width="120" height="35" rx="5" fill="#0A0A0F"/>
|
||||||
|
<rect x="2" y="2" width="116" height="31" rx="4" fill="#16161E"/>
|
||||||
|
<rect x="2" y="2" width="116" height="31" rx="4" fill="url(#cyberdots)"/>
|
||||||
|
|
||||||
|
<!-- Logo with animated neon -->
|
||||||
|
<path d="M30 17.5 a7.5 7.5 0 1 1 -15 0 a7.5 7.5 0 1 1 15 0" fill="none" stroke="#8B5CF6" stroke-width="2" filter="url(#neonGlow)">
|
||||||
|
<animate attributeName="stroke"
|
||||||
|
values="#FF2EC4;#8B5CF6;#0BC5EA;#FF2EC4"
|
||||||
|
dur="8s"
|
||||||
|
repeatCount="indefinite"/>
|
||||||
|
</path>
|
||||||
|
<path d="M18 17.5 L27 17.5" stroke="#8B5CF6" stroke-width="2" filter="url(#neonGlow)">
|
||||||
|
<animate attributeName="stroke"
|
||||||
|
values="#FF2EC4;#8B5CF6;#0BC5EA;#FF2EC4"
|
||||||
|
dur="8s"
|
||||||
|
repeatCount="indefinite"/>
|
||||||
|
</path>
|
||||||
|
<circle cx="22.5" cy="17.5" r="2" fill="#0BC5EA">
|
||||||
|
<animate attributeName="fill"
|
||||||
|
values="#0BC5EA;#FF2EC4;#8B5CF6;#0BC5EA"
|
||||||
|
dur="8s"
|
||||||
|
repeatCount="indefinite"/>
|
||||||
|
</circle>
|
||||||
|
|
||||||
|
<text x="40" y="23" font-family="Arial, sans-serif" font-weight="500" font-size="14" filter="url(#neonGlow)">
|
||||||
|
<animate attributeName="fill"
|
||||||
|
values="#FF2EC4;#8B5CF6;#0BC5EA;#FF2EC4"
|
||||||
|
dur="8s"
|
||||||
|
repeatCount="indefinite"/>
|
||||||
|
Crawl4AI
|
||||||
|
</text>
|
||||||
|
</g>
|
||||||
|
</svg>
|
||||||
|
After Width: | Height: | Size: 2.5 KiB |
21
docs/assets/powered-by-light.svg
Normal file
21
docs/assets/powered-by-light.svg
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
<svg xmlns="http://www.w3.org/2000/svg" width="120" height="35" viewBox="0 0 120 35">
|
||||||
|
<g>
|
||||||
|
<defs>
|
||||||
|
<pattern id="halftoneLight" width="4" height="4" patternUnits="userSpaceOnUse">
|
||||||
|
<circle cx="2" cy="2" r="1" fill="#111" opacity="0.1"/>
|
||||||
|
</pattern>
|
||||||
|
</defs>
|
||||||
|
<!-- Dark border -->
|
||||||
|
<rect width="120" height="35" rx="5" fill="#DDD"/>
|
||||||
|
<!-- Light background -->
|
||||||
|
<rect x="2" y="2" width="116" height="31" rx="4" fill="#fff"/>
|
||||||
|
<rect x="2" y="2" width="116" height="31" rx="4" fill="url(#halftoneLight)"/>
|
||||||
|
|
||||||
|
<!-- Logo -->
|
||||||
|
<path d="M30 17.5 a7.5 7.5 0 1 1 -15 0 a7.5 7.5 0 1 1 15 0" fill="none" stroke="#111" stroke-width="2"/>
|
||||||
|
<path d="M18 17.5 L27 17.5" stroke="#111" stroke-width="2"/>
|
||||||
|
<circle cx="22.5" cy="17.5" r="2" fill="#111"/>
|
||||||
|
|
||||||
|
<text x="40" y="23" fill="#111" font-family="Arial, sans-serif" font-weight="500" font-size="14">Crawl4AI</text>
|
||||||
|
</g>
|
||||||
|
</svg>
|
||||||
|
After Width: | Height: | Size: 925 B |
28
docs/assets/powered-by-night.svg
Normal file
28
docs/assets/powered-by-night.svg
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
<svg xmlns="http://www.w3.org/2000/svg" width="120" height="35" viewBox="0 0 120 35">
|
||||||
|
<g>
|
||||||
|
<defs>
|
||||||
|
<pattern id="halftoneDark" width="4" height="4" patternUnits="userSpaceOnUse">
|
||||||
|
<circle cx="2" cy="2" r="1" fill="#8B5CF6" opacity="0.1"/>
|
||||||
|
</pattern>
|
||||||
|
<filter id="neonGlow" x="-20%" y="-20%" width="140%" height="140%">
|
||||||
|
<feGaussianBlur stdDeviation="1" result="blur"/>
|
||||||
|
<feFlood flood-color="#8B5CF6" flood-opacity="0.2"/>
|
||||||
|
<feComposite in2="blur" operator="in"/>
|
||||||
|
<feMerge>
|
||||||
|
<feMergeNode/>
|
||||||
|
<feMergeNode in="SourceGraphic"/>
|
||||||
|
</feMerge>
|
||||||
|
</filter>
|
||||||
|
</defs>
|
||||||
|
<rect width="120" height="35" rx="5" fill="#0A0A0F"/>
|
||||||
|
<rect x="2" y="2" width="116" height="31" rx="4" fill="#16161E"/>
|
||||||
|
<rect x="2" y="2" width="116" height="31" rx="4" fill="url(#halftoneDark)"/>
|
||||||
|
|
||||||
|
<!-- Logo with neon glow -->
|
||||||
|
<path d="M30 17.5 a7.5 7.5 0 1 1 -15 0 a7.5 7.5 0 1 1 15 0" fill="none" stroke="#8B5CF6" stroke-width="2" filter="url(#neonGlow)"/>
|
||||||
|
<path d="M18 17.5 L27 17.5" stroke="#8B5CF6" stroke-width="2" filter="url(#neonGlow)"/>
|
||||||
|
<circle cx="22.5" cy="17.5" r="2" fill="#8B5CF6"/>
|
||||||
|
|
||||||
|
<text x="40" y="23" fill="#fff" font-family="Arial, sans-serif" font-weight="500" font-size="14" filter="url(#neonGlow)">Crawl4AI</text>
|
||||||
|
</g>
|
||||||
|
</svg>
|
||||||
|
After Width: | Height: | Size: 1.3 KiB |
@@ -9,12 +9,10 @@ from crawl4ai.extraction_strategy import JsonCssExtractionStrategy
|
|||||||
from crawl4ai.async_configs import BrowserConfig, CrawlerRunConfig
|
from crawl4ai.async_configs import BrowserConfig, CrawlerRunConfig
|
||||||
import json
|
import json
|
||||||
|
|
||||||
|
|
||||||
async def extract_amazon_products():
|
async def extract_amazon_products():
|
||||||
# Initialize browser config
|
# Initialize browser config
|
||||||
browser_config = BrowserConfig(
|
browser_config = BrowserConfig(browser_type="chromium", headless=True)
|
||||||
browser_type="chromium",
|
|
||||||
headless=True
|
|
||||||
)
|
|
||||||
|
|
||||||
# Initialize crawler config with JSON CSS extraction strategy
|
# Initialize crawler config with JSON CSS extraction strategy
|
||||||
crawler_config = CrawlerRunConfig(
|
crawler_config = CrawlerRunConfig(
|
||||||
@@ -27,57 +25,53 @@ async def extract_amazon_products():
|
|||||||
"name": "asin",
|
"name": "asin",
|
||||||
"selector": "",
|
"selector": "",
|
||||||
"type": "attribute",
|
"type": "attribute",
|
||||||
"attribute": "data-asin"
|
"attribute": "data-asin",
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "title",
|
|
||||||
"selector": "h2 a span",
|
|
||||||
"type": "text"
|
|
||||||
},
|
},
|
||||||
|
{"name": "title", "selector": "h2 a span", "type": "text"},
|
||||||
{
|
{
|
||||||
"name": "url",
|
"name": "url",
|
||||||
"selector": "h2 a",
|
"selector": "h2 a",
|
||||||
"type": "attribute",
|
"type": "attribute",
|
||||||
"attribute": "href"
|
"attribute": "href",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "image",
|
"name": "image",
|
||||||
"selector": ".s-image",
|
"selector": ".s-image",
|
||||||
"type": "attribute",
|
"type": "attribute",
|
||||||
"attribute": "src"
|
"attribute": "src",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "rating",
|
"name": "rating",
|
||||||
"selector": ".a-icon-star-small .a-icon-alt",
|
"selector": ".a-icon-star-small .a-icon-alt",
|
||||||
"type": "text"
|
"type": "text",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "reviews_count",
|
"name": "reviews_count",
|
||||||
"selector": "[data-csa-c-func-deps='aui-da-a-popover'] ~ span span",
|
"selector": "[data-csa-c-func-deps='aui-da-a-popover'] ~ span span",
|
||||||
"type": "text"
|
"type": "text",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "price",
|
"name": "price",
|
||||||
"selector": ".a-price .a-offscreen",
|
"selector": ".a-price .a-offscreen",
|
||||||
"type": "text"
|
"type": "text",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "original_price",
|
"name": "original_price",
|
||||||
"selector": ".a-price.a-text-price .a-offscreen",
|
"selector": ".a-price.a-text-price .a-offscreen",
|
||||||
"type": "text"
|
"type": "text",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "sponsored",
|
"name": "sponsored",
|
||||||
"selector": ".puis-sponsored-label-text",
|
"selector": ".puis-sponsored-label-text",
|
||||||
"type": "exists"
|
"type": "exists",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "delivery_info",
|
"name": "delivery_info",
|
||||||
"selector": "[data-cy='delivery-recipe'] .a-color-base",
|
"selector": "[data-cy='delivery-recipe'] .a-color-base",
|
||||||
"type": "text",
|
"type": "text",
|
||||||
"multiple": True
|
"multiple": True,
|
||||||
}
|
},
|
||||||
]
|
],
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
@@ -105,10 +99,12 @@ async def extract_amazon_products():
|
|||||||
print(f"Rating: {product.get('rating')}")
|
print(f"Rating: {product.get('rating')}")
|
||||||
print(f"Reviews: {product.get('reviews_count')}")
|
print(f"Reviews: {product.get('reviews_count')}")
|
||||||
print(f"Sponsored: {'Yes' if product.get('sponsored') else 'No'}")
|
print(f"Sponsored: {'Yes' if product.get('sponsored') else 'No'}")
|
||||||
if product.get('delivery_info'):
|
if product.get("delivery_info"):
|
||||||
print(f"Delivery: {' '.join(product['delivery_info'])}")
|
print(f"Delivery: {' '.join(product['delivery_info'])}")
|
||||||
print("-" * 80)
|
print("-" * 80)
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
import asyncio
|
import asyncio
|
||||||
|
|
||||||
asyncio.run(extract_amazon_products())
|
asyncio.run(extract_amazon_products())
|
||||||
|
|||||||
@@ -10,6 +10,7 @@ from crawl4ai.async_configs import BrowserConfig, CrawlerRunConfig
|
|||||||
import json
|
import json
|
||||||
from playwright.async_api import Page, BrowserContext
|
from playwright.async_api import Page, BrowserContext
|
||||||
|
|
||||||
|
|
||||||
async def extract_amazon_products():
|
async def extract_amazon_products():
|
||||||
# Initialize browser config
|
# Initialize browser config
|
||||||
browser_config = BrowserConfig(
|
browser_config = BrowserConfig(
|
||||||
@@ -20,7 +21,6 @@ async def extract_amazon_products():
|
|||||||
# Initialize crawler config with JSON CSS extraction strategy nav-search-submit-button
|
# Initialize crawler config with JSON CSS extraction strategy nav-search-submit-button
|
||||||
crawler_config = CrawlerRunConfig(
|
crawler_config = CrawlerRunConfig(
|
||||||
cache_mode=CacheMode.BYPASS,
|
cache_mode=CacheMode.BYPASS,
|
||||||
|
|
||||||
extraction_strategy=JsonCssExtractionStrategy(
|
extraction_strategy=JsonCssExtractionStrategy(
|
||||||
schema={
|
schema={
|
||||||
"name": "Amazon Product Search Results",
|
"name": "Amazon Product Search Results",
|
||||||
@@ -30,82 +30,86 @@ async def extract_amazon_products():
|
|||||||
"name": "asin",
|
"name": "asin",
|
||||||
"selector": "",
|
"selector": "",
|
||||||
"type": "attribute",
|
"type": "attribute",
|
||||||
"attribute": "data-asin"
|
"attribute": "data-asin",
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "title",
|
|
||||||
"selector": "h2 a span",
|
|
||||||
"type": "text"
|
|
||||||
},
|
},
|
||||||
|
{"name": "title", "selector": "h2 a span", "type": "text"},
|
||||||
{
|
{
|
||||||
"name": "url",
|
"name": "url",
|
||||||
"selector": "h2 a",
|
"selector": "h2 a",
|
||||||
"type": "attribute",
|
"type": "attribute",
|
||||||
"attribute": "href"
|
"attribute": "href",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "image",
|
"name": "image",
|
||||||
"selector": ".s-image",
|
"selector": ".s-image",
|
||||||
"type": "attribute",
|
"type": "attribute",
|
||||||
"attribute": "src"
|
"attribute": "src",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "rating",
|
"name": "rating",
|
||||||
"selector": ".a-icon-star-small .a-icon-alt",
|
"selector": ".a-icon-star-small .a-icon-alt",
|
||||||
"type": "text"
|
"type": "text",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "reviews_count",
|
"name": "reviews_count",
|
||||||
"selector": "[data-csa-c-func-deps='aui-da-a-popover'] ~ span span",
|
"selector": "[data-csa-c-func-deps='aui-da-a-popover'] ~ span span",
|
||||||
"type": "text"
|
"type": "text",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "price",
|
"name": "price",
|
||||||
"selector": ".a-price .a-offscreen",
|
"selector": ".a-price .a-offscreen",
|
||||||
"type": "text"
|
"type": "text",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "original_price",
|
"name": "original_price",
|
||||||
"selector": ".a-price.a-text-price .a-offscreen",
|
"selector": ".a-price.a-text-price .a-offscreen",
|
||||||
"type": "text"
|
"type": "text",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "sponsored",
|
"name": "sponsored",
|
||||||
"selector": ".puis-sponsored-label-text",
|
"selector": ".puis-sponsored-label-text",
|
||||||
"type": "exists"
|
"type": "exists",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "delivery_info",
|
"name": "delivery_info",
|
||||||
"selector": "[data-cy='delivery-recipe'] .a-color-base",
|
"selector": "[data-cy='delivery-recipe'] .a-color-base",
|
||||||
"type": "text",
|
"type": "text",
|
||||||
"multiple": True
|
"multiple": True,
|
||||||
}
|
},
|
||||||
]
|
],
|
||||||
}
|
}
|
||||||
)
|
),
|
||||||
)
|
)
|
||||||
|
|
||||||
url = "https://www.amazon.com/"
|
url = "https://www.amazon.com/"
|
||||||
|
|
||||||
async def after_goto(page: Page, context: BrowserContext, url: str, response: dict, **kwargs):
|
async def after_goto(
|
||||||
|
page: Page, context: BrowserContext, url: str, response: dict, **kwargs
|
||||||
|
):
|
||||||
"""Hook called after navigating to each URL"""
|
"""Hook called after navigating to each URL"""
|
||||||
print(f"[HOOK] after_goto - Successfully loaded: {url}")
|
print(f"[HOOK] after_goto - Successfully loaded: {url}")
|
||||||
|
|
||||||
try:
|
try:
|
||||||
# Wait for search box to be available
|
# Wait for search box to be available
|
||||||
search_box = await page.wait_for_selector('#twotabsearchtextbox', timeout=1000)
|
search_box = await page.wait_for_selector(
|
||||||
|
"#twotabsearchtextbox", timeout=1000
|
||||||
|
)
|
||||||
|
|
||||||
# Type the search query
|
# Type the search query
|
||||||
await search_box.fill('Samsung Galaxy Tab')
|
await search_box.fill("Samsung Galaxy Tab")
|
||||||
|
|
||||||
# Get the search button and prepare for navigation
|
# Get the search button and prepare for navigation
|
||||||
search_button = await page.wait_for_selector('#nav-search-submit-button', timeout=1000)
|
search_button = await page.wait_for_selector(
|
||||||
|
"#nav-search-submit-button", timeout=1000
|
||||||
|
)
|
||||||
|
|
||||||
# Click with navigation waiting
|
# Click with navigation waiting
|
||||||
await search_button.click()
|
await search_button.click()
|
||||||
|
|
||||||
# Wait for search results to load
|
# Wait for search results to load
|
||||||
await page.wait_for_selector('[data-component-type="s-search-result"]', timeout=10000)
|
await page.wait_for_selector(
|
||||||
|
'[data-component-type="s-search-result"]', timeout=10000
|
||||||
|
)
|
||||||
print("[HOOK] Search completed and results loaded!")
|
print("[HOOK] Search completed and results loaded!")
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
@@ -115,7 +119,6 @@ async def extract_amazon_products():
|
|||||||
|
|
||||||
# Use context manager for proper resource handling
|
# Use context manager for proper resource handling
|
||||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||||
|
|
||||||
crawler.crawler_strategy.set_hook("after_goto", after_goto)
|
crawler.crawler_strategy.set_hook("after_goto", after_goto)
|
||||||
|
|
||||||
# Extract the data
|
# Extract the data
|
||||||
@@ -136,10 +139,12 @@ async def extract_amazon_products():
|
|||||||
print(f"Rating: {product.get('rating')}")
|
print(f"Rating: {product.get('rating')}")
|
||||||
print(f"Reviews: {product.get('reviews_count')}")
|
print(f"Reviews: {product.get('reviews_count')}")
|
||||||
print(f"Sponsored: {'Yes' if product.get('sponsored') else 'No'}")
|
print(f"Sponsored: {'Yes' if product.get('sponsored') else 'No'}")
|
||||||
if product.get('delivery_info'):
|
if product.get("delivery_info"):
|
||||||
print(f"Delivery: {' '.join(product['delivery_info'])}")
|
print(f"Delivery: {' '.join(product['delivery_info'])}")
|
||||||
print("-" * 80)
|
print("-" * 80)
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
import asyncio
|
import asyncio
|
||||||
|
|
||||||
asyncio.run(extract_amazon_products())
|
asyncio.run(extract_amazon_products())
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ from crawl4ai import AsyncWebCrawler, CacheMode
|
|||||||
from crawl4ai.extraction_strategy import JsonCssExtractionStrategy
|
from crawl4ai.extraction_strategy import JsonCssExtractionStrategy
|
||||||
from crawl4ai.async_configs import BrowserConfig, CrawlerRunConfig
|
from crawl4ai.async_configs import BrowserConfig, CrawlerRunConfig
|
||||||
import json
|
import json
|
||||||
from playwright.async_api import Page, BrowserContext
|
|
||||||
|
|
||||||
async def extract_amazon_products():
|
async def extract_amazon_products():
|
||||||
# Initialize browser config
|
# Initialize browser config
|
||||||
@@ -30,7 +30,7 @@ async def extract_amazon_products():
|
|||||||
"""
|
"""
|
||||||
crawler_config = CrawlerRunConfig(
|
crawler_config = CrawlerRunConfig(
|
||||||
cache_mode=CacheMode.BYPASS,
|
cache_mode=CacheMode.BYPASS,
|
||||||
js_code = js_code_to_search,
|
js_code=js_code_to_search,
|
||||||
wait_for='css:[data-component-type="s-search-result"]',
|
wait_for='css:[data-component-type="s-search-result"]',
|
||||||
extraction_strategy=JsonCssExtractionStrategy(
|
extraction_strategy=JsonCssExtractionStrategy(
|
||||||
schema={
|
schema={
|
||||||
@@ -41,65 +41,60 @@ async def extract_amazon_products():
|
|||||||
"name": "asin",
|
"name": "asin",
|
||||||
"selector": "",
|
"selector": "",
|
||||||
"type": "attribute",
|
"type": "attribute",
|
||||||
"attribute": "data-asin"
|
"attribute": "data-asin",
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "title",
|
|
||||||
"selector": "h2 a span",
|
|
||||||
"type": "text"
|
|
||||||
},
|
},
|
||||||
|
{"name": "title", "selector": "h2 a span", "type": "text"},
|
||||||
{
|
{
|
||||||
"name": "url",
|
"name": "url",
|
||||||
"selector": "h2 a",
|
"selector": "h2 a",
|
||||||
"type": "attribute",
|
"type": "attribute",
|
||||||
"attribute": "href"
|
"attribute": "href",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "image",
|
"name": "image",
|
||||||
"selector": ".s-image",
|
"selector": ".s-image",
|
||||||
"type": "attribute",
|
"type": "attribute",
|
||||||
"attribute": "src"
|
"attribute": "src",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "rating",
|
"name": "rating",
|
||||||
"selector": ".a-icon-star-small .a-icon-alt",
|
"selector": ".a-icon-star-small .a-icon-alt",
|
||||||
"type": "text"
|
"type": "text",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "reviews_count",
|
"name": "reviews_count",
|
||||||
"selector": "[data-csa-c-func-deps='aui-da-a-popover'] ~ span span",
|
"selector": "[data-csa-c-func-deps='aui-da-a-popover'] ~ span span",
|
||||||
"type": "text"
|
"type": "text",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "price",
|
"name": "price",
|
||||||
"selector": ".a-price .a-offscreen",
|
"selector": ".a-price .a-offscreen",
|
||||||
"type": "text"
|
"type": "text",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "original_price",
|
"name": "original_price",
|
||||||
"selector": ".a-price.a-text-price .a-offscreen",
|
"selector": ".a-price.a-text-price .a-offscreen",
|
||||||
"type": "text"
|
"type": "text",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "sponsored",
|
"name": "sponsored",
|
||||||
"selector": ".puis-sponsored-label-text",
|
"selector": ".puis-sponsored-label-text",
|
||||||
"type": "exists"
|
"type": "exists",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "delivery_info",
|
"name": "delivery_info",
|
||||||
"selector": "[data-cy='delivery-recipe'] .a-color-base",
|
"selector": "[data-cy='delivery-recipe'] .a-color-base",
|
||||||
"type": "text",
|
"type": "text",
|
||||||
"multiple": True
|
"multiple": True,
|
||||||
}
|
},
|
||||||
]
|
],
|
||||||
}
|
}
|
||||||
)
|
),
|
||||||
)
|
)
|
||||||
|
|
||||||
# Example search URL (you should replace with your actual Amazon URL)
|
# Example search URL (you should replace with your actual Amazon URL)
|
||||||
url = "https://www.amazon.com/"
|
url = "https://www.amazon.com/"
|
||||||
|
|
||||||
|
|
||||||
# Use context manager for proper resource handling
|
# Use context manager for proper resource handling
|
||||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||||
# Extract the data
|
# Extract the data
|
||||||
@@ -120,10 +115,12 @@ async def extract_amazon_products():
|
|||||||
print(f"Rating: {product.get('rating')}")
|
print(f"Rating: {product.get('rating')}")
|
||||||
print(f"Reviews: {product.get('reviews_count')}")
|
print(f"Reviews: {product.get('reviews_count')}")
|
||||||
print(f"Sponsored: {'Yes' if product.get('sponsored') else 'No'}")
|
print(f"Sponsored: {'Yes' if product.get('sponsored') else 'No'}")
|
||||||
if product.get('delivery_info'):
|
if product.get("delivery_info"):
|
||||||
print(f"Delivery: {' '.join(product['delivery_info'])}")
|
print(f"Delivery: {' '.join(product['delivery_info'])}")
|
||||||
print("-" * 80)
|
print("-" * 80)
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
import asyncio
|
import asyncio
|
||||||
|
|
||||||
asyncio.run(extract_amazon_products())
|
asyncio.run(extract_amazon_products())
|
||||||
|
|||||||
@@ -1,12 +1,16 @@
|
|||||||
# File: async_webcrawler_multiple_urls_example.py
|
# File: async_webcrawler_multiple_urls_example.py
|
||||||
import os, sys
|
import os, sys
|
||||||
|
|
||||||
# append 2 parent directories to sys.path to import crawl4ai
|
# append 2 parent directories to sys.path to import crawl4ai
|
||||||
parent_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
parent_dir = os.path.dirname(
|
||||||
|
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
||||||
|
)
|
||||||
sys.path.append(parent_dir)
|
sys.path.append(parent_dir)
|
||||||
|
|
||||||
import asyncio
|
import asyncio
|
||||||
from crawl4ai import AsyncWebCrawler
|
from crawl4ai import AsyncWebCrawler
|
||||||
|
|
||||||
|
|
||||||
async def main():
|
async def main():
|
||||||
# Initialize the AsyncWebCrawler
|
# Initialize the AsyncWebCrawler
|
||||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||||
@@ -16,7 +20,7 @@ async def main():
|
|||||||
"https://python.org",
|
"https://python.org",
|
||||||
"https://github.com",
|
"https://github.com",
|
||||||
"https://stackoverflow.com",
|
"https://stackoverflow.com",
|
||||||
"https://news.ycombinator.com"
|
"https://news.ycombinator.com",
|
||||||
]
|
]
|
||||||
|
|
||||||
# Set up crawling parameters
|
# Set up crawling parameters
|
||||||
@@ -27,7 +31,7 @@ async def main():
|
|||||||
urls=urls,
|
urls=urls,
|
||||||
word_count_threshold=word_count_threshold,
|
word_count_threshold=word_count_threshold,
|
||||||
bypass_cache=True,
|
bypass_cache=True,
|
||||||
verbose=True
|
verbose=True,
|
||||||
)
|
)
|
||||||
|
|
||||||
# Process the results
|
# Process the results
|
||||||
@@ -36,7 +40,9 @@ async def main():
|
|||||||
print(f"Successfully crawled: {result.url}")
|
print(f"Successfully crawled: {result.url}")
|
||||||
print(f"Title: {result.metadata.get('title', 'N/A')}")
|
print(f"Title: {result.metadata.get('title', 'N/A')}")
|
||||||
print(f"Word count: {len(result.markdown.split())}")
|
print(f"Word count: {len(result.markdown.split())}")
|
||||||
print(f"Number of links: {len(result.links.get('internal', [])) + len(result.links.get('external', []))}")
|
print(
|
||||||
|
f"Number of links: {len(result.links.get('internal', [])) + len(result.links.get('external', []))}"
|
||||||
|
)
|
||||||
print(f"Number of images: {len(result.media.get('images', []))}")
|
print(f"Number of images: {len(result.media.get('images', []))}")
|
||||||
print("---")
|
print("---")
|
||||||
else:
|
else:
|
||||||
@@ -44,5 +50,6 @@ async def main():
|
|||||||
print(f"Error: {result.error_message}")
|
print(f"Error: {result.error_message}")
|
||||||
print("---")
|
print("---")
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
asyncio.run(main())
|
asyncio.run(main())
|
||||||
@@ -6,10 +6,8 @@ This example demonstrates optimal browser usage patterns in Crawl4AI:
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
import asyncio
|
import asyncio
|
||||||
import os
|
|
||||||
from typing import List
|
from typing import List
|
||||||
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig
|
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig
|
||||||
from crawl4ai.content_filter_strategy import PruningContentFilter
|
|
||||||
from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator
|
from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator
|
||||||
|
|
||||||
|
|
||||||
@@ -54,7 +52,7 @@ async def crawl_sequential(urls: List[str]):
|
|||||||
)
|
)
|
||||||
if result.success:
|
if result.success:
|
||||||
print(f"Successfully crawled {url}")
|
print(f"Successfully crawled {url}")
|
||||||
print(f"Content length: {len(result.markdown_v2.raw_markdown)}")
|
print(f"Content length: {len(result.markdown.raw_markdown)}")
|
||||||
finally:
|
finally:
|
||||||
await crawler.close()
|
await crawler.close()
|
||||||
|
|
||||||
@@ -103,7 +101,7 @@ async def crawl_parallel(urls: List[str], max_concurrent: int = 3):
|
|||||||
print(f"Error crawling {url}: {str(result)}")
|
print(f"Error crawling {url}: {str(result)}")
|
||||||
elif result.success:
|
elif result.success:
|
||||||
print(f"Successfully crawled {url}")
|
print(f"Successfully crawled {url}")
|
||||||
print(f"Content length: {len(result.markdown_v2.raw_markdown)}")
|
print(f"Content length: {len(result.markdown.raw_markdown)}")
|
||||||
finally:
|
finally:
|
||||||
await crawler.close()
|
await crawler.close()
|
||||||
|
|
||||||
|
|||||||
13
docs/examples/cli/browser.yml
Normal file
13
docs/examples/cli/browser.yml
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
browser_type: "chromium"
|
||||||
|
headless: true
|
||||||
|
viewport_width: 1280
|
||||||
|
viewport_height: 800
|
||||||
|
user_agent_mode: "random"
|
||||||
|
verbose: true
|
||||||
|
text_mode: false
|
||||||
|
light_mode: false
|
||||||
|
ignore_https_errors: true
|
||||||
|
java_script_enabled: true
|
||||||
|
extra_args:
|
||||||
|
- "--disable-gpu"
|
||||||
|
- "--no-sandbox"
|
||||||
13
docs/examples/cli/crawler.yml
Normal file
13
docs/examples/cli/crawler.yml
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
cache_mode: "bypass"
|
||||||
|
wait_until: "networkidle"
|
||||||
|
page_timeout: 30000
|
||||||
|
delay_before_return_html: 0.5
|
||||||
|
word_count_threshold: 100
|
||||||
|
scan_full_page: true
|
||||||
|
scroll_delay: 0.3
|
||||||
|
process_iframes: false
|
||||||
|
remove_overlay_elements: true
|
||||||
|
magic: true
|
||||||
|
verbose: true
|
||||||
|
exclude_external_links: true
|
||||||
|
exclude_social_media_links: true
|
||||||
27
docs/examples/cli/css_schema.json
Normal file
27
docs/examples/cli/css_schema.json
Normal file
@@ -0,0 +1,27 @@
|
|||||||
|
{
|
||||||
|
"name": "ArticleExtractor",
|
||||||
|
"baseSelector": ".cards[data-tax=news] .card__data",
|
||||||
|
"fields": [
|
||||||
|
{
|
||||||
|
"name": "title",
|
||||||
|
"selector": "h4.card__title",
|
||||||
|
"type": "text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "link",
|
||||||
|
"selector": "h4.card__title a",
|
||||||
|
"type": "attribute",
|
||||||
|
"attribute": "href"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "details",
|
||||||
|
"selector": ".card__details",
|
||||||
|
"type": "text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "topics",
|
||||||
|
"selector": ".card__topics.topics",
|
||||||
|
"type": "text"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user