Compare commits


1 commit

| Author | SHA1 | Message | Date |
|--------|------|---------|------|
| UncleCode | 3a2cb7dacf | test: Add comprehensive unit tests for AsyncExecutor functionality | 2024-11-13 19:46:05 +08:00 |
722 changed files with 20257 additions and 247578 deletions


@@ -1,28 +0,0 @@
{
"permissions": {
"allow": [
"Bash(cd:*)",
"Bash(python3:*)",
"Bash(python:*)",
"Bash(grep:*)",
"Bash(mkdir:*)",
"Bash(cp:*)",
"Bash(rm:*)",
"Bash(true)",
"Bash(./package-extension.sh:*)",
"Bash(find:*)",
"Bash(chmod:*)",
"Bash(rg:*)",
"Bash(/Users/unclecode/.npm-global/lib/node_modules/@anthropic-ai/claude-code/vendor/ripgrep/arm64-darwin/rg -A 5 -B 5 \"Script Builder\" docs/md_v2/apps/crawl4ai-assistant/)",
"Bash(/Users/unclecode/.npm-global/lib/node_modules/@anthropic-ai/claude-code/vendor/ripgrep/arm64-darwin/rg -A 30 \"generateCode\\(events, format\\)\" docs/md_v2/apps/crawl4ai-assistant/content/content.js)",
"Bash(/Users/unclecode/.npm-global/lib/node_modules/@anthropic-ai/claude-code/vendor/ripgrep/arm64-darwin/rg \"<style>\" docs/md_v2/apps/crawl4ai-assistant/index.html -A 5)",
"Bash(git checkout:*)",
"Bash(docker logs:*)",
"Bash(curl:*)",
"Bash(docker compose:*)",
"Bash(./test-final-integration.sh:*)",
"Bash(mv:*)"
]
},
"enableAllProjectMcpServers": false
}

.gitattributes (vendored): 12 changed lines

@@ -1,12 +0,0 @@
# Documentation
*.html linguist-documentation
docs/* linguist-documentation
docs/examples/* linguist-documentation
docs/md_v2/* linguist-documentation
# Explicitly mark Python as the main language
*.py linguist-detectable=true
*.py linguist-language=Python
# Exclude HTML from language statistics
*.html linguist-detectable=false


@@ -1,59 +0,0 @@
title: "[Feature Request]: "
labels: ["⚙️ New"]
body:
- type: markdown
attributes:
value: |
Thank you for your interest in suggesting a new feature! Before you submit, please take a moment to check if it already exists in
this discussions category to avoid duplicates. 😊
- type: textarea
id: needs_to_be_done
attributes:
label: What needs to be done?
description: Please describe the feature or functionality you'd like to see.
placeholder: "e.g., Return alt text along with images scraped from a webpages in Result"
validations:
required: true
- type: textarea
id: problem_to_solve
attributes:
label: What problem does this solve?
description: Explain the pain point or issue this feature will help address.
placeholder: "e.g., Bypass Captchas added by cloudflare"
validations:
required: true
- type: textarea
id: target_users
attributes:
label: Target users/beneficiaries
description: Who would benefit from this feature? (e.g., specific teams, developers, users, etc.)
placeholder: "e.g., Marketing teams, developers"
validations:
required: false
- type: textarea
id: current_workarounds
attributes:
label: Current alternatives/workarounds
description: Are there any existing solutions or workarounds? How does this feature improve upon them?
placeholder: "e.g., Users manually select the css classes mapped to data fields to extract them"
validations:
required: false
- type: markdown
attributes:
value: |
### 💡 Implementation Ideas
- type: textarea
id: proposed_approach
attributes:
label: Proposed approach
description: Share any ideas you have for how this feature could be implemented. Point out any challenges you foresee
and the success metrics for this feature
placeholder: "e.g., Implement a breadth first traversal algorithm for scraper"
validations:
required: false

.github/FUNDING.yml (vendored): 7 changed lines

@@ -1,7 +0,0 @@
# These are supported funding model platforms
# GitHub Sponsors
github: unclecode
# Custom links for enterprise inquiries (uncomment when ready)
# custom: ["https://crawl4ai.com/enterprise"]


@@ -1,127 +0,0 @@
name: Bug Report
description: Report a bug with Crawl4AI.
title: "[Bug]: "
labels: ["🐞 Bug","🩺 Needs Triage"]
body:
- type: input
id: crawl4ai_version
attributes:
label: crawl4ai version
description: Specify the version of crawl4ai you are using.
placeholder: "e.g., 2.0.0"
validations:
required: true
- type: textarea
id: expected_behavior
attributes:
label: Expected Behavior
description: Describe what you expected to happen.
placeholder: "Provide a detailed explanation of the expected outcome."
validations:
required: true
- type: textarea
id: current_behavior
attributes:
label: Current Behavior
description: Describe what is happening instead of the expected behavior.
placeholder: "Describe the actual result or issue you encountered."
validations:
required: true
- type: dropdown
id: reproducible
attributes:
label: Is this reproducible?
description: Indicate whether this bug can be reproduced consistently.
options:
- "Yes"
- "No"
validations:
required: true
- type: textarea
id: inputs
attributes:
label: Inputs Causing the Bug
description: Provide details about the inputs causing the issue.
placeholder: |
- URL(s):
- Settings used:
- Input data (if applicable):
render: bash
- type: textarea
id: steps_to_reproduce
attributes:
label: Steps to Reproduce
description: Provide step-by-step instructions to reproduce the issue.
placeholder: |
1. Go to...
2. Click on...
3. Observe the issue...
render: bash
- type: textarea
id: code_snippets
attributes:
label: Code snippets
description: Provide code snippets (if any). Add comments as necessary.
placeholder: print("Hello world")
render: python
# Header Section with Title
- type: markdown
attributes:
value: |
## Supporting Information
Please provide the following details to help us understand and resolve your issue. This will assist us in reproducing and diagnosing the problem.
- type: input
id: os
attributes:
label: OS
description: Please provide the operating system & distro where the issue occurs.
placeholder: "e.g., Windows, macOS, Linux"
validations:
required: true
- type: input
id: python_version
attributes:
label: Python version
description: Specify the Python version being used.
placeholder: "e.g., 3.8.5"
validations:
required: true
# Browser Field
- type: input
id: browser
attributes:
label: Browser
description: Provide the name of the browser you are using.
placeholder: "e.g., Chrome, Firefox, Safari"
validations:
required: false
# Browser Version Field
- type: input
id: browser_version
attributes:
label: Browser version
description: Provide the version of the browser you are using.
placeholder: "e.g., 91.0.4472.124"
validations:
required: false
# Error Logs Field (Text Area)
- type: textarea
id: error_logs
attributes:
label: Error logs & Screenshots (if applicable)
description: If you encountered any errors, please provide the error logs. Attach any relevant screenshots to help us understand the issue.
placeholder: "Paste error logs here and attach your screenshots"
validations:
required: false


@@ -1,8 +0,0 @@
blank_issues_enabled: false
contact_links:
- name: Feature Requests
url: https://github.com/unclecode/crawl4ai/discussions/categories/feature-requests
about: "Suggest new features or enhancements for Crawl4AI"
- name: Forums - Q&A
url: https://github.com/unclecode/crawl4ai/discussions/categories/forums-q-a
about: "Ask questions or engage in general discussions about Crawl4AI"


@@ -1,19 +0,0 @@
## Summary
Please include a summary of the change and/or which issues are fixed.
e.g. `Fixes #123` (tag GitHub issue numbers in this format so the issues are automatically linked with your PR)
## List of files changed and why
e.g. quickstart.py - to update the example to reflect the new changes
## How Has This Been Tested?
Please describe the tests that you ran to verify your changes.
## Checklist:
- [ ] My code follows the style guidelines of this project
- [ ] I have performed a self-review of my own code
- [ ] I have commented my code, particularly in hard-to-understand areas
- [ ] I have made corresponding changes to the documentation
- [ ] I have added/updated unit tests that prove my fix is effective or that my feature works
- [ ] New and existing unit tests pass locally with my changes


@@ -1,46 +0,0 @@
name: Discord GitHub Notifications
on:
issues:
types: [opened]
issue_comment:
types: [created]
pull_request:
types: [opened]
discussion:
types: [created]
watch:
types: [started]
jobs:
notify-discord:
runs-on: ubuntu-latest
steps:
- name: Send to Google Apps Script (Stars only)
if: github.event_name == 'watch'
run: |
curl -fSs -X POST "${{ secrets.GOOGLE_SCRIPT_ENDPOINT }}" \
-H 'Content-Type: application/json' \
-d '{"url":"${{ github.event.sender.html_url }}"}'
- name: Set webhook based on event type
id: set-webhook
run: |
if [ "${{ github.event_name }}" == "discussion" ]; then
echo "webhook=${{ secrets.DISCORD_DISCUSSIONS_WEBHOOK }}" >> $GITHUB_OUTPUT
elif [ "${{ github.event_name }}" == "watch" ]; then
echo "webhook=${{ secrets.DISCORD_STAR_GAZERS }}" >> $GITHUB_OUTPUT
else
echo "webhook=${{ secrets.DISCORD_WEBHOOK }}" >> $GITHUB_OUTPUT
fi
- name: Discord Notification
uses: Ilshidur/action-discord@master
env:
DISCORD_WEBHOOK: ${{ steps.set-webhook.outputs.webhook }}
with:
args: |
${{ github.event_name == 'issues' && format('📣 New issue created: **{0}** by {1} - {2}', github.event.issue.title, github.event.issue.user.login, github.event.issue.html_url) ||
github.event_name == 'issue_comment' && format('💬 New comment on issue **{0}** by {1} - {2}', github.event.issue.title, github.event.comment.user.login, github.event.comment.html_url) ||
github.event_name == 'pull_request' && format('🔄 New PR opened: **{0}** by {1} - {2}', github.event.pull_request.title, github.event.pull_request.user.login, github.event.pull_request.html_url) ||
github.event_name == 'watch' && format('⭐ {0} starred Crawl4AI 🥳! Check out their profile: {1}', github.event.sender.login, github.event.sender.html_url) ||
format('💬 New discussion started: **{0}** by {1} - {2}', github.event.discussion.title, github.event.discussion.user.login, github.event.discussion.html_url) }}


@@ -1,142 +0,0 @@
name: Release Pipeline
on:
push:
tags:
- 'v*'
- '!test-v*' # Exclude test tags
jobs:
release:
runs-on: ubuntu-latest
permissions:
contents: write # Required for creating releases
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.12'
- name: Extract version from tag
id: get_version
run: |
TAG_VERSION=${GITHUB_REF#refs/tags/v}
echo "VERSION=$TAG_VERSION" >> $GITHUB_OUTPUT
echo "Releasing version: $TAG_VERSION"
- name: Install package dependencies
run: |
pip install -e .
- name: Check version consistency
run: |
TAG_VERSION=${{ steps.get_version.outputs.VERSION }}
PACKAGE_VERSION=$(python -c "from crawl4ai.__version__ import __version__; print(__version__)")
echo "Tag version: $TAG_VERSION"
echo "Package version: $PACKAGE_VERSION"
if [ "$TAG_VERSION" != "$PACKAGE_VERSION" ]; then
echo "❌ Version mismatch! Tag: $TAG_VERSION, Package: $PACKAGE_VERSION"
echo "Please update crawl4ai/__version__.py to match the tag version"
exit 1
fi
echo "✅ Version check passed: $TAG_VERSION"
- name: Install build dependencies
run: |
python -m pip install --upgrade pip
pip install build twine
- name: Build package
run: python -m build
- name: Check package
run: twine check dist/*
- name: Upload to PyPI
env:
TWINE_USERNAME: __token__
TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }}
run: |
echo "📦 Uploading to PyPI..."
twine upload dist/*
echo "✅ Package uploaded to https://pypi.org/project/crawl4ai/"
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Log in to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_TOKEN }}
- name: Extract major and minor versions
id: versions
run: |
VERSION=${{ steps.get_version.outputs.VERSION }}
MAJOR=$(echo $VERSION | cut -d. -f1)
MINOR=$(echo $VERSION | cut -d. -f1-2)
echo "MAJOR=$MAJOR" >> $GITHUB_OUTPUT
echo "MINOR=$MINOR" >> $GITHUB_OUTPUT
- name: Build and push Docker images
uses: docker/build-push-action@v5
with:
context: .
push: true
tags: |
unclecode/crawl4ai:${{ steps.get_version.outputs.VERSION }}
unclecode/crawl4ai:${{ steps.versions.outputs.MINOR }}
unclecode/crawl4ai:${{ steps.versions.outputs.MAJOR }}
unclecode/crawl4ai:latest
platforms: linux/amd64,linux/arm64
- name: Create GitHub Release
uses: softprops/action-gh-release@v2
with:
tag_name: v${{ steps.get_version.outputs.VERSION }}
name: Release v${{ steps.get_version.outputs.VERSION }}
body: |
## 🎉 Crawl4AI v${{ steps.get_version.outputs.VERSION }} Released!
### 📦 Installation
**PyPI:**
```bash
pip install crawl4ai==${{ steps.get_version.outputs.VERSION }}
```
**Docker:**
```bash
docker pull unclecode/crawl4ai:${{ steps.get_version.outputs.VERSION }}
docker pull unclecode/crawl4ai:latest
```
### 📝 What's Changed
See [CHANGELOG.md](https://github.com/${{ github.repository }}/blob/main/CHANGELOG.md) for details.
draft: false
prerelease: false
token: ${{ secrets.GITHUB_TOKEN }}
- name: Summary
run: |
echo "## 🚀 Release Complete!" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### 📦 PyPI Package" >> $GITHUB_STEP_SUMMARY
echo "- Version: ${{ steps.get_version.outputs.VERSION }}" >> $GITHUB_STEP_SUMMARY
echo "- URL: https://pypi.org/project/crawl4ai/" >> $GITHUB_STEP_SUMMARY
echo "- Install: \`pip install crawl4ai==${{ steps.get_version.outputs.VERSION }}\`" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### 🐳 Docker Images" >> $GITHUB_STEP_SUMMARY
echo "- \`unclecode/crawl4ai:${{ steps.get_version.outputs.VERSION }}\`" >> $GITHUB_STEP_SUMMARY
echo "- \`unclecode/crawl4ai:${{ steps.versions.outputs.MINOR }}\`" >> $GITHUB_STEP_SUMMARY
echo "- \`unclecode/crawl4ai:${{ steps.versions.outputs.MAJOR }}\`" >> $GITHUB_STEP_SUMMARY
echo "- \`unclecode/crawl4ai:latest\`" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### 📋 GitHub Release" >> $GITHUB_STEP_SUMMARY
echo "https://github.com/${{ github.repository }}/releases/tag/v${{ steps.get_version.outputs.VERSION }}" >> $GITHUB_STEP_SUMMARY


@@ -1,116 +0,0 @@
name: Test Release Pipeline
on:
push:
tags:
- 'test-v*'
jobs:
test-release:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.12'
- name: Extract version from tag
id: get_version
run: |
TAG_VERSION=${GITHUB_REF#refs/tags/test-v}
echo "VERSION=$TAG_VERSION" >> $GITHUB_OUTPUT
echo "Testing with version: $TAG_VERSION"
- name: Install package dependencies
run: |
pip install -e .
- name: Check version consistency
run: |
TAG_VERSION=${{ steps.get_version.outputs.VERSION }}
PACKAGE_VERSION=$(python -c "from crawl4ai.__version__ import __version__; print(__version__)")
echo "Tag version: $TAG_VERSION"
echo "Package version: $PACKAGE_VERSION"
if [ "$TAG_VERSION" != "$PACKAGE_VERSION" ]; then
echo "❌ Version mismatch! Tag: $TAG_VERSION, Package: $PACKAGE_VERSION"
echo "Please update crawl4ai/__version__.py to match the tag version"
exit 1
fi
echo "✅ Version check passed: $TAG_VERSION"
- name: Install build dependencies
run: |
python -m pip install --upgrade pip
pip install build twine
- name: Build package
run: python -m build
- name: Check package
run: twine check dist/*
- name: Upload to Test PyPI
env:
TWINE_USERNAME: __token__
TWINE_PASSWORD: ${{ secrets.TEST_PYPI_TOKEN }}
run: |
echo "📦 Uploading to Test PyPI..."
twine upload --repository testpypi dist/* || {
if [ $? -eq 1 ]; then
echo "⚠️ Upload failed - likely version already exists on Test PyPI"
echo "Continuing anyway for test purposes..."
else
exit 1
fi
}
echo "✅ Test PyPI step complete"
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Log in to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_TOKEN }}
- name: Build and push Docker test images
uses: docker/build-push-action@v5
with:
context: .
push: true
tags: |
unclecode/crawl4ai:test-${{ steps.get_version.outputs.VERSION }}
unclecode/crawl4ai:test-latest
platforms: linux/amd64,linux/arm64
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Summary
run: |
echo "## 🎉 Test Release Complete!" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### 📦 Test PyPI Package" >> $GITHUB_STEP_SUMMARY
echo "- Version: ${{ steps.get_version.outputs.VERSION }}" >> $GITHUB_STEP_SUMMARY
echo "- URL: https://test.pypi.org/project/crawl4ai/" >> $GITHUB_STEP_SUMMARY
echo "- Install: \`pip install -i https://test.pypi.org/simple/ crawl4ai==${{ steps.get_version.outputs.VERSION }}\`" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### 🐳 Docker Test Images" >> $GITHUB_STEP_SUMMARY
echo "- \`unclecode/crawl4ai:test-${{ steps.get_version.outputs.VERSION }}\`" >> $GITHUB_STEP_SUMMARY
echo "- \`unclecode/crawl4ai:test-latest\`" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### 🧹 Cleanup Commands" >> $GITHUB_STEP_SUMMARY
echo "\`\`\`bash" >> $GITHUB_STEP_SUMMARY
echo "# Remove test tag" >> $GITHUB_STEP_SUMMARY
echo "git tag -d test-v${{ steps.get_version.outputs.VERSION }}" >> $GITHUB_STEP_SUMMARY
echo "git push origin :test-v${{ steps.get_version.outputs.VERSION }}" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "# Remove Docker test images" >> $GITHUB_STEP_SUMMARY
echo "docker rmi unclecode/crawl4ai:test-${{ steps.get_version.outputs.VERSION }}" >> $GITHUB_STEP_SUMMARY
echo "docker rmi unclecode/crawl4ai:test-latest" >> $GITHUB_STEP_SUMMARY
echo "\`\`\`" >> $GITHUB_STEP_SUMMARY

.gitignore (vendored): 87 changed lines

@@ -1,13 +1,3 @@
# Scripts folder (private tools)
.scripts/
# Database files
*.db
# Environment files
.env
.env.local
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
@@ -216,80 +206,7 @@ pypi_build.sh
git_issues.py
git_issues.md
.next/
.tests/
# .issues/
.docs/
.issues/
.gitboss/
todo_executor.md
protect-all-except-feature.sh
manage-collab.sh
publish.sh
combine.sh
combined_output.txt
.local
.scripts
tree.md
tree.md
.scripts
.local
.do
/plans
plans/
# Codeium
.codeiumignore
todo/
# Continue development files
.continue/
.continuerc.json
continue.lock
continue_core.log
contextProviders/
continue_workspace/
.continue-cache/
continue_config.json
# Continue temporary files
.continue-temp/
.continue-logs/
.continue-downloads/
# Continue VS Code specific
.vscode-continue/
.vscode-continue-cache/
.prompts/
.llm.env
.private/
CLAUDE_MONITOR.md
CLAUDE.md
.claude/
tests/**/test_site
tests/**/reports
tests/**/benchmark_reports
test_scripts/
docs/**/data
.codecat/
docs/apps/linkdin/debug*/
docs/apps/linkdin/samples/insights/*
scripts/
# Database files
*.sqlite3
*.sqlite3-journal
*.db-journal
*.db-wal
*.db-shm
*.db
*.rdb
*.ldb
.docs/
.issues/

File diff suppressed because it is too large.


@@ -1,131 +0,0 @@
# Crawl4AI Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, caste, color, religion, or sexual
identity and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the overall
community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or advances of
any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email address,
without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official email address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
unclecode@crawl4ai.com. All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series of
actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or permanent
ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within the
community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.1, available at
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
Community Impact Guidelines were inspired by
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
For answers to common questions about this code of conduct, see the FAQ at
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
[https://www.contributor-covenant.org/translations][translations].
[homepage]: https://www.contributor-covenant.org
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
[Mozilla CoC]: https://github.com/mozilla/diversity
[FAQ]: https://www.contributor-covenant.org/faq
[translations]: https://www.contributor-covenant.org/translations


@@ -6,44 +6,21 @@ We would like to thank the following people for their contributions to Crawl4AI:
- [Unclecode](https://github.com/unclecode) - Project Creator and Main Developer
- [Nasrin](https://github.com/ntohidi) - Project Manager and Developer
- [Aravind Karnam](https://github.com/aravindkarnam) - Head of Community and Product
- [Aravind Karnam](https://github.com/aravindkarnam) - Developer
## Community Contributors
- [aadityakanjolia4](https://github.com/aadityakanjolia4) - Fix for the "`CustomHTML2Text` is not defined" error.
- [FractalMind](https://github.com/FractalMind) - Created the first official Docker Hub image and fixed Dockerfile errors
- [ketonkss4](https://github.com/ketonkss4) - Identified Selenium's new capabilities, helping reduce dependencies
- [jonymusky](https://github.com/jonymusky) - Javascript execution documentation, and wait_for
- [datehoer](https://github.com/datehoer) - Add browser proxy support
## Pull Requests
- [dvschuyl](https://github.com/dvschuyl) - AsyncPlaywrightCrawlerStrategy page-evaluate context destroyed by navigation [#304](https://github.com/unclecode/crawl4ai/pull/304)
- [nelzomal](https://github.com/nelzomal) - Enhance development installation instructions [#286](https://github.com/unclecode/crawl4ai/pull/286)
- [HamzaFarhan](https://github.com/HamzaFarhan) - Handled the cases where markdown_with_citations, references_markdown, and filtered_html might not be defined [#293](https://github.com/unclecode/crawl4ai/pull/293)
- [NanmiCoder](https://github.com/NanmiCoder) - fix: crawler strategy exception handling and fixes [#271](https://github.com/unclecode/crawl4ai/pull/271)
- [paulokuong](https://github.com/paulokuong) - fix: RAWL4_AI_BASE_DIRECTORY should be Path object instead of string [#298](https://github.com/unclecode/crawl4ai/pull/298)
#### Feb-Alpha-1
- [sufianuddin](https://github.com/sufianuddin) - fix: [Documentation for JsonCssExtractionStrategy](https://github.com/unclecode/crawl4ai/issues/651)
- [tautikAg](https://github.com/tautikAg) - fix: [Markdown output has incorrect spacing](https://github.com/unclecode/crawl4ai/issues/599)
- [cardit1](https://github.com/cardit1) - fix: ['AsyncPlaywrightCrawlerStrategy' object has no attribute 'downloads_path'](https://github.com/unclecode/crawl4ai/issues/585)
- [dmurat](https://github.com/dmurat) - fix: [Incorrect rendering of inline code inside of links](https://github.com/unclecode/crawl4ai/issues/583)
- [Sparshsing](https://github.com/Sparshsing) - fix: [Relative Urls in the webpage not extracted properly](https://github.com/unclecode/crawl4ai/issues/570)
## Other Contributors
- [Gokhan](https://github.com/gkhngyk)
- [Shiv Kumar](https://github.com/shivkumar0757)
- [QIN2DIM](https://github.com/QIN2DIM)
#### Typo fixes
- [ssoydan](https://github.com/ssoydan)
- [Darshan](https://github.com/Darshan2104)
- [tuhinmallick](https://github.com/tuhinmallick)
## Acknowledgements
We also want to thank all the users who have reported bugs, suggested features, or helped in any other way to make Crawl4AI better.


@@ -1,36 +1,29 @@
FROM python:3.12-slim-bookworm AS build
# syntax=docker/dockerfile:1.4
# C4ai version
ARG C4AI_VER=0.7.0-r1
ENV C4AI_VERSION=$C4AI_VER
LABEL c4ai.version=$C4AI_VER
# Build arguments
ARG PYTHON_VERSION=3.10
# Set build arguments
ARG APP_HOME=/app
ARG GITHUB_REPO=https://github.com/unclecode/crawl4ai.git
ARG GITHUB_BRANCH=main
ARG USE_LOCAL=true
# Base stage with system dependencies
FROM python:${PYTHON_VERSION}-slim as base
ENV PYTHONFAULTHANDLER=1 \
PYTHONHASHSEED=random \
PYTHONUNBUFFERED=1 \
PIP_NO_CACHE_DIR=1 \
PYTHONDONTWRITEBYTECODE=1 \
PIP_DISABLE_PIP_VERSION_CHECK=1 \
PIP_DEFAULT_TIMEOUT=100 \
DEBIAN_FRONTEND=noninteractive \
REDIS_HOST=localhost \
REDIS_PORT=6379
ARG PYTHON_VERSION=3.12
ARG INSTALL_TYPE=default
# Declare ARG variables again within the build stage
ARG INSTALL_TYPE=all
ARG ENABLE_GPU=false
ARG TARGETARCH
# Platform-specific labels
LABEL maintainer="unclecode"
LABEL description="🔥🕷️ Crawl4AI: Open-source LLM Friendly Web Crawler & scraper"
LABEL description="Crawl4AI - Advanced Web Crawler with AI capabilities"
LABEL version="1.0"
# Environment setup
ENV PYTHONUNBUFFERED=1 \
PYTHONDONTWRITEBYTECODE=1 \
PIP_NO_CACHE_DIR=1 \
PIP_DISABLE_PIP_VERSION_CHECK=1 \
PIP_DEFAULT_TIMEOUT=100 \
DEBIAN_FRONTEND=noninteractive
# Install system dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
build-essential \
curl \
@@ -41,11 +34,10 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
pkg-config \
python3-dev \
libjpeg-dev \
redis-server \
supervisor \
&& apt-get clean \
libpng-dev \
&& rm -rf /var/lib/apt/lists/*
# Playwright system dependencies for Linux
RUN apt-get update && apt-get install -y --no-install-recommends \
libglib2.0-0 \
libnss3 \
@@ -68,66 +60,28 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
libcairo2 \
libasound2 \
libatspi2.0-0 \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
RUN apt-get update && apt-get dist-upgrade -y \
&& rm -rf /var/lib/apt/lists/*
RUN if [ "$ENABLE_GPU" = "true" ] && [ "$TARGETARCH" = "amd64" ] ; then \
# GPU support if enabled
RUN if [ "$ENABLE_GPU" = "true" ] ; then \
apt-get update && apt-get install -y --no-install-recommends \
nvidia-cuda-toolkit \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* ; \
else \
echo "Skipping NVIDIA CUDA Toolkit installation (unsupported platform or GPU disabled)"; \
fi
fi
RUN if [ "$TARGETARCH" = "arm64" ]; then \
echo "🦾 Installing ARM-specific optimizations"; \
apt-get update && apt-get install -y --no-install-recommends \
libopenblas-dev \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*; \
elif [ "$TARGETARCH" = "amd64" ]; then \
echo "🖥️ Installing AMD64-specific optimizations"; \
apt-get update && apt-get install -y --no-install-recommends \
libomp-dev \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*; \
else \
echo "Skipping platform-specific optimizations (unsupported platform)"; \
fi
# Create and set working directory
WORKDIR /app
# Create a non-root user and group
RUN groupadd -r appuser && useradd --no-log-init -r -g appuser appuser
# Copy the entire project
COPY . .
# Create and set permissions for appuser home directory
RUN mkdir -p /home/appuser && chown -R appuser:appuser /home/appuser
WORKDIR ${APP_HOME}
RUN echo '#!/bin/bash\n\
if [ "$USE_LOCAL" = "true" ]; then\n\
echo "📦 Installing from local source..."\n\
pip install --no-cache-dir /tmp/project/\n\
else\n\
echo "🌐 Installing from GitHub..."\n\
for i in {1..3}; do \n\
git clone --branch ${GITHUB_BRANCH} ${GITHUB_REPO} /tmp/crawl4ai && break || \n\
{ echo "Attempt $i/3 failed! Taking a short break... ☕"; sleep 5; }; \n\
done\n\
pip install --no-cache-dir /tmp/crawl4ai\n\
fi' > /tmp/install.sh && chmod +x /tmp/install.sh
COPY . /tmp/project/
# Copy supervisor config first (might need root later, but okay for now)
COPY deploy/docker/supervisord.conf .
COPY deploy/docker/requirements.txt .
# Install base requirements
RUN pip install --no-cache-dir -r requirements.txt
# Install required library for FastAPI
RUN pip install fastapi uvicorn psutil
# Install ML dependencies first for better layer caching
RUN if [ "$INSTALL_TYPE" = "all" ] ; then \
pip install --no-cache-dir \
torch \
@@ -140,61 +94,33 @@ RUN if [ "$INSTALL_TYPE" = "all" ] ; then \
python -m nltk.downloader punkt stopwords ; \
fi
# Install the package
RUN if [ "$INSTALL_TYPE" = "all" ] ; then \
pip install "/tmp/project/[all]" && \
pip install -e ".[all]" && \
python -m crawl4ai.model_loader ; \
elif [ "$INSTALL_TYPE" = "torch" ] ; then \
pip install "/tmp/project/[torch]" ; \
pip install -e ".[torch]" ; \
elif [ "$INSTALL_TYPE" = "transformer" ] ; then \
pip install "/tmp/project/[transformer]" && \
pip install -e ".[transformer]" && \
python -m crawl4ai.model_loader ; \
else \
pip install "/tmp/project" ; \
pip install -e "." ; \
fi
RUN pip install --no-cache-dir --upgrade pip && \
/tmp/install.sh && \
python -c "import crawl4ai; print('✅ crawl4ai is ready to rock!')" && \
python -c "from playwright.sync_api import sync_playwright; print('✅ Playwright is feeling dramatic!')"
# Install Playwright and browsers
RUN playwright install
RUN crawl4ai-setup
# Health check
HEALTHCHECK --interval=30s --timeout=30s --start-period=5s --retries=3 \
CMD curl -f http://localhost:8000/health || exit 1
RUN playwright install --with-deps
# Expose port
EXPOSE 8000 11235 9222 8080
RUN mkdir -p /home/appuser/.cache/ms-playwright \
&& cp -r /root/.cache/ms-playwright/chromium-* /home/appuser/.cache/ms-playwright/ \
&& chown -R appuser:appuser /home/appuser/.cache/ms-playwright
# Optional: Increase shared memory size to prevent browser crashes
# when loading heavy pages
RUN mkdir /dev/shm
VOLUME /dev/shm
RUN crawl4ai-doctor
# Copy application code
COPY deploy/docker/* ${APP_HOME}/
# copy the playground + any future static assets
COPY deploy/docker/static ${APP_HOME}/static
# Change ownership of the application directory to the non-root user
RUN chown -R appuser:appuser ${APP_HOME}
# give permissions to redis persistence dirs if used
RUN mkdir -p /var/lib/redis /var/log/redis && chown -R appuser:appuser /var/lib/redis /var/log/redis
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
CMD bash -c '\
MEM=$(free -m | awk "/^Mem:/{print \$2}"); \
if [ $MEM -lt 2048 ]; then \
echo "⚠️ Warning: Less than 2GB RAM available! Your container might need a memory boost! 🚀"; \
exit 1; \
fi && \
redis-cli ping > /dev/null && \
curl -f http://localhost:11235/health || exit 1'
EXPOSE 6379
# Switch to the non-root user before starting the application
USER appuser
# Set environment variables to production
ENV PYTHON_ENV=production
# Start the application using supervisord
CMD ["supervisord", "-c", "supervisord.conf"]
# Start the FastAPI server
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "11235"]


@@ -1,339 +0,0 @@
# Development Journal
This journal tracks significant feature additions, bug fixes, and architectural decisions in the crawl4ai project. It serves as both documentation and a historical record of the project's evolution.
## [2025-04-17] Added Content Source Selection for Markdown Generation
**Feature:** Configurable content source for markdown generation
**Changes Made:**
1. Added `content_source: str = "cleaned_html"` parameter to `MarkdownGenerationStrategy` class
2. Updated `DefaultMarkdownGenerator` to accept and pass the content source parameter
3. Renamed the `cleaned_html` parameter to `input_html` in the `generate_markdown` method
4. Modified `AsyncWebCrawler.aprocess_html` to select the appropriate HTML source based on the generator's config
5. Added `preprocess_html_for_schema` import in `async_webcrawler.py`
**Implementation Details:**
- Added a new `content_source` parameter to specify which HTML input to use for markdown generation
- Options include: "cleaned_html" (default), "raw_html", and "fit_html"
- Used a dictionary dispatch pattern in `aprocess_html` to select the appropriate HTML source
- Added proper error handling with fallback to cleaned_html if content source selection fails
- Ensured backward compatibility by defaulting to "cleaned_html" option
**Files Modified:**
- `crawl4ai/markdown_generation_strategy.py`: Added content_source parameter and updated the method signature
- `crawl4ai/async_webcrawler.py`: Added HTML source selection logic and updated imports
**Examples:**
- Created `docs/examples/content_source_example.py` demonstrating how to use the new parameter
**Challenges:**
- Maintaining backward compatibility while reorganizing the parameter flow
- Ensuring proper error handling for all content source options
- Making the change with minimal code modifications
**Why This Feature:**
The content source selection feature allows users to choose which HTML content to use as input for markdown generation:
1. "cleaned_html" - Uses the post-processed HTML after scraping strategy (original behavior)
2. "raw_html" - Uses the original raw HTML directly from the web page
3. "fit_html" - Uses the preprocessed HTML optimized for schema extraction
This feature provides greater flexibility in how users generate markdown, enabling them to:
- Capture more detailed content from the original HTML when needed
- Use schema-optimized HTML when working with structured data
- Choose the approach that best suits their specific use case
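A minimal usage sketch of the content source selection described above (import paths follow the files listed in this entry; treat exact names as assumptions about the released API):

```python
# Sketch only: selects the raw page HTML as the markdown input source.
import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator

async def main():
    # "raw_html" uses the original page HTML; "cleaned_html" (default) and
    # "fit_html" are the other options described above.
    md_generator = DefaultMarkdownGenerator(content_source="raw_html")
    config = CrawlerRunConfig(markdown_generator=md_generator)
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun("https://example.com", config=config)
        print(result.markdown[:300])

asyncio.run(main())
```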
## [2025-04-17] Implemented High Volume Stress Testing Solution for SDK
**Feature:** Comprehensive stress testing framework using `arun_many` and the dispatcher system to evaluate performance, concurrency handling, and identify potential issues under high-volume crawling scenarios.
**Changes Made:**
1. Created a dedicated stress testing framework in the `benchmarking/` (or similar) directory.
2. Implemented local test site generation (`SiteGenerator`) with configurable heavy HTML pages.
3. Added basic memory usage tracking (`SimpleMemoryTracker`) using platform-specific commands (avoiding `psutil` dependency for this specific test).
4. Utilized `CrawlerMonitor` from `crawl4ai` for rich terminal UI and real-time monitoring of test progress and dispatcher activity.
5. Implemented detailed result summary saving (JSON) and memory sample logging (CSV).
6. Developed `run_benchmark.py` to orchestrate tests with predefined configurations.
7. Created `run_all.sh` as a simple wrapper for `run_benchmark.py`.
**Implementation Details:**
- Generates a local test site with configurable pages containing heavy text and image content.
- Uses Python's built-in `http.server` for local serving, minimizing network variance.
- Leverages `crawl4ai`'s `arun_many` method for processing URLs.
- Utilizes `MemoryAdaptiveDispatcher` to manage concurrency via the `max_sessions` parameter (note: memory adaptation features require `psutil`, not used by `SimpleMemoryTracker`).
- Tracks memory usage via `SimpleMemoryTracker`, recording samples throughout test execution to a CSV file.
- Uses `CrawlerMonitor` (which uses the `rich` library) for clear terminal visualization and progress reporting directly from the dispatcher.
- Stores detailed final metrics in a JSON summary file.
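A minimal sketch of the `arun_many` plus dispatcher pattern this test exercises; the local URLs are placeholders for the generated test site, and the `MemoryAdaptiveDispatcher` import and `max_session_permit` name follow this entry (they may differ in the installed SDK):

```python
import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, MemoryAdaptiveDispatcher

async def main():
    # Placeholder URLs standing in for the locally generated heavy pages
    urls = [f"http://localhost:8000/page_{i}.html" for i in range(100)]
    dispatcher = MemoryAdaptiveDispatcher(max_session_permit=16)  # concurrency cap
    config = CrawlerRunConfig(stream=False)  # batch mode; per-chunk summaries possible
    async with AsyncWebCrawler() as crawler:
        results = await crawler.arun_many(urls, config=config, dispatcher=dispatcher)
        ok = sum(1 for r in results if r.success)
        print(f"{ok}/{len(results)} pages crawled successfully")

asyncio.run(main())
```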
**Files Created/Updated:**
- `stress_test_sdk.py`: Main stress testing implementation using `arun_many`.
- `benchmark_report.py`: (Assumed) Report generator for comparing test results.
- `run_benchmark.py`: Test runner script with predefined configurations.
- `run_all.sh`: Simple bash script wrapper for `run_benchmark.py`.
- `USAGE.md`: Comprehensive documentation on usage and interpretation (updated).
**Testing Approach:**
- Creates a controlled, reproducible test environment with a local HTTP server.
- Processes URLs using `arun_many`, allowing the dispatcher to manage concurrency up to `max_sessions`.
- Optionally logs per-batch summaries (when not in streaming mode) after processing chunks.
- Supports different test sizes via `run_benchmark.py` configurations.
- Records memory samples via platform commands for basic trend analysis.
- Includes cleanup functionality for the test environment.
**Challenges:**
- Ensuring proper cleanup of HTTP server processes.
- Getting reliable memory tracking across platforms without adding heavy dependencies (`psutil`) to this specific test script.
- Designing `run_benchmark.py` to correctly pass arguments to `stress_test_sdk.py`.
**Why This Feature:**
The high volume stress testing solution addresses critical needs for ensuring Crawl4AI's `arun_many` reliability:
1. Provides a reproducible way to evaluate performance under concurrent load.
2. Allows testing the dispatcher's concurrency control (`max_session_permit`) and queue management.
3. Enables performance tuning by observing throughput (`URLs/sec`) under different `max_sessions` settings.
4. Creates a controlled environment for testing `arun_many` behavior.
5. Supports continuous integration by providing deterministic test conditions for `arun_many`.
**Design Decisions:**
- Chose local site generation for reproducibility and isolation from network issues.
- Utilized the built-in `CrawlerMonitor` for real-time feedback, leveraging its `rich` integration.
- Implemented optional per-batch logging in `stress_test_sdk.py` (when not streaming) to provide chunk-level summaries alongside the continuous monitor.
- Adopted `arun_many` with a `MemoryAdaptiveDispatcher` as the core mechanism for parallel execution, reflecting the intended SDK usage.
- Created `run_benchmark.py` to simplify running standard test configurations.
- Used `SimpleMemoryTracker` to provide basic memory insights without requiring `psutil` for this particular test runner.
**Future Enhancements to Consider:**
- Create a separate test variant that *does* use `psutil` to specifically stress the memory-adaptive features of the dispatcher.
- Add support for generated JavaScript content.
- Add support for Docker-based testing with explicit memory limits.
- Enhance `benchmark_report.py` to provide more sophisticated analysis of performance and memory trends from the generated JSON/CSV files.
---
## [2025-04-17] Refined Stress Testing System Parameters and Execution
**Changes Made:**
1. Corrected `run_benchmark.py` and `stress_test_sdk.py` to use `--max-sessions` instead of the incorrect `--workers` parameter, accurately reflecting dispatcher configuration.
2. Updated `run_benchmark.py` argument handling to correctly pass all relevant custom parameters (including `--stream`, `--monitor-mode`, etc.) to `stress_test_sdk.py`.
3. (Assuming changes in `benchmark_report.py`) Applied dark theme to benchmark reports for better readability.
4. (Assuming changes in `benchmark_report.py`) Improved visualization code to eliminate matplotlib warnings.
5. Updated `run_benchmark.py` to provide clickable `file://` links to generated reports in the terminal output.
6. Updated `USAGE.md` with comprehensive parameter descriptions reflecting the final script arguments.
7. Updated `run_all.sh` wrapper to correctly invoke `run_benchmark.py` with flexible arguments.
**Details of Changes:**
1. **Parameter Correction (`--max-sessions`)**:
* Identified the fundamental misunderstanding where `--workers` was used incorrectly.
* Refactored `stress_test_sdk.py` to accept `--max-sessions` and configure the `MemoryAdaptiveDispatcher`'s `max_session_permit` accordingly.
* Updated `run_benchmark.py` argument parsing and command construction to use `--max-sessions`.
* Updated `TEST_CONFIGS` in `run_benchmark.py` to use `max_sessions`.
2. **Argument Handling (`run_benchmark.py`)**:
* Improved logic to collect all command-line arguments provided to `run_benchmark.py`.
* Ensured all relevant arguments (like `--stream`, `--monitor-mode`, `--port`, `--use-rate-limiter`, etc.) are correctly forwarded when calling `stress_test_sdk.py` as a subprocess.
3. **Dark Theme & Visualization Fixes (Assumed in `benchmark_report.py`)**:
* (Describes changes assumed to be made in the separate reporting script).
4. **Clickable Links (`run_benchmark.py`)**:
* Added logic to find the latest HTML report and PNG chart in the `benchmark_reports` directory after `benchmark_report.py` runs.
* Used `pathlib` to generate correct `file://` URLs for terminal output.
5. **Documentation Improvements (`USAGE.md`)**:
* Rewrote sections to explain `arun_many`, dispatchers, and `--max-sessions`.
* Updated parameter tables for all scripts (`stress_test_sdk.py`, `run_benchmark.py`).
* Clarified the difference between batch and streaming modes and their effect on logging.
* Updated examples to use correct arguments.
**Files Modified:**
- `stress_test_sdk.py`: Changed `--workers` to `--max-sessions`, added new arguments, used `arun_many`.
- `run_benchmark.py`: Changed argument handling, updated configs, calls `stress_test_sdk.py`.
- `run_all.sh`: Updated to call `run_benchmark.py` correctly.
- `USAGE.md`: Updated documentation extensively.
- `benchmark_report.py`: (Assumed modifications for dark theme and viz fixes).
**Testing:**
- Verified that `--max-sessions` correctly limits concurrency via the `CrawlerMonitor` output.
- Confirmed that custom arguments passed to `run_benchmark.py` are forwarded to `stress_test_sdk.py`.
- Validated clickable links work in supporting terminals.
- Ensured documentation matches the final script parameters and behavior.
**Why These Changes:**
These refinements correct the fundamental approach of the stress test to align with `crawl4ai`'s actual architecture and intended usage:
1. Ensures the test evaluates the correct components (`arun_many`, `MemoryAdaptiveDispatcher`).
2. Makes test configurations more accurate and flexible.
3. Improves the usability of the testing framework through better argument handling and documentation.
**Future Enhancements to Consider:**
- Add support for generated JavaScript content to test JS rendering performance
- Implement more sophisticated memory analysis like generational garbage collection tracking
- Add support for Docker-based testing with memory limits to force OOM conditions
- Create visualization tools for analyzing memory usage patterns across test runs
- Add benchmark comparisons between different crawler versions or configurations
## [2025-04-17] Fixed Issues in Stress Testing System
**Changes Made:**
1. Fixed custom parameter handling in run_benchmark.py
2. Applied dark theme to benchmark reports for better readability
3. Improved visualization code to eliminate matplotlib warnings
4. Added clickable links to generated reports in terminal output
5. Enhanced documentation with comprehensive parameter descriptions
**Details of Changes:**
1. **Custom Parameter Handling Fix**
- Identified bug where custom URL count was being ignored in run_benchmark.py
- Rewrote argument handling to use a custom args dictionary
- Properly passed parameters to the test_simple_stress.py command
- Added better UI indication of custom parameters in use
2. **Dark Theme Implementation**
- Added complete dark theme to HTML benchmark reports
- Applied dark styling to all visualization components
- Used Nord-inspired color palette for charts and graphs
- Improved contrast and readability for data visualization
- Updated text colors and backgrounds for better eye comfort
3. **Matplotlib Warning Fixes**
- Resolved warnings related to improper use of set_xticklabels()
- Implemented correct x-axis positioning for bar charts
- Ensured proper alignment of bar labels and data points
- Updated plotting code to use modern matplotlib practices
4. **Documentation Improvements**
- Created comprehensive USAGE.md with detailed instructions
- Added parameter documentation for all scripts
- Included examples for all common use cases
- Provided detailed explanations for interpreting results
- Added troubleshooting guide for common issues
**Files Modified:**
- `tests/memory/run_benchmark.py`: Fixed custom parameter handling
- `tests/memory/benchmark_report.py`: Added dark theme and fixed visualization warnings
- `tests/memory/run_all.sh`: Added clickable links to reports
- `tests/memory/USAGE.md`: Created comprehensive documentation
**Testing:**
- Verified that custom URL counts are now correctly used
- Confirmed dark theme is properly applied to all report elements
- Checked that matplotlib warnings are no longer appearing
- Validated clickable links to reports work in terminals that support them
**Why These Changes:**
These improvements address several usability issues with the stress testing system:
1. Better parameter handling ensures test configurations work as expected
2. Dark theme reduces eye strain during extended test review sessions
3. Fixing visualization warnings improves code quality and output clarity
4. Enhanced documentation makes the system more accessible for future use
**Future Enhancements:**
- Add additional visualization options for different types of analysis
- Implement theme toggle to support both light and dark preferences
- Add export options for embedding reports in other documentation
- Create dedicated CI/CD integration templates for automated testing
## [2025-04-09] Added MHTML Capture Feature
**Feature:** MHTML snapshot capture of crawled pages
**Changes Made:**
1. Added `capture_mhtml: bool = False` parameter to `CrawlerRunConfig` class
2. Added `mhtml: Optional[str] = None` field to `CrawlResult` model
3. Added `mhtml_data: Optional[str] = None` field to `AsyncCrawlResponse` class
4. Implemented `capture_mhtml()` method in `AsyncPlaywrightCrawlerStrategy` class to capture MHTML via CDP
5. Modified the crawler to capture MHTML when enabled and pass it to the result
**Implementation Details:**
- MHTML capture uses Chrome DevTools Protocol (CDP) via Playwright's CDP session API
- The implementation waits for page to fully load before capturing MHTML content
- Enhanced waiting for JavaScript content with requestAnimationFrame for better JS content capture
- We ensure all browser resources are properly cleaned up after capture
**Files Modified:**
- `crawl4ai/models.py`: Added the mhtml field to CrawlResult
- `crawl4ai/async_configs.py`: Added capture_mhtml parameter to CrawlerRunConfig
- `crawl4ai/async_crawler_strategy.py`: Implemented MHTML capture logic
- `crawl4ai/async_webcrawler.py`: Added mapping from AsyncCrawlResponse.mhtml_data to CrawlResult.mhtml
**Testing:**
- Created comprehensive tests in `tests/20241401/test_mhtml.py` covering:
- Capturing MHTML when enabled
- Ensuring mhtml is None when disabled explicitly
- Ensuring mhtml is None by default
- Capturing MHTML on JavaScript-enabled pages
**Challenges:**
- Had to improve page loading detection to ensure JavaScript content was fully rendered
- Tests needed to be run independently due to Playwright browser instance management
- Modified test expected content to match actual MHTML output
**Why This Feature:**
The MHTML capture feature allows users to capture complete web pages including all resources (CSS, images, etc.) in a single file. This is valuable for:
1. Offline viewing of captured pages
2. Creating permanent snapshots of web content for archival
3. Ensuring consistent content for later analysis, even if the original site changes
**Future Enhancements to Consider:**
- Add option to save MHTML to file
- Support for filtering what resources get included in MHTML
- Add support for specifying MHTML capture options
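A minimal usage sketch for this feature (parameter and field names are taken from this entry):

```python
import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig

async def main():
    config = CrawlerRunConfig(capture_mhtml=True)
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun("https://example.com", config=config)
        if result.mhtml:
            # Persist the single-file snapshot for offline viewing or archival
            with open("snapshot.mhtml", "w", encoding="utf-8") as f:
                f.write(result.mhtml)

asyncio.run(main())
```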
## [2025-04-10] Added Network Request and Console Message Capturing
**Feature:** Comprehensive capturing of network requests/responses and browser console messages during crawling
**Changes Made:**
1. Added `capture_network_requests: bool = False` and `capture_console_messages: bool = False` parameters to `CrawlerRunConfig` class
2. Added `network_requests: Optional[List[Dict[str, Any]]] = None` and `console_messages: Optional[List[Dict[str, Any]]] = None` fields to both `AsyncCrawlResponse` and `CrawlResult` models
3. Implemented event listeners in `AsyncPlaywrightCrawlerStrategy._crawl_web()` to capture browser network events and console messages
4. Added proper event listener cleanup in the finally block to prevent resource leaks
5. Modified the crawler flow to pass captured data from AsyncCrawlResponse to CrawlResult
**Implementation Details:**
- Network capture uses Playwright event listeners (`request`, `response`, and `requestfailed`) to record all network activity
- Console capture uses Playwright event listeners (`console` and `pageerror`) to record console messages and errors
- Each network event includes metadata like URL, headers, status, and timing information
- Each console message includes type, text content, and source location when available
- All captured events include timestamps for chronological analysis
- Error handling ensures even failed capture attempts won't crash the main crawling process
**Files Modified:**
- `crawl4ai/models.py`: Added new fields to AsyncCrawlResponse and CrawlResult
- `crawl4ai/async_configs.py`: Added new configuration parameters to CrawlerRunConfig
- `crawl4ai/async_crawler_strategy.py`: Implemented capture logic using event listeners
- `crawl4ai/async_webcrawler.py`: Added data transfer from AsyncCrawlResponse to CrawlResult
**Documentation:**
- Created detailed documentation in `docs/md_v2/advanced/network-console-capture.md`
- Added feature to site navigation in `mkdocs.yml`
- Updated CrawlResult documentation in `docs/md_v2/api/crawl-result.md`
- Created comprehensive example in `docs/examples/network_console_capture_example.py`
**Testing:**
- Created `tests/general/test_network_console_capture.py` with tests for:
- Verifying capture is disabled by default
- Testing network request capturing
- Testing console message capturing
- Ensuring both capture types can be enabled simultaneously
- Checking correct content is captured in expected formats
**Challenges:**
- Initial implementation had synchronous/asynchronous mismatches in event handlers
- Needed to fix the use of property access vs. method calls in handlers
- Required careful cleanup of event listeners to prevent memory leaks
**Why This Feature:**
The network and console capture feature provides deep visibility into web page activity, enabling:
1. Debugging complex web applications by seeing all network requests and errors
2. Security analysis to detect unexpected third-party requests and data flows
3. Performance profiling to identify slow-loading resources
4. API discovery in single-page applications
5. Comprehensive analysis of web application behavior
**Future Enhancements to Consider:**
- Option to filter captured events by type, domain, or content
- Support for capturing response bodies (with size limits)
- Aggregate statistics calculation for performance metrics
- Integration with visualization tools for network waterfall analysis
- Exporting captures in HAR format for use with external tools
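A minimal sketch of enabling both capture types (names follow this entry):

```python
import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig

async def main():
    config = CrawlerRunConfig(
        capture_network_requests=True,
        capture_console_messages=True,
    )
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun("https://example.com", config=config)
        print(f"Network events captured: {len(result.network_requests or [])}")
        for msg in (result.console_messages or [])[:5]:
            print(msg.get("type"), msg.get("text"))

asyncio.run(main())
```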

LICENSE: 20 changed lines

@@ -48,22 +48,4 @@ You may add Your own copyright statement to Your modifications and may provide a
9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
---
Attribution Requirement
All distributions, publications, or public uses of this software, or derivative works based on this software, must include the following attribution:
"This product includes software developed by UncleCode (https://x.com/unclecode) as part of the Crawl4AI project (https://github.com/unclecode/crawl4ai)."
This attribution must be displayed in a prominent and easily accessible location, such as:
- For software distributions: In a NOTICE file, README file, or equivalent documentation.
- For publications (research papers, articles, blog posts): In the acknowledgments section or a footnote.
- For websites/web applications: In an "About" or "Credits" section.
- For command-line tools: In the help/usage output.
This requirement ensures proper credit is given for the use of Crawl4AI and helps promote the project.
---
END OF TERMS AND CONDITIONS


@@ -1,2 +1 @@
include requirements.txt
recursive-include crawl4ai/js_snippet *.js
include requirements.txt


@@ -1,320 +0,0 @@
# Progressive Web Crawling with Adaptive Information Foraging
## Abstract
This paper presents a novel approach to web crawling that adaptively determines when sufficient information has been gathered to answer a given query. Unlike traditional exhaustive crawling methods, our Progressive Information Sufficiency (PIS) framework uses statistical measures to balance information completeness against crawling efficiency. We introduce a multi-strategy architecture supporting pure statistical, embedding-enhanced, and LLM-assisted approaches, with theoretical guarantees on convergence and practical evaluation methods using synthetic datasets.
## 1. Introduction
Traditional web crawling approaches follow predetermined patterns (breadth-first, depth-first) without consideration for information sufficiency. This work addresses the fundamental question: *"When do we have enough information to answer a query and similar queries in its domain?"*
We formalize this as an optimal stopping problem in information foraging, introducing metrics for coverage, consistency, and saturation that enable crawlers to make intelligent decisions about when to stop crawling and which links to follow.
## 2. Problem Formulation
### 2.1 Definitions
Let:
- **K** = {d₁, d₂, ..., dₙ} be the current knowledge base (crawled documents)
- **Q** be the user query
- **L** = {l₁, l₂, ..., lₘ} be available links with preview metadata
- **θ** be the confidence threshold for information sufficiency
### 2.2 Objectives
1. **Minimize** |K| (number of crawled pages)
2. **Maximize** P(answers(Q) | K) (probability of answering Q given K)
3. **Ensure** coverage of Q's domain (similar queries)
## 3. Mathematical Framework
### 3.1 Information Sufficiency Metric
We define Information Sufficiency as:
```
IS(K, Q) = min(Coverage(K, Q), Consistency(K, Q), 1 - Redundancy(K)) × DomainCoverage(K, Q)
```
### 3.2 Coverage Score
Coverage measures how well current knowledge covers query terms and related concepts (a toy computation is sketched after the definitions below):
```
Coverage(K, Q) = Σ(t ∈ Q) log(df(t, K) + 1) × idf(t) / |Q|
```
Where:
- df(t, K) = document frequency of term t in knowledge base K
- idf(t) = inverse document frequency weight
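To make the formula concrete, the following toy sketch (not the library's implementation) computes a coverage score over a small in-memory knowledge base; the idf smoothing is an illustrative choice:
```python
from math import log


def coverage(docs: list[str], query: str) -> float:
    """Toy reading of Coverage(K, Q): docs is the knowledge base K, query is Q."""
    n = len(docs)
    terms = query.lower().split()
    if not docs or not terms:
        return 0.0

    score = 0.0
    for t in terms:
        df = sum(1 for d in docs if t in d.lower())   # document frequency df(t, K)
        idf = log((n + 1) / (df + 1)) + 1             # smoothed idf weight (illustrative)
        score += log(df + 1) * idf
    return score / len(terms)                         # normalise by |Q|


# Two short documents and a two-term query
print(coverage(["async web crawling guide", "crawling with adaptive stopping"], "adaptive crawling"))
```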
### 3.3 Consistency Score
Consistency measures information coherence across documents:
```
Consistency(K, Q) = 1 - Var(answers from random subsets of K)
```
This captures the principle that sufficient knowledge should provide stable answers regardless of document subset.
### 3.4 Saturation Score
Saturation detects diminishing returns:
```
Saturation(K) = 1 - (ΔInfo(Kₙ) / ΔInfo(K₁))
```
Where ΔInfo represents marginal information gain from the nth crawl.
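A minimal sketch of the saturation computation, assuming ΔInfo is approximated by a per-crawl scalar gain such as the number of new terms each crawl contributes:
```python
def saturation(marginal_gains: list[float]) -> float:
    """Saturation(K) = 1 - ΔInfo(K_n) / ΔInfo(K_1), using recorded per-crawl gains."""
    if len(marginal_gains) < 2 or marginal_gains[0] == 0:
        return 0.0
    return 1.0 - marginal_gains[-1] / marginal_gains[0]


# New-term counts per crawl step as a crude proxy for ΔInfo
print(saturation([120.0, 60.0, 20.0, 5.0]))   # ≈ 0.96, i.e. strongly diminishing returns
```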
### 3.5 Link Value Prediction
Expected information gain from uncrawled links (a toy computation follows the component definitions below):
```
ExpectedGain(l) = Relevance(l, Q) × Novelty(l, K) × Authority(l)
```
Components:
- **Relevance**: BM25(preview_text, Q)
- **Novelty**: 1 - max_similarity(preview, K)
- **Authority**: f(url_structure, domain_metrics)
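A toy computation combining the three components is sketched below; plain term overlap stands in for BM25 relevance and Jaccard similarity stands in for the similarity measure behind novelty, so the numbers are only illustrative:
```python
from urllib.parse import urlparse


def _tokens(text: str) -> set[str]:
    return set(text.lower().split())


def expected_gain(preview_text: str, href: str, query: str, knowledge: list[str]) -> float:
    """ExpectedGain(l) = Relevance(l, Q) × Novelty(l, K) × Authority(l), simplified."""
    q, p = _tokens(query), _tokens(preview_text)
    relevance = len(q & p) / len(q) if q else 0.0

    def jaccard(a: set, b: set) -> float:
        return len(a & b) / len(a | b) if a | b else 0.0

    novelty = 1.0 - max((jaccard(p, _tokens(d)) for d in knowledge), default=0.0)

    # Shallow URL paths receive a mild authority bonus (stand-in for domain metrics)
    depth = len([seg for seg in urlparse(href).path.split("/") if seg])
    authority = 1.0 / (1.0 + depth)

    return relevance * novelty * authority


print(expected_gain("adaptive crawling tutorial", "https://example.com/blog/adaptive",
                    "adaptive crawling", ["generic home page text"]))
```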
## 4. Algorithmic Approach
### 4.1 Progressive Crawling Algorithm
```
Algorithm: ProgressiveCrawl(start_url, query, θ)

    K ← ∅
    crawled ← {start_url}
    pending ← extract_links(crawl(start_url))

    while IS(K, Q) < θ and |crawled| < max_pages:
        candidates ← rank_by_expected_gain(pending, Q, K)

        if max(ExpectedGain(candidates)) < min_gain:
            break  // Diminishing returns

        to_crawl ← top_k(candidates)
        new_docs ← parallel_crawl(to_crawl)

        K ← K ∪ new_docs
        crawled ← crawled ∪ to_crawl
        pending ← extract_new_links(new_docs) - crawled

    return K
```
### 4.2 Stopping Criteria
Crawling terminates when any of the following holds (a minimal check combining these conditions is sketched after the list):
1. IS(K, Q) ≥ θ (sufficient information)
2. d(IS)/d(crawls) < ε (plateau reached)
3. |crawled| ≥ max_pages (resource limit)
4. max(ExpectedGain) < min_gain (no promising links)
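A minimal check combining the four conditions, with illustrative thresholds rather than the library's defaults:
```python
def should_stop(is_score: float, is_delta: float, pages_crawled: int,
                best_expected_gain: float, *,
                theta: float = 0.8, epsilon: float = 0.01,
                max_pages: int = 50, min_gain: float = 0.05) -> bool:
    """True when any of the four termination conditions above is met."""
    return (
        is_score >= theta                  # 1. sufficient information
        or is_delta < epsilon              # 2. IS has plateaued
        or pages_crawled >= max_pages      # 3. resource limit reached
        or best_expected_gain < min_gain   # 4. no promising links left
    )


print(should_stop(is_score=0.85, is_delta=0.03, pages_crawled=12, best_expected_gain=0.2))  # True (condition 1)
```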
## 5. Multi-Strategy Architecture
### 5.1 Strategy Pattern Design
```
AbstractStrategy
├── StatisticalStrategy (no LLM, no embeddings)
├── EmbeddingStrategy (with semantic similarity)
└── LLMStrategy (with language model assistance)
```
### 5.2 Statistical Strategy
Pure statistical approach using:
- BM25 for relevance scoring
- Term frequency analysis for coverage
- Graph structure for authority
- No external models required
**Advantages**: Fast, no API costs, works offline
**Best for**: Technical documentation, specific terminology
### 5.3 Embedding Strategy (Implemented)
Semantic understanding through embeddings:
- Query expansion into semantic variations
- Coverage mapping in embedding space
- Gap-driven link selection
- Validation-based stopping criteria
**Mathematical Framework**:
```
Coverage(K, Q) = mean(max_similarity(q, K) for q in Q_expanded)
Gap(q) = 1 - max_similarity(q, K)
LinkScore(l) = Σ(Gap(q) × relevance(l, q)) × (1 - redundancy(l, K))
```
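A sketch of this computation, assuming the expanded query variations, knowledge-base chunks, and link previews have already been embedded (by any sentence-embedding model) into NumPy vectors; the names here are illustrative rather than the library's:
```python
import numpy as np


def _cos(a: np.ndarray, b: np.ndarray) -> float:
    return float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-12))


def embedding_coverage(query_vecs: list[np.ndarray], kb_vecs: list[np.ndarray]):
    """Coverage(K, Q) and per-variation gaps over pre-computed embeddings."""
    sims, gaps = [], {}
    for i, q in enumerate(query_vecs):
        best = max((_cos(q, d) for d in kb_vecs), default=0.0)
        sims.append(best)
        gaps[i] = 1.0 - best                      # Gap(q) = 1 - max_similarity(q, K)
    return (float(np.mean(sims)) if sims else 0.0), gaps


def link_score(link_vec, query_vecs, gaps, kb_vecs) -> float:
    """LinkScore(l) = Σ Gap(q) × relevance(l, q), discounted by redundancy with K."""
    gain = sum(gaps[i] * max(_cos(link_vec, q), 0.0) for i, q in enumerate(query_vecs))
    redundancy = max((_cos(link_vec, d) for d in kb_vecs), default=0.0)
    return gain * (1.0 - redundancy)
```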
**Key Parameters**:
- `embedding_k_exp`: Exponential decay factor for distance-to-score mapping
- `embedding_coverage_radius`: Distance threshold for query coverage
- `embedding_min_confidence_threshold`: Minimum relevance threshold
**Advantages**: Semantic understanding, handles ambiguity, detects irrelevance
**Best for**: Research queries, conceptual topics, diverse content
### 5.4 Progressive Enhancement Path
1. **Level 0**: Statistical only (implemented)
2. **Level 1**: + Embeddings for semantic similarity (implemented)
3. **Level 2**: + LLM for query understanding (future)
## 6. Evaluation Methodology
### 6.1 Synthetic Dataset Generation
Using LLM to create evaluation data:
```python
def generate_synthetic_dataset(domain_url):
    # 1. Fully crawl domain
    full_knowledge = exhaustive_crawl(domain_url)

    # 2. Generate answerable queries
    queries = llm_generate_queries(full_knowledge)

    # 3. Create query variations (synonyms, sub/super queries), one list per query
    variations = []
    for q in queries:
        variations.append(generate_variations(q))

    return queries, variations, full_knowledge
```
### 6.2 Evaluation Metrics
1. **Efficiency**: Information gained / Pages crawled
2. **Completeness**: Answerable queries / Total queries
3. **Redundancy**: 1 - (Unique information / Total information)
4. **Convergence Rate**: Pages to 95% completeness
### 6.3 Ablation Studies
- Impact of each score component (coverage, consistency, saturation)
- Sensitivity to threshold parameters
- Performance across different domain types
## 7. Theoretical Properties
### 7.1 Convergence Guarantee
**Theorem**: For finite websites, ProgressiveCrawl converges to IS(K, Q) ≥ θ or exhausts all reachable pages.
**Proof sketch**: IS(K, Q) is monotonically non-decreasing with each crawl, bounded above by 1.
### 7.2 Optimality
Under certain assumptions about link preview accuracy:
- Expected crawls ≤ 2 × optimal_crawls
- Approximation ratio improves with preview quality
## 8. Implementation Design
### 8.1 Core Components
1. **CrawlState**: Maintains crawl history and metrics
2. **AdaptiveConfig**: Configuration parameters
3. **CrawlStrategy**: Pluggable strategy interface
4. **AdaptiveCrawler**: Main orchestrator
### 8.2 Integration with Crawl4AI
- Wraps existing AsyncWebCrawler
- Leverages link preview functionality
- Maintains backward compatibility
### 8.3 Persistence
Knowledge base serialization is provided for the following (a minimal usage sketch follows this list):
- Resumable crawls
- Knowledge sharing
- Offline analysis
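A minimal usage sketch consistent with this design, using the `AdaptiveCrawler`/`AdaptiveConfig` interface that wraps the existing `AsyncWebCrawler`; the start URL and query are placeholders, the thresholds are illustrative, and the exact attributes of the returned state object are not assumed here:
```python
import asyncio
from crawl4ai import AsyncWebCrawler, AdaptiveCrawler, AdaptiveConfig


async def main():
    config = AdaptiveConfig(
        confidence_threshold=0.75,   # stop once information sufficiency is high enough
        max_pages=15,                # hard resource limit
        strategy="statistical",      # statistical strategy (Section 5.2)
    )

    async with AsyncWebCrawler() as crawler:         # existing crawler is wrapped, not replaced
        adaptive = AdaptiveCrawler(crawler, config)
        state = await adaptive.digest(
            start_url="https://docs.example.com",
            query="authentication and session management",
        )
        # The returned CrawlState carries crawl history and metrics and can be
        # serialised for resumable crawls or offline analysis.
        print(state)


if __name__ == "__main__":
    asyncio.run(main())
```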
## 9. Future Directions
### 9.1 Advanced Scoring
- Temporal information value
- Multi-query optimization
- Active learning from user feedback
### 9.2 Distributed Crawling
- Collaborative knowledge building
- Federated information sufficiency
### 9.3 Domain Adaptation
- Transfer learning across domains
- Meta-learning for threshold selection
## 10. Conclusion
Progressive crawling with adaptive information foraging provides a principled approach to efficient web information extraction. By combining coverage, consistency, and saturation metrics, we can determine information sufficiency without ground truth labels. The multi-strategy architecture allows graceful enhancement from pure statistical to LLM-assisted approaches based on requirements and resources.
## References
1. Manning, C. D., Raghavan, P., & Schütze, H. (2008). Introduction to Information Retrieval. Cambridge University Press.
2. Robertson, S., & Zaragoza, H. (2009). The Probabilistic Relevance Framework: BM25 and Beyond. Foundations and Trends in Information Retrieval.
3. Pirolli, P., & Card, S. (1999). Information Foraging. Psychological Review, 106(4), 643-675.
4. Dasgupta, S. (2005). Analysis of a greedy active learning strategy. Advances in Neural Information Processing Systems.
## Appendix A: Implementation Pseudocode
```python
from math import log
from statistics import mean


class StatisticalStrategy:
    # Normalisation constant for coverage; in practice this would be derived from
    # the maximum attainable per-term score for the current query.
    max_possible_score = 10.0

    def calculate_confidence(self, state):
        coverage = self.calculate_coverage(state)
        consistency = self.calculate_consistency(state)
        saturation = self.calculate_saturation(state)
        return min(coverage, consistency, saturation)

    def calculate_coverage(self, state):
        # BM25-style term coverage over the crawled knowledge base
        term_scores = []
        for term in state.query.split():
            df = state.document_frequencies.get(term, 0)
            idf = self.idf_cache.get(term, 1.0)
            term_scores.append(log(df + 1) * idf)
        return mean(term_scores) / self.max_possible_score if term_scores else 0.0

    def rank_links(self, state):
        # ExpectedGain(l) = Relevance × Novelty × Authority (Section 3.5)
        scored_links = []
        for link in state.pending_links:
            relevance = self.bm25_score(link.preview_text, state.query)
            novelty = self.calculate_novelty(link, state.knowledge_base)
            authority = self.url_authority(link.href)
            score = relevance * novelty * authority
            scored_links.append((link, score))
        return sorted(scored_links, key=lambda x: x[1], reverse=True)
```
## Appendix B: Evaluation Protocol
1. **Dataset Creation**:
- Select diverse domains (documentation, blogs, e-commerce)
- Generate 100 queries per domain using LLM
- Create query variations (5-10 per query)
2. **Baseline Comparisons**:
- BFS crawler (depth-limited)
- DFS crawler (depth-limited)
- Random crawler
- Oracle (knows relevant pages)
3. **Metrics Collection**:
- Pages crawled vs query answerability
- Time to sufficient confidence
- False positive/negative rates
4. **Statistical Analysis**:
- ANOVA for strategy comparison
- Regression for parameter sensitivity
- Bootstrap for confidence intervals

View File

@@ -1,809 +0,0 @@
# 🚀🤖 Crawl4AI: Open-source LLM Friendly Web Crawler & Scraper.
<div align="center">
<a href="https://trendshift.io/repositories/11716" target="_blank"><img src="https://trendshift.io/api/badge/repositories/11716" alt="unclecode%2Fcrawl4ai | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
[![GitHub Stars](https://img.shields.io/github/stars/unclecode/crawl4ai?style=social)](https://github.com/unclecode/crawl4ai/stargazers)
[![GitHub Forks](https://img.shields.io/github/forks/unclecode/crawl4ai?style=social)](https://github.com/unclecode/crawl4ai/network/members)
[![PyPI version](https://badge.fury.io/py/crawl4ai.svg)](https://badge.fury.io/py/crawl4ai)
[![Python Version](https://img.shields.io/pypi/pyversions/crawl4ai)](https://pypi.org/project/crawl4ai/)
[![Downloads](https://static.pepy.tech/badge/crawl4ai/month)](https://pepy.tech/project/crawl4ai)
[![GitHub Sponsors](https://img.shields.io/github/sponsors/unclecode?style=flat&logo=GitHub-Sponsors&label=Sponsors&color=pink)](https://github.com/sponsors/unclecode)
<p align="center">
<a href="https://x.com/crawl4ai">
<img src="https://img.shields.io/badge/Follow%20on%20X-000000?style=for-the-badge&logo=x&logoColor=white" alt="Follow on X" />
</a>
<a href="https://www.linkedin.com/company/crawl4ai">
<img src="https://img.shields.io/badge/Follow%20on%20LinkedIn-0077B5?style=for-the-badge&logo=linkedin&logoColor=white" alt="Follow on LinkedIn" />
</a>
<a href="https://discord.gg/jP8KfhDhyN">
<img src="https://img.shields.io/badge/Join%20our%20Discord-5865F2?style=for-the-badge&logo=discord&logoColor=white" alt="Join our Discord" />
</a>
</p>
</div>
Crawl4AI is the #1 trending GitHub repository, actively maintained by a vibrant community. It delivers blazing-fast, AI-ready web crawling tailored for LLMs, AI agents, and data pipelines. Open source, flexible, and built for real-time performance, Crawl4AI empowers developers with unmatched speed, precision, and deployment ease.
[✨ Check out latest update v0.7.0](#-recent-updates)
🎉 **Version 0.7.0 is now available!** The Adaptive Intelligence Update introduces groundbreaking features: Adaptive Crawling that learns website patterns, Virtual Scroll support for infinite pages, intelligent Link Preview with 3-layer scoring, Async URL Seeder for massive discovery, and significant performance improvements. [Read the release notes →](https://github.com/unclecode/crawl4ai/blob/main/docs/blog/release-v0.7.0.md)
<details>
<summary>🤓 <strong>My Personal Story</strong></summary>
My journey with computers started in childhood when my dad, a computer scientist, introduced me to an Amstrad computer. Those early days sparked a fascination with technology, leading me to pursue computer science and specialize in NLP during my postgraduate studies. It was during this time that I first delved into web crawling, building tools to help researchers organize papers and extract information from publications, a challenging yet rewarding experience that honed my skills in data extraction.
Fast forward to 2023: I was working on a tool for a project and needed a crawler to convert a webpage into markdown. While exploring solutions, I found one that claimed to be open-source but required creating an account and generating an API token. Worse, it turned out to be a SaaS model charging $16, and its quality didn't meet my standards. Frustrated, I realized this was a deeper problem. That frustration turned into turbo anger mode, and I decided to build my own solution. In just a few days, I created Crawl4AI. To my surprise, it went viral, earning thousands of GitHub stars and resonating with a global community.
I made Crawl4AI open-source for two reasons. First, its my way of giving back to the open-source community that has supported me throughout my career. Second, I believe data should be accessible to everyone, not locked behind paywalls or monopolized by a few. Open access to data lays the foundation for the democratization of AI, a vision where individuals can train their own models and take ownership of their information. This library is the first step in a larger journey to create the best open-source data extraction and generation tool the world has ever seen, built collaboratively by a passionate community.
Thank you to everyone who has supported this project, used it, and shared feedback. Your encouragement motivates me to dream even bigger. Join us, file issues, submit PRs, or spread the word. Together, we can build a tool that truly empowers people to access their own data and reshape the future of AI.
</details>
## 🧐 Why Crawl4AI?
1. **Built for LLMs**: Creates smart, concise Markdown optimized for RAG and fine-tuning applications.
2. **Lightning Fast**: Delivers results faster with real-time, cost-efficient performance.
3. **Flexible Browser Control**: Offers session management, proxies, and custom hooks for seamless data access.
4. **Heuristic Intelligence**: Uses advanced algorithms for efficient extraction, reducing reliance on costly models.
5. **Open Source & Deployable**: Fully open-source with no API keys—ready for Docker and cloud integration.
6. **Thriving Community**: Actively maintained by a vibrant community and the #1 trending GitHub repository.
## 🚀 Quick Start
1. Install Crawl4AI:
```bash
# Install the package
pip install -U crawl4ai
# For pre release versions
pip install crawl4ai --pre
# Run post-installation setup
crawl4ai-setup
# Verify your installation
crawl4ai-doctor
```
If you encounter any browser-related issues, you can install them manually:
```bash
python -m playwright install --with-deps chromium
```
2. Run a simple web crawl with Python:
```python
import asyncio
from crawl4ai import *

async def main():
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(
            url="https://www.nbcnews.com/business",
        )
        print(result.markdown)

if __name__ == "__main__":
    asyncio.run(main())
```
3. Or use the new command-line interface:
```bash
# Basic crawl with markdown output
crwl https://www.nbcnews.com/business -o markdown
# Deep crawl with BFS strategy, max 10 pages
crwl https://docs.crawl4ai.com --deep-crawl bfs --max-pages 10
# Use LLM extraction with a specific question
crwl https://www.example.com/products -q "Extract all product prices"
```
## ✨ Features
<details>
<summary>📝 <strong>Markdown Generation</strong></summary>
- 🧹 **Clean Markdown**: Generates clean, structured Markdown with accurate formatting.
- 🎯 **Fit Markdown**: Heuristic-based filtering to remove noise and irrelevant parts for AI-friendly processing.
- 🔗 **Citations and References**: Converts page links into a numbered reference list with clean citations.
- 🛠️ **Custom Strategies**: Users can create their own Markdown generation strategies tailored to specific needs.
- 📚 **BM25 Algorithm**: Employs BM25-based filtering for extracting core information and removing irrelevant content.
</details>
<details>
<summary>📊 <strong>Structured Data Extraction</strong></summary>
- 🤖 **LLM-Driven Extraction**: Supports all LLMs (open-source and proprietary) for structured data extraction.
- 🧱 **Chunking Strategies**: Implements chunking (topic-based, regex, sentence-level) for targeted content processing.
- 🌌 **Cosine Similarity**: Find relevant content chunks based on user queries for semantic extraction.
- 🔎 **CSS-Based Extraction**: Fast schema-based data extraction using XPath and CSS selectors.
- 🔧 **Schema Definition**: Define custom schemas for extracting structured JSON from repetitive patterns.
</details>
<details>
<summary>🌐 <strong>Browser Integration</strong></summary>
- 🖥️ **Managed Browser**: Use user-owned browsers with full control, avoiding bot detection.
- 🔄 **Remote Browser Control**: Connect to Chrome Developer Tools Protocol for remote, large-scale data extraction.
- 👤 **Browser Profiler**: Create and manage persistent profiles with saved authentication states, cookies, and settings.
- 🔒 **Session Management**: Preserve browser states and reuse them for multi-step crawling.
- 🧩 **Proxy Support**: Seamlessly connect to proxies with authentication for secure access.
- ⚙️ **Full Browser Control**: Modify headers, cookies, user agents, and more for tailored crawling setups.
- 🌍 **Multi-Browser Support**: Compatible with Chromium, Firefox, and WebKit.
- 📐 **Dynamic Viewport Adjustment**: Automatically adjusts the browser viewport to match page content, ensuring complete rendering and capturing of all elements.
</details>
<details>
<summary>🔎 <strong>Crawling & Scraping</strong></summary>
- 🖼️ **Media Support**: Extract images, audio, videos, and responsive image formats like `srcset` and `picture`.
- 🚀 **Dynamic Crawling**: Execute JavaScript and wait on async or sync conditions to extract dynamic content.
- 📸 **Screenshots**: Capture page screenshots during crawling for debugging or analysis.
- 📂 **Raw Data Crawling**: Directly process raw HTML (`raw:`) or local files (`file://`).
- 🔗 **Comprehensive Link Extraction**: Extracts internal, external links, and embedded iframe content.
- 🛠️ **Customizable Hooks**: Define hooks at every step to customize crawling behavior.
- 💾 **Caching**: Cache data for improved speed and to avoid redundant fetches.
- 📄 **Metadata Extraction**: Retrieve structured metadata from web pages.
- 📡 **IFrame Content Extraction**: Seamless extraction from embedded iframe content.
- 🕵️ **Lazy Load Handling**: Waits for images to fully load, ensuring no content is missed due to lazy loading.
- 🔄 **Full-Page Scanning**: Simulates scrolling to load and capture all dynamic content, perfect for infinite scroll pages.
</details>
<details>
<summary>🚀 <strong>Deployment</strong></summary>
- 🐳 **Dockerized Setup**: Optimized Docker image with FastAPI server for easy deployment.
- 🔑 **Secure Authentication**: Built-in JWT token authentication for API security.
- 🔄 **API Gateway**: One-click deployment with secure token authentication for API-based workflows.
- 🌐 **Scalable Architecture**: Designed for mass-scale production and optimized server performance.
- ☁️ **Cloud Deployment**: Ready-to-deploy configurations for major cloud platforms.
</details>
<details>
<summary>🎯 <strong>Additional Features</strong></summary>
- 🕶️ **Stealth Mode**: Avoid bot detection by mimicking real users.
- 🏷️ **Tag-Based Content Extraction**: Refine crawling based on custom tags, headers, or metadata.
- 🔗 **Link Analysis**: Extract and analyze all links for detailed data exploration.
- 🛡️ **Error Handling**: Robust error management for seamless execution.
- 🔐 **CORS & Static Serving**: Supports filesystem-based caching and cross-origin requests.
- 📖 **Clear Documentation**: Simplified and updated guides for onboarding and advanced usage.
- 🙌 **Community Recognition**: Acknowledges contributors and pull requests for transparency.
</details>
## Try it Now!
✨ Play around with this [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1SgRPrByQLzjRfwoRNq1wSGE9nYY_EE8C?usp=sharing)
✨ Visit our [Documentation Website](https://docs.crawl4ai.com/)
## Installation 🛠️
Crawl4AI offers flexible installation options to suit various use cases. You can install it as a Python package or use Docker.
<details>
<summary>🐍 <strong>Using pip</strong></summary>
Choose the installation option that best fits your needs:
### Basic Installation
For basic web crawling and scraping tasks:
```bash
pip install crawl4ai
crawl4ai-setup # Setup the browser
```
By default, this will install the asynchronous version of Crawl4AI, using Playwright for web crawling.
👉 **Note**: When you install Crawl4AI, the `crawl4ai-setup` should automatically install and set up Playwright. However, if you encounter any Playwright-related errors, you can manually install it using one of these methods:
1. Through the command line:
```bash
playwright install
```
2. If the above doesn't work, try this more specific command:
```bash
python -m playwright install chromium
```
This second method has proven to be more reliable in some cases.
---
### Installation with Synchronous Version
The sync version is deprecated and will be removed in future versions. If you need the synchronous version using Selenium:
```bash
pip install crawl4ai[sync]
```
---
### Development Installation
For contributors who plan to modify the source code:
```bash
git clone https://github.com/unclecode/crawl4ai.git
cd crawl4ai
pip install -e . # Basic installation in editable mode
```
Install optional features:
```bash
pip install -e ".[torch]" # With PyTorch features
pip install -e ".[transformer]" # With Transformer features
pip install -e ".[cosine]" # With cosine similarity features
pip install -e ".[sync]" # With synchronous crawling (Selenium)
pip install -e ".[all]" # Install all optional features
```
</details>
<details>
<summary>🐳 <strong>Docker Deployment</strong></summary>
> 🚀 **Now Available!** Our completely redesigned Docker implementation is here! This new solution makes deployment more efficient and seamless than ever.
### New Docker Features
The new Docker implementation includes:
- **Browser pooling** with page pre-warming for faster response times
- **Interactive playground** to test and generate request code
- **MCP integration** for direct connection to AI tools like Claude Code
- **Comprehensive API endpoints** including HTML extraction, screenshots, PDF generation, and JavaScript execution
- **Multi-architecture support** with automatic detection (AMD64/ARM64)
- **Optimized resources** with improved memory management
### Getting Started
```bash
# Pull and run the latest release candidate
docker pull unclecode/crawl4ai:0.7.0
docker run -d -p 11235:11235 --name crawl4ai --shm-size=1g unclecode/crawl4ai:0.7.0
# Visit the playground at http://localhost:11235/playground
```
For complete documentation, see our [Docker Deployment Guide](https://docs.crawl4ai.com/core/docker-deployment/).
---
### Quick Test
Run a quick test (works for both Docker options):
```python
import requests
# Submit a crawl job
response = requests.post(
"http://localhost:11235/crawl",
json={"urls": ["https://example.com"], "priority": 10}
)
if response.status_code == 200:
print("Crawl job submitted successfully.")
if "results" in response.json():
results = response.json()["results"]
print("Crawl job completed. Results:")
for result in results:
print(result)
else:
task_id = response.json()["task_id"]
print(f"Crawl job submitted. Task ID:: {task_id}")
result = requests.get(f"http://localhost:11235/task/{task_id}")
```
For more examples, see our [Docker Examples](https://github.com/unclecode/crawl4ai/blob/main/docs/examples/docker_example.py). For advanced configuration, environment variables, and usage examples, see our [Docker Deployment Guide](https://docs.crawl4ai.com/basic/docker-deployment/).
</details>
## 🔬 Advanced Usage Examples 🔬
You can check the project structure in the directory [docs/examples](https://github.com/unclecode/crawl4ai/tree/main/docs/examples). Over there, you can find a variety of examples; here, some popular examples are shared.
<details>
<summary>📝 <strong>Heuristic Markdown Generation with Clean and Fit Markdown</strong></summary>
```python
import asyncio
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
from crawl4ai.content_filter_strategy import PruningContentFilter, BM25ContentFilter
from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator
async def main():
browser_config = BrowserConfig(
headless=True,
verbose=True,
)
run_config = CrawlerRunConfig(
cache_mode=CacheMode.ENABLED,
markdown_generator=DefaultMarkdownGenerator(
content_filter=PruningContentFilter(threshold=0.48, threshold_type="fixed", min_word_threshold=0)
),
# markdown_generator=DefaultMarkdownGenerator(
# content_filter=BM25ContentFilter(user_query="WHEN_WE_FOCUS_BASED_ON_A_USER_QUERY", bm25_threshold=1.0)
# ),
)
async with AsyncWebCrawler(config=browser_config) as crawler:
result = await crawler.arun(
url="https://docs.micronaut.io/4.7.6/guide/",
config=run_config
)
print(len(result.markdown.raw_markdown))
print(len(result.markdown.fit_markdown))
if __name__ == "__main__":
asyncio.run(main())
```
</details>
<details>
<summary>🖥️ <strong>Executing JavaScript & Extract Structured Data without LLMs</strong></summary>
```python
import asyncio
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
from crawl4ai import JsonCssExtractionStrategy
import json
async def main():
schema = {
"name": "KidoCode Courses",
"baseSelector": "section.charge-methodology .w-tab-content > div",
"fields": [
{
"name": "section_title",
"selector": "h3.heading-50",
"type": "text",
},
{
"name": "section_description",
"selector": ".charge-content",
"type": "text",
},
{
"name": "course_name",
"selector": ".text-block-93",
"type": "text",
},
{
"name": "course_description",
"selector": ".course-content-text",
"type": "text",
},
{
"name": "course_icon",
"selector": ".image-92",
"type": "attribute",
"attribute": "src"
}
}
}
extraction_strategy = JsonCssExtractionStrategy(schema, verbose=True)
browser_config = BrowserConfig(
headless=False,
verbose=True
)
run_config = CrawlerRunConfig(
extraction_strategy=extraction_strategy,
js_code=["""(async () => {const tabs = document.querySelectorAll("section.charge-methodology .tabs-menu-3 > div");for(let tab of tabs) {tab.scrollIntoView();tab.click();await new Promise(r => setTimeout(r, 500));}})();"""],
cache_mode=CacheMode.BYPASS
)
async with AsyncWebCrawler(config=browser_config) as crawler:
result = await crawler.arun(
url="https://www.kidocode.com/degrees/technology",
config=run_config
)
companies = json.loads(result.extracted_content)
print(f"Successfully extracted {len(companies)} companies")
print(json.dumps(companies[0], indent=2))
if __name__ == "__main__":
asyncio.run(main())
```
</details>
<details>
<summary>📚 <strong>Extracting Structured Data with LLMs</strong></summary>
```python
import os
import asyncio
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode, LLMConfig
from crawl4ai import LLMExtractionStrategy
from pydantic import BaseModel, Field
class OpenAIModelFee(BaseModel):
model_name: str = Field(..., description="Name of the OpenAI model.")
input_fee: str = Field(..., description="Fee for input token for the OpenAI model.")
output_fee: str = Field(..., description="Fee for output token for the OpenAI model.")
async def main():
browser_config = BrowserConfig(verbose=True)
run_config = CrawlerRunConfig(
word_count_threshold=1,
extraction_strategy=LLMExtractionStrategy(
# Here you can use any provider that Litellm library supports, for instance: ollama/qwen2
# provider="ollama/qwen2", api_token="no-token",
llm_config = LLMConfig(provider="openai/gpt-4o", api_token=os.getenv('OPENAI_API_KEY')),
schema=OpenAIModelFee.schema(),
extraction_type="schema",
instruction="""From the crawled content, extract all mentioned model names along with their fees for input and output tokens.
Do not miss any models in the entire content. One extracted model JSON format should look like this:
{"model_name": "GPT-4", "input_fee": "US$10.00 / 1M tokens", "output_fee": "US$30.00 / 1M tokens"}."""
),
cache_mode=CacheMode.BYPASS,
)
async with AsyncWebCrawler(config=browser_config) as crawler:
result = await crawler.arun(
url='https://openai.com/api/pricing/',
config=run_config
)
print(result.extracted_content)
if __name__ == "__main__":
asyncio.run(main())
```
</details>
<details>
<summary>🤖 <strong>Using Your own Browser with Custom User Profile</strong></summary>
```python
import os, sys
from pathlib import Path
import asyncio, time
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
async def test_news_crawl():
# Create a persistent user data directory
user_data_dir = os.path.join(Path.home(), ".crawl4ai", "browser_profile")
os.makedirs(user_data_dir, exist_ok=True)
browser_config = BrowserConfig(
verbose=True,
headless=True,
user_data_dir=user_data_dir,
use_persistent_context=True,
)
run_config = CrawlerRunConfig(
cache_mode=CacheMode.BYPASS
)
async with AsyncWebCrawler(config=browser_config) as crawler:
url = "ADDRESS_OF_A_CHALLENGING_WEBSITE"
result = await crawler.arun(
url,
config=run_config,
magic=True,
)
print(f"Successfully crawled {url}")
print(f"Content length: {len(result.markdown)}")
```
</details>
## ✨ Recent Updates
### Version 0.7.0 Release Highlights - The Adaptive Intelligence Update
- **🧠 Adaptive Crawling**: Your crawler now learns and adapts to website patterns automatically:
```python
config = AdaptiveConfig(
confidence_threshold=0.7, # Min confidence to stop crawling
max_depth=5, # Maximum crawl depth
max_pages=20, # Maximum number of pages to crawl
strategy="statistical"
)
async with AsyncWebCrawler() as crawler:
adaptive_crawler = AdaptiveCrawler(crawler, config)
state = await adaptive_crawler.digest(
start_url="https://news.example.com",
query="latest news content"
)
# Crawler learns patterns and improves extraction over time
```
- **🌊 Virtual Scroll Support**: Complete content extraction from infinite scroll pages:
```python
scroll_config = VirtualScrollConfig(
container_selector="[data-testid='feed']",
scroll_count=20,
scroll_by="container_height",
wait_after_scroll=1.0
)
result = await crawler.arun(url, config=CrawlerRunConfig(
virtual_scroll_config=scroll_config
))
```
- **🔗 Intelligent Link Analysis**: 3-layer scoring system for smart link prioritization:
```python
link_config = LinkPreviewConfig(
query="machine learning tutorials",
score_threshold=0.3,
concurrent_requests=10
)
result = await crawler.arun(url, config=CrawlerRunConfig(
link_preview_config=link_config,
score_links=True
))
# Links ranked by relevance and quality
```
- **🎣 Async URL Seeder**: Discover thousands of URLs in seconds:
```python
seeder = AsyncUrlSeeder(SeedingConfig(
source="sitemap+cc",
pattern="*/blog/*",
query="python tutorials",
score_threshold=0.4
))
urls = await seeder.discover("https://example.com")
```
- **⚡ Performance Boost**: Up to 3x faster with optimized resource handling and memory efficiency
Read the full details in our [0.7.0 Release Notes](https://docs.crawl4ai.com/blog/release-v0.7.0) or check the [CHANGELOG](https://github.com/unclecode/crawl4ai/blob/main/CHANGELOG.md).
## Version Numbering in Crawl4AI
Crawl4AI follows standard Python version numbering conventions (PEP 440) to help users understand the stability and features of each release.
### Version Numbers Explained
Our version numbers follow this pattern: `MAJOR.MINOR.PATCH` (e.g., 0.4.3)
#### Pre-release Versions
We use different suffixes to indicate development stages:
- `dev` (0.4.3dev1): Development versions, unstable
- `a` (0.4.3a1): Alpha releases, experimental features
- `b` (0.4.3b1): Beta releases, feature complete but needs testing
- `rc` (0.4.3rc1): Release candidates, potential final version
#### Installation
- Regular installation (stable version):
```bash
pip install -U crawl4ai
```
- Install pre-release versions:
```bash
pip install crawl4ai --pre
```
- Install specific version:
```bash
pip install crawl4ai==0.4.3b1
```
#### Why Pre-releases?
We use pre-releases to:
- Test new features in real-world scenarios
- Gather feedback before final releases
- Ensure stability for production users
- Allow early adopters to try new features
For production environments, we recommend using the stable version. For testing new features, you can opt-in to pre-releases using the `--pre` flag.
## 📖 Documentation & Roadmap
> 🚨 **Documentation Update Alert**: We're undertaking a major documentation overhaul next week to reflect recent updates and improvements. Stay tuned for a more comprehensive and up-to-date guide!
For current documentation, including installation instructions, advanced features, and API reference, visit our [Documentation Website](https://docs.crawl4ai.com/).
To check our development plans and upcoming features, visit our [Roadmap](https://github.com/unclecode/crawl4ai/blob/main/ROADMAP.md).
<details>
<summary>📈 <strong>Development TODOs</strong></summary>
- [x] 0. Graph Crawler: Smart website traversal using graph search algorithms for comprehensive nested page extraction
- [ ] 1. Question-Based Crawler: Natural language driven web discovery and content extraction
- [ ] 2. Knowledge-Optimal Crawler: Smart crawling that maximizes knowledge while minimizing data extraction
- [ ] 3. Agentic Crawler: Autonomous system for complex multi-step crawling operations
- [ ] 4. Automated Schema Generator: Convert natural language to extraction schemas
- [ ] 5. Domain-Specific Scrapers: Pre-configured extractors for common platforms (academic, e-commerce)
- [ ] 6. Web Embedding Index: Semantic search infrastructure for crawled content
- [ ] 7. Interactive Playground: Web UI for testing, comparing strategies with AI assistance
- [ ] 8. Performance Monitor: Real-time insights into crawler operations
- [ ] 9. Cloud Integration: One-click deployment solutions across cloud providers
- [ ] 10. Sponsorship Program: Structured support system with tiered benefits
- [ ] 11. Educational Content: "How to Crawl" video series and interactive tutorials
</details>
## 🤝 Contributing
We welcome contributions from the open-source community. Check out our [contribution guidelines](https://github.com/unclecode/crawl4ai/blob/main/CONTRIBUTORS.md) for more information.
## 📄 License & Attribution
This project is licensed under the Apache License 2.0; attribution via the badges below is recommended. See the [Apache 2.0 License](https://github.com/unclecode/crawl4ai/blob/main/LICENSE) file for details.
### Attribution Requirements
When using Crawl4AI, you must include one of the following attribution methods:
#### 1. Badge Attribution (Recommended)
Add one of these badges to your README, documentation, or website:
| Theme | Badge |
|-------|-------|
| **Disco Theme (Animated)** | <a href="https://github.com/unclecode/crawl4ai"><img src="./docs/assets/powered-by-disco.svg" alt="Powered by Crawl4AI" width="200"/></a> |
| **Night Theme (Dark with Neon)** | <a href="https://github.com/unclecode/crawl4ai"><img src="./docs/assets/powered-by-night.svg" alt="Powered by Crawl4AI" width="200"/></a> |
| **Dark Theme (Classic)** | <a href="https://github.com/unclecode/crawl4ai"><img src="./docs/assets/powered-by-dark.svg" alt="Powered by Crawl4AI" width="200"/></a> |
| **Light Theme (Classic)** | <a href="https://github.com/unclecode/crawl4ai"><img src="./docs/assets/powered-by-light.svg" alt="Powered by Crawl4AI" width="200"/></a> |
HTML code for adding the badges:
```html
<!-- Disco Theme (Animated) -->
<a href="https://github.com/unclecode/crawl4ai">
<img src="https://raw.githubusercontent.com/unclecode/crawl4ai/main/docs/assets/powered-by-disco.svg" alt="Powered by Crawl4AI" width="200"/>
</a>
<!-- Night Theme (Dark with Neon) -->
<a href="https://github.com/unclecode/crawl4ai">
<img src="https://raw.githubusercontent.com/unclecode/crawl4ai/main/docs/assets/powered-by-night.svg" alt="Powered by Crawl4AI" width="200"/>
</a>
<!-- Dark Theme (Classic) -->
<a href="https://github.com/unclecode/crawl4ai">
<img src="https://raw.githubusercontent.com/unclecode/crawl4ai/main/docs/assets/powered-by-dark.svg" alt="Powered by Crawl4AI" width="200"/>
</a>
<!-- Light Theme (Classic) -->
<a href="https://github.com/unclecode/crawl4ai">
<img src="https://raw.githubusercontent.com/unclecode/crawl4ai/main/docs/assets/powered-by-light.svg" alt="Powered by Crawl4AI" width="200"/>
</a>
<!-- Simple Shield Badge -->
<a href="https://github.com/unclecode/crawl4ai">
<img src="https://img.shields.io/badge/Powered%20by-Crawl4AI-blue?style=flat-square" alt="Powered by Crawl4AI"/>
</a>
```
#### 2. Text Attribution
Add this line to your documentation:
```
This project uses Crawl4AI (https://github.com/unclecode/crawl4ai) for web data extraction.
```
## 📚 Citation
If you use Crawl4AI in your research or project, please cite:
```bibtex
@software{crawl4ai2024,
author = {UncleCode},
title = {Crawl4AI: Open-source LLM Friendly Web Crawler & Scraper},
year = {2024},
publisher = {GitHub},
journal = {GitHub Repository},
howpublished = {\url{https://github.com/unclecode/crawl4ai}},
commit = {Please use the commit hash you're working with}
}
```
Text citation format:
```
UncleCode. (2024). Crawl4AI: Open-source LLM Friendly Web Crawler & Scraper [Computer software].
GitHub. https://github.com/unclecode/crawl4ai
```
## 📧 Contact
For questions, suggestions, or feedback, feel free to reach out:
- GitHub: [unclecode](https://github.com/unclecode)
- Twitter: [@unclecode](https://twitter.com/unclecode)
- Website: [crawl4ai.com](https://crawl4ai.com)
Happy Crawling! 🕸️🚀
## 💖 Support Crawl4AI
> 🎉 **Sponsorship Program Just Launched!** Be among the first 50 **Founding Sponsors** and get permanent recognition in our Hall of Fame!
Crawl4AI is the #1 trending open-source web crawler with 51K+ stars. Your support ensures we stay independent, innovative, and free forever.
<div align="center">
[![Become a Sponsor](https://img.shields.io/badge/Become%20a%20Sponsor-pink?style=for-the-badge&logo=github-sponsors&logoColor=white)](https://github.com/sponsors/unclecode)
[![Current Sponsors](https://img.shields.io/github/sponsors/unclecode?style=for-the-badge&logo=github&label=Current%20Sponsors&color=green)](https://github.com/sponsors/unclecode)
</div>
### 🤝 Sponsorship Tiers
- **🌱 Believer ($5/mo)**: Join the movement for data democratization
- **🚀 Builder ($50/mo)**: Get priority support and early feature access
- **💼 Growing Team ($500/mo)**: Bi-weekly syncs and optimization help
- **🏢 Data Infrastructure Partner ($2000/mo)**: Full partnership with dedicated support
**Why sponsor?** Every tier includes real benefits. No more rate-limited APIs. Own your data pipeline. Build data sovereignty together.
[View All Tiers & Benefits →](https://github.com/sponsors/unclecode)
### 🏆 Our Sponsors
#### 👑 Founding Sponsors (First 50)
*Be part of history - [Become a Founding Sponsor](https://github.com/sponsors/unclecode)*
<!-- Founding sponsors will be permanently recognized here -->
#### Current Sponsors
Thank you to all our sponsors who make this project possible!
<!-- Sponsors will be automatically added here -->
## 🗾 Mission
Our mission is to unlock the value of personal and enterprise data by transforming digital footprints into structured, tradeable assets. Crawl4AI empowers individuals and organizations with open-source tools to extract and structure data, fostering a shared data economy.
We envision a future where AI is powered by real human knowledge, ensuring data creators directly benefit from their contributions. By democratizing data and enabling ethical sharing, we are laying the foundation for authentic AI advancement.
<details>
<summary>🔑 <strong>Key Opportunities</strong></summary>
- **Data Capitalization**: Transform digital footprints into measurable, valuable assets.
- **Authentic AI Data**: Provide AI systems with real human insights.
- **Shared Economy**: Create a fair data marketplace that benefits data creators.
</details>
<details>
<summary>🚀 <strong>Development Pathway</strong></summary>
1. **Open-Source Tools**: Community-driven platforms for transparent data extraction.
2. **Digital Asset Structuring**: Tools to organize and value digital knowledge.
3. **Ethical Data Marketplace**: A secure, fair platform for exchanging structured data.
For more details, see our [full mission statement](./MISSION.md).
</details>
## Star History
[![Star History Chart](https://api.star-history.com/svg?repos=unclecode/crawl4ai&type=Date)](https://star-history.com/#unclecode/crawl4ai&Date)

1070
README.md

File diff suppressed because it is too large

244
README.sync.md Normal file
View File

@@ -0,0 +1,244 @@
# Crawl4AI v0.2.77 🕷️🤖
[![GitHub Stars](https://img.shields.io/github/stars/unclecode/crawl4ai?style=social)](https://github.com/unclecode/crawl4ai/stargazers)
[![GitHub Forks](https://img.shields.io/github/forks/unclecode/crawl4ai?style=social)](https://github.com/unclecode/crawl4ai/network/members)
[![GitHub Issues](https://img.shields.io/github/issues/unclecode/crawl4ai)](https://github.com/unclecode/crawl4ai/issues)
[![GitHub Pull Requests](https://img.shields.io/github/issues-pr/unclecode/crawl4ai)](https://github.com/unclecode/crawl4ai/pulls)
[![License](https://img.shields.io/github/license/unclecode/crawl4ai)](https://github.com/unclecode/crawl4ai/blob/main/LICENSE)
Crawl4AI simplifies web crawling and data extraction, making it accessible for large language models (LLMs) and AI applications. 🆓🌐
#### [v0.2.77] - 2024-08-02
Major improvements in functionality, performance, and cross-platform compatibility! 🚀
- 🐳 **Docker enhancements**:
- Significantly improved Dockerfile for easy installation on Linux, Mac, and Windows.
- 🌐 **Official Docker Hub image**:
- Launched our first official image on Docker Hub for streamlined deployment (unclecode/crawl4ai).
- 🔧 **Selenium upgrade**:
- Removed dependency on ChromeDriver, now using Selenium's built-in capabilities for better compatibility.
- 🖼️ **Image description**:
- Implemented ability to generate textual descriptions for extracted images from web pages.
- **Performance boost**:
- Various improvements to enhance overall speed and performance.
## Try it Now!
✨ Play around with this [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1sJPAmeLj5PMrg2VgOwMJ2ubGIcK0cJeX?usp=sharing)
✨ visit our [Documentation Website](https://crawl4ai.com/mkdocs/)
✨ Check [Demo](https://crawl4ai.com/mkdocs/demo)
## Features ✨
- 🆓 Completely free and open-source
- 🤖 LLM-friendly output formats (JSON, cleaned HTML, markdown)
- 🌍 Supports crawling multiple URLs simultaneously
- 🎨 Extracts and returns all media tags (Images, Audio, and Video)
- 🔗 Extracts all external and internal links
- 📚 Extracts metadata from the page
- 🔄 Custom hooks for authentication, headers, and page modifications before crawling
- 🕵️ User-agent customization
- 🖼️ Takes screenshots of the page
- 📜 Executes multiple custom JavaScripts before crawling
- 📚 Various chunking strategies: topic-based, regex, sentence, and more
- 🧠 Advanced extraction strategies: cosine clustering, LLM, and more
- 🎯 CSS selector support
- 📝 Passes instructions/keywords to refine extraction
# Crawl4AI
## 🌟 Shoutout to Contributors of v0.2.77!
A big thank you to the amazing contributors who've made this release possible:
- [@aravindkarnam](https://github.com/aravindkarnam) for the new image description feature
- [@FractalMind](https://github.com/FractalMind) for our official Docker Hub image
- [@ketonkss4](https://github.com/ketonkss4) for helping streamline our Selenium setup
Your contributions are driving Crawl4AI forward! 🚀
## Cool Examples 🚀
### Quick Start
```python
from crawl4ai import WebCrawler
# Create an instance of WebCrawler
crawler = WebCrawler()
# Warm up the crawler (load necessary models)
crawler.warmup()
# Run the crawler on a URL
result = crawler.run(url="https://www.nbcnews.com/business")
# Print the extracted content
print(result.markdown)
```
## How to install 🛠
### Using pip 🐍
```bash
virtualenv venv
source venv/bin/activate
pip install "crawl4ai @ git+https://github.com/unclecode/crawl4ai.git"
```
### Using Docker 🐳
```bash
# For Mac users (M1/M2)
# docker build --platform linux/amd64 -t crawl4ai .
docker build -t crawl4ai .
docker run -d -p 8000:80 crawl4ai
```
### Using Docker Hub 🐳
```bash
docker pull unclecode/crawl4ai:latest
docker run -d -p 8000:80 unclecode/crawl4ai:latest
```
## Speed-First Design 🚀
Perhaps the most important design principle for this library is speed. We need to ensure it can handle many links and resources in parallel as quickly as possible. By combining this speed with fast LLMs like Groq, the results will be truly amazing.
```python
import time
from crawl4ai.web_crawler import WebCrawler
crawler = WebCrawler()
crawler.warmup()
start = time.time()
url = r"https://www.nbcnews.com/business"
result = crawler.run( url, word_count_threshold=10, bypass_cache=True)
end = time.time()
print(f"Time taken: {end - start}")
```
Let's take a look at the time measured for the above code snippet:
```bash
[LOG] 🚀 Crawling done, success: True, time taken: 1.3623387813568115 seconds
[LOG] 🚀 Content extracted, success: True, time taken: 0.05715131759643555 seconds
[LOG] 🚀 Extraction, time taken: 0.05750393867492676 seconds.
Time taken: 1.439958095550537
```
Fetching the content from the page took 1.3623 seconds, and extracting the content took 0.0575 seconds. 🚀
### Extract Structured Data from Web Pages 📊
Crawl all OpenAI models and their fees from the official page.
```python
import os
from crawl4ai import WebCrawler
from crawl4ai.extraction_strategy import LLMExtractionStrategy
from pydantic import BaseModel, Field
class OpenAIModelFee(BaseModel):
model_name: str = Field(..., description="Name of the OpenAI model.")
input_fee: str = Field(..., description="Fee for input token for the OpenAI model.")
output_fee: str = Field(..., description="Fee for output token for the OpenAI model.")
url = 'https://openai.com/api/pricing/'
crawler = WebCrawler()
crawler.warmup()
result = crawler.run(
url=url,
word_count_threshold=1,
extraction_strategy= LLMExtractionStrategy(
provider= "openai/gpt-4o", api_token = os.getenv('OPENAI_API_KEY'),
schema=OpenAIModelFee.schema(),
extraction_type="schema",
instruction="""From the crawled content, extract all mentioned model names along with their fees for input and output tokens.
Do not miss any models in the entire content. One extracted model JSON format should look like this:
{"model_name": "GPT-4", "input_fee": "US$10.00 / 1M tokens", "output_fee": "US$30.00 / 1M tokens"}."""
),
bypass_cache=True,
)
print(result.extracted_content)
```
### Execute JS, Filter Data with CSS Selector, and Clustering
```python
from crawl4ai import WebCrawler
from crawl4ai.chunking_strategy import CosineStrategy
js_code = ["const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More')); loadMoreButton && loadMoreButton.click();"]
crawler = WebCrawler()
crawler.warmup()
result = crawler.run(
url="https://www.nbcnews.com/business",
js=js_code,
css_selector="p",
extraction_strategy=CosineStrategy(semantic_filter="technology")
)
print(result.extracted_content)
```
### Extract Structured Data from Web Pages With Proxy and BaseUrl
```python
from crawl4ai import WebCrawler
from crawl4ai.extraction_strategy import LLMExtractionStrategy
def create_crawler():
crawler = WebCrawler(verbose=True, proxy="http://127.0.0.1:7890")
crawler.warmup()
return crawler
crawler = create_crawler()
crawler.warmup()
result = crawler.run(
url="https://www.nbcnews.com/business",
extraction_strategy=LLMExtractionStrategy(
provider="openai/gpt-4o",
api_token="sk-",
base_url="https://api.openai.com/v1"
)
)
print(result.markdown)
```
## Documentation 📚
For detailed documentation, including installation instructions, advanced features, and API reference, visit our [Documentation Website](https://crawl4ai.com/mkdocs/).
## Contributing 🤝
We welcome contributions from the open-source community. Check out our [contribution guidelines](https://github.com/unclecode/crawl4ai/blob/main/CONTRIBUTING.md) for more information.
## License 📄
Crawl4AI is released under the [Apache 2.0 License](https://github.com/unclecode/crawl4ai/blob/main/LICENSE).
## Contact 📧
For questions, suggestions, or feedback, feel free to reach out:
- GitHub: [unclecode](https://github.com/unclecode)
- Twitter: [@unclecode](https://twitter.com/unclecode)
- Website: [crawl4ai.com](https://crawl4ai.com)
Happy Crawling! 🕸️🚀
## Star History
[![Star History Chart](https://api.star-history.com/svg?repos=unclecode/crawl4ai&type=Date)](https://star-history.com/#unclecode/crawl4ai&Date)

View File

@@ -1,65 +0,0 @@
# 💖 Sponsors & Supporters
Thank you to everyone supporting Crawl4AI! Your sponsorship helps keep this project open-source and actively maintained.
## 👑 Founding Sponsors
*The first 50 sponsors who believed in our vision - permanently recognized*
<!-- Founding sponsors will be listed here with special recognition -->
🎉 **Become a Founding Sponsor!** Only [X/50] spots remaining! [Join now →](https://github.com/sponsors/unclecode)
---
## 🏢 Data Infrastructure Partners ($2000/month)
*These organizations are building their data sovereignty with Crawl4AI at the core*
<!-- Data Infrastructure Partners will be listed here -->
*Be the first Data Infrastructure Partner! [Join us →](https://github.com/sponsors/unclecode)*
---
## 💼 Growing Teams ($500/month)
*Teams scaling their data extraction with Crawl4AI*
<!-- Growing Teams will be listed here -->
*Your team could be here! [Become a sponsor →](https://github.com/sponsors/unclecode)*
---
## 🚀 Builders ($50/month)
*Developers and entrepreneurs building with Crawl4AI*
<!-- Builders will be listed here -->
*Join the builders! [Start sponsoring →](https://github.com/sponsors/unclecode)*
---
## 🌱 Believers ($5/month)
*The community supporting data democratization*
<!-- Believers will be listed here -->
*Thank you to all our community believers!*
---
## 🤝 Want to Sponsor?
Crawl4AI is the #1 trending open-source web crawler. We're building the future of data extraction - where organizations own their data pipelines instead of relying on rate-limited APIs.
### Available Sponsorship Tiers:
- **🌱 Believer** ($5/mo) - Support the movement
- **🚀 Builder** ($50/mo) - Priority support & early access
- **💼 Growing Team** ($500/mo) - Bi-weekly syncs & optimization
- **🏢 Data Infrastructure Partner** ($2000/mo) - Full partnership & dedicated support
[View all tiers and benefits →](https://github.com/sponsors/unclecode)
### Enterprise & Custom Partnerships
Building data extraction at scale? Need dedicated support or infrastructure? Let's talk about a custom partnership.
📧 Contact: [hello@crawl4ai.com](mailto:hello@crawl4ai.com) | 📅 [Schedule a call](https://calendar.app.google/rEpvi2UBgUQjWHfJ9)
---
*This list is updated regularly. Sponsors at $50+ tiers can submit their logos via [hello@crawl4ai.com](mailto:hello@crawl4ai.com)*

View File

@@ -1,24 +0,0 @@
[changelog]
# Template format
header = """
# Changelog\n
All notable changes to this project will be documented in this file.\n
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).\n
"""
# Organize commits by type
[git]
conventional_commits = true
filter_unconventional = true
commit_parsers = [
{ message = "^feat", group = "Added"},
{ message = "^fix", group = "Fixed"},
{ message = "^doc", group = "Documentation"},
{ message = "^perf", group = "Performance"},
{ message = "^refactor", group = "Changed"},
{ message = "^style", group = "Changed"},
{ message = "^test", group = "Testing"},
{ message = "^chore\\(release\\): prepare for", skip = true},
{ message = "^chore", group = "Miscellaneous Tasks"},
]

View File

@@ -1,226 +1,30 @@
# __init__.py
import warnings
from .async_webcrawler import AsyncWebCrawler, CacheMode
# MODIFIED: Add SeedingConfig and VirtualScrollConfig here
from .async_configs import BrowserConfig, CrawlerRunConfig, HTTPCrawlerConfig, LLMConfig, ProxyConfig, GeolocationConfig, SeedingConfig, VirtualScrollConfig, LinkPreviewConfig, MatchMode
from .content_scraping_strategy import (
ContentScrapingStrategy,
LXMLWebScrapingStrategy,
WebScrapingStrategy, # Backward compatibility alias
)
from .async_logger import (
AsyncLoggerBase,
AsyncLogger,
)
from .proxy_strategy import (
ProxyRotationStrategy,
RoundRobinProxyStrategy,
)
from .extraction_strategy import (
ExtractionStrategy,
LLMExtractionStrategy,
CosineStrategy,
JsonCssExtractionStrategy,
JsonXPathExtractionStrategy,
JsonLxmlExtractionStrategy,
RegexExtractionStrategy
)
from .chunking_strategy import ChunkingStrategy, RegexChunking
from .markdown_generation_strategy import DefaultMarkdownGenerator
from .table_extraction import (
TableExtractionStrategy,
DefaultTableExtraction,
NoTableExtraction,
LLMTableExtraction,
)
from .content_filter_strategy import (
PruningContentFilter,
BM25ContentFilter,
LLMContentFilter,
RelevantContentFilter,
)
from .models import CrawlResult, MarkdownGenerationResult, DisplayMode
from .components.crawler_monitor import CrawlerMonitor
from .link_preview import LinkPreview
from .async_dispatcher import (
MemoryAdaptiveDispatcher,
SemaphoreDispatcher,
RateLimiter,
BaseDispatcher,
)
from .docker_client import Crawl4aiDockerClient
from .hub import CrawlerHub
from .browser_profiler import BrowserProfiler
from .deep_crawling import (
DeepCrawlStrategy,
BFSDeepCrawlStrategy,
FilterChain,
URLPatternFilter,
DomainFilter,
ContentTypeFilter,
URLFilter,
FilterStats,
SEOFilter,
KeywordRelevanceScorer,
URLScorer,
CompositeScorer,
DomainAuthorityScorer,
FreshnessScorer,
PathDepthScorer,
BestFirstCrawlingStrategy,
DFSDeepCrawlStrategy,
DeepCrawlDecorator,
)
# NEW: Import AsyncUrlSeeder
from .async_url_seeder import AsyncUrlSeeder
# Adaptive Crawler
from .adaptive_crawler import (
AdaptiveCrawler,
AdaptiveConfig,
CrawlState,
CrawlStrategy,
StatisticalStrategy
)
# C4A Script Language Support
from .script import (
compile as c4a_compile,
validate as c4a_validate,
compile_file as c4a_compile_file,
CompilationResult,
ValidationResult,
ErrorDetail
)
# Browser Adapters
from .browser_adapter import (
BrowserAdapter,
PlaywrightAdapter,
UndetectedAdapter
)
from .utils import (
start_colab_display_server,
setup_colab_environment
)
from .async_webcrawler import AsyncWebCrawler
from .models import CrawlResult
from ._version import __version__
# __version__ = "0.3.73"
__all__ = [
"AsyncLoggerBase",
"AsyncLogger",
"AsyncWebCrawler",
"BrowserProfiler",
"LLMConfig",
"GeolocationConfig",
# NEW: Add SeedingConfig and VirtualScrollConfig
"SeedingConfig",
"VirtualScrollConfig",
# NEW: Add AsyncUrlSeeder
"AsyncUrlSeeder",
# Adaptive Crawler
"AdaptiveCrawler",
"AdaptiveConfig",
"CrawlState",
"CrawlStrategy",
"StatisticalStrategy",
"DeepCrawlStrategy",
"BFSDeepCrawlStrategy",
"BestFirstCrawlingStrategy",
"DFSDeepCrawlStrategy",
"FilterChain",
"URLPatternFilter",
"ContentTypeFilter",
"DomainFilter",
"FilterStats",
"URLFilter",
"SEOFilter",
"KeywordRelevanceScorer",
"URLScorer",
"CompositeScorer",
"DomainAuthorityScorer",
"FreshnessScorer",
"PathDepthScorer",
"DeepCrawlDecorator",
"CrawlResult",
"CrawlerHub",
"CacheMode",
"MatchMode",
"ContentScrapingStrategy",
"WebScrapingStrategy",
"LXMLWebScrapingStrategy",
"BrowserConfig",
"CrawlerRunConfig",
"HTTPCrawlerConfig",
"ExtractionStrategy",
"LLMExtractionStrategy",
"CosineStrategy",
"JsonCssExtractionStrategy",
"JsonXPathExtractionStrategy",
"JsonLxmlExtractionStrategy",
"RegexExtractionStrategy",
"ChunkingStrategy",
"RegexChunking",
"DefaultMarkdownGenerator",
"TableExtractionStrategy",
"DefaultTableExtraction",
"NoTableExtraction",
"RelevantContentFilter",
"PruningContentFilter",
"BM25ContentFilter",
"LLMContentFilter",
"BaseDispatcher",
"MemoryAdaptiveDispatcher",
"SemaphoreDispatcher",
"RateLimiter",
"CrawlerMonitor",
"LinkPreview",
"DisplayMode",
"MarkdownGenerationResult",
"Crawl4aiDockerClient",
"ProxyRotationStrategy",
"RoundRobinProxyStrategy",
"ProxyConfig",
"start_colab_display_server",
"setup_colab_environment",
# C4A Script additions
"c4a_compile",
"c4a_validate",
"c4a_compile_file",
"CompilationResult",
"ValidationResult",
"ErrorDetail",
# Browser Adapters
"BrowserAdapter",
"PlaywrightAdapter",
"UndetectedAdapter",
"LinkPreviewConfig"
]
def is_sync_version_installed():
try:
import selenium
return True
except ImportError:
return False
# Disable all Pydantic warnings
warnings.filterwarnings("ignore", module="pydantic")
# pydantic_warnings.filter_warnings()
if is_sync_version_installed():
try:
from .web_crawler import WebCrawler
__all__.append("WebCrawler")
except ImportError:
import warnings
warnings.warn("Failed to import WebCrawler even though selenium is installed. This might be due to other missing dependencies.")
else:
WebCrawler = None
# import warnings
# print("Warning: Synchronous WebCrawler is not available. Install crawl4ai[sync] for synchronous support. However, please note that the synchronous version will be deprecated soon.")

View File

@@ -1,8 +0,0 @@
# crawl4ai/__version__.py
# This is the version that will be used for stable releases
__version__ = "0.7.4"
# For nightly builds, this gets set during build process
__nightly_version__ = None

crawl4ai/_version.py Normal file (+2)
View File

@@ -0,0 +1,2 @@
# crawl4ai/_version.py
__version__ = "0.3.731"

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,965 @@
import asyncio
import base64
import time
from abc import ABC, abstractmethod
from typing import Callable, Dict, Any, List, Optional, Awaitable
import os, sys, shutil
import tempfile, subprocess
from playwright.async_api import async_playwright, Page, Browser, Error
from io import BytesIO
from PIL import Image, ImageDraw, ImageFont
from pathlib import Path
from playwright.async_api import ProxySettings
from pydantic import BaseModel
import hashlib
import json
import uuid
from playwright_stealth import StealthConfig, stealth_async
stealth_config = StealthConfig(
webdriver=True,
chrome_app=True,
chrome_csi=True,
chrome_load_times=True,
chrome_runtime=True,
navigator_languages=True,
navigator_plugins=True,
navigator_permissions=True,
webgl_vendor=True,
outerdimensions=True,
navigator_hardware_concurrency=True,
media_codecs=True,
)
class ManagedBrowser:
def __init__(self, browser_type: str = "chromium", user_data_dir: Optional[str] = None, headless: bool = False):
self.browser_type = browser_type
self.user_data_dir = user_data_dir
self.headless = headless
self.browser_process = None
self.temp_dir = None
self.debugging_port = 9222
async def start(self) -> str:
"""
Starts the browser process and returns the CDP endpoint URL.
If user_data_dir is not provided, creates a temporary directory.
"""
# Create temp dir if needed
if not self.user_data_dir:
self.temp_dir = tempfile.mkdtemp(prefix="browser-profile-")
self.user_data_dir = self.temp_dir
# Get browser path and args based on OS and browser type
browser_path = self._get_browser_path()
args = self._get_browser_args()
# Start browser process
try:
self.browser_process = subprocess.Popen(
args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
await asyncio.sleep(2) # Give browser time to start
return f"http://localhost:{self.debugging_port}"
except Exception as e:
await self.cleanup()
raise Exception(f"Failed to start browser: {e}")
def _get_browser_path(self) -> str:
"""Returns the browser executable path based on OS and browser type"""
if sys.platform == "darwin": # macOS
paths = {
"chromium": "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome",
"firefox": "/Applications/Firefox.app/Contents/MacOS/firefox",
"webkit": "/Applications/Safari.app/Contents/MacOS/Safari"
}
elif sys.platform == "win32": # Windows
paths = {
"chromium": "C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe",
"firefox": "C:\\Program Files\\Mozilla Firefox\\firefox.exe",
"webkit": None # WebKit not supported on Windows
}
else: # Linux
paths = {
"chromium": "google-chrome",
"firefox": "firefox",
"webkit": None # WebKit not supported on Linux
}
return paths.get(self.browser_type)
def _get_browser_args(self) -> List[str]:
"""Returns browser-specific command line arguments"""
base_args = [self._get_browser_path()]
if self.browser_type == "chromium":
args = [
f"--remote-debugging-port={self.debugging_port}",
f"--user-data-dir={self.user_data_dir}",
]
if self.headless:
args.append("--headless=new")
elif self.browser_type == "firefox":
args = [
"--remote-debugging-port", str(self.debugging_port),
"--profile", self.user_data_dir,
]
if self.headless:
args.append("--headless")
else:
raise NotImplementedError(f"Browser type {self.browser_type} not supported")
return base_args + args
async def cleanup(self):
"""Cleanup browser process and temporary directory"""
if self.browser_process:
try:
self.browser_process.terminate()
await asyncio.sleep(1)
if self.browser_process.poll() is None:
self.browser_process.kill()
except Exception as e:
print(f"Error terminating browser: {e}")
if self.temp_dir and os.path.exists(self.temp_dir):
try:
shutil.rmtree(self.temp_dir)
except Exception as e:
print(f"Error removing temporary directory: {e}")
class AsyncCrawlResponse(BaseModel):
html: str
response_headers: Dict[str, str]
status_code: int
screenshot: Optional[str] = None
get_delayed_content: Optional[Callable[[Optional[float]], Awaitable[str]]] = None
class Config:
arbitrary_types_allowed = True
class AsyncCrawlerStrategy(ABC):
@abstractmethod
async def crawl(self, url: str, **kwargs) -> AsyncCrawlResponse:
pass
@abstractmethod
async def crawl_many(self, urls: List[str], **kwargs) -> List[AsyncCrawlResponse]:
pass
@abstractmethod
async def take_screenshot(self, **kwargs) -> str:
pass
@abstractmethod
def update_user_agent(self, user_agent: str):
pass
@abstractmethod
def set_hook(self, hook_type: str, hook: Callable):
pass
class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
def __init__(self, use_cached_html=False, js_code=None, **kwargs):
self.use_cached_html = use_cached_html
self.user_agent = kwargs.get(
"user_agent",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
"(KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
)
self.proxy = kwargs.get("proxy")
self.proxy_config = kwargs.get("proxy_config")
self.headless = kwargs.get("headless", True)
self.browser_type = kwargs.get("browser_type", "chromium")
self.headers = kwargs.get("headers", {})
self.sessions = {}
self.session_ttl = 1800
self.js_code = js_code
self.verbose = kwargs.get("verbose", False)
self.playwright = None
self.browser = None
self.sleep_on_close = kwargs.get("sleep_on_close", False)
self.use_managed_browser = kwargs.get("use_managed_browser", False)
self.user_data_dir = kwargs.get("user_data_dir", None)
self.use_persistent_context = kwargs.get("use_persistent_context", False)
self.chrome_channel = kwargs.get("chrome_channel", "chrome")
self.managed_browser = None
self.default_context = None
self.hooks = {
'on_browser_created': None,
'on_user_agent_updated': None,
'on_execution_started': None,
'before_goto': None,
'after_goto': None,
'before_return_html': None,
'before_retrieve_html': None
}
self.extra_args = kwargs.get("extra_args", [])
async def __aenter__(self):
await self.start()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
async def start(self):
if self.playwright is None:
self.playwright = await async_playwright().start()
if self.browser is None:
if self.use_managed_browser:
# Use managed browser approach
self.managed_browser = ManagedBrowser(
browser_type=self.browser_type,
user_data_dir=self.user_data_dir,
headless=self.headless
)
cdp_url = await self.managed_browser.start()
self.browser = await self.playwright.chromium.connect_over_cdp(cdp_url)
# Get the default context that maintains the user profile
contexts = self.browser.contexts
if contexts:
self.default_context = contexts[0]
else:
# If no default context exists, create one
self.default_context = await self.browser.new_context(
viewport={"width": 1920, "height": 1080}
)
# Set up the default context
if self.default_context:
await self.default_context.set_extra_http_headers(self.headers)
if self.user_agent:
await self.default_context.set_extra_http_headers({
"User-Agent": self.user_agent
})
else:
browser_args = {
"headless": self.headless,
"args": [
"--disable-gpu",
"--no-sandbox",
"--disable-dev-shm-usage",
"--disable-blink-features=AutomationControlled",
"--disable-infobars",
"--window-position=0,0",
"--ignore-certificate-errors",
"--ignore-certificate-errors-spki-list",
# "--disable-http2",
# "--headless=new", # Use the new headless mode
]
}
# Add extra args if provided
if self.extra_args:
browser_args["args"].extend(self.extra_args)
# Add proxy settings if a proxy is specified
if self.proxy:
proxy_settings = ProxySettings(server=self.proxy)
browser_args["proxy"] = proxy_settings
elif self.proxy_config:
proxy_settings = ProxySettings(server=self.proxy_config.get("server"), username=self.proxy_config.get("username"), password=self.proxy_config.get("password"))
browser_args["proxy"] = proxy_settings
# Use the new headless mode explicitly; the flag must be added before launch to take effect
if self.headless and self.browser_type == "chromium":
browser_args["args"].append("--headless=new")
# Select the appropriate browser based on the browser_type
if self.browser_type == "firefox":
self.browser = await self.playwright.firefox.launch(**browser_args)
elif self.browser_type == "webkit":
self.browser = await self.playwright.webkit.launch(**browser_args)
else:
self.browser = await self.playwright.chromium.launch(**browser_args)
await self.execute_hook('on_browser_created', self.browser)
async def close(self):
if self.sleep_on_close:
await asyncio.sleep(0.5)
# Close all active sessions
session_ids = list(self.sessions.keys())
for session_id in session_ids:
await self.kill_session(session_id)
if self.browser:
await self.browser.close()
self.browser = None
if self.managed_browser:
await self.managed_browser.cleanup()
self.managed_browser = None
if self.playwright:
await self.playwright.stop()
self.playwright = None
def __del__(self):
if self.browser or self.playwright:
asyncio.get_event_loop().run_until_complete(self.close())
def set_hook(self, hook_type: str, hook: Callable):
if hook_type in self.hooks:
self.hooks[hook_type] = hook
else:
raise ValueError(f"Invalid hook type: {hook_type}")
async def execute_hook(self, hook_type: str, *args):
hook = self.hooks.get(hook_type)
if hook:
if asyncio.iscoroutinefunction(hook):
return await hook(*args)
else:
return hook(*args)
return args[0] if args else None
def update_user_agent(self, user_agent: str):
self.user_agent = user_agent
def set_custom_headers(self, headers: Dict[str, str]):
self.headers = headers
async def kill_session(self, session_id: str):
if session_id in self.sessions:
context, page, _ = self.sessions[session_id]
await page.close()
if not self.use_managed_browser:
await context.close()
del self.sessions[session_id]
def _cleanup_expired_sessions(self):
current_time = time.time()
expired_sessions = [
sid for sid, (_, _, last_used) in self.sessions.items()
if current_time - last_used > self.session_ttl
]
for sid in expired_sessions:
asyncio.create_task(self.kill_session(sid))
async def smart_wait(self, page: Page, wait_for: str, timeout: float = 30000):
wait_for = wait_for.strip()
if wait_for.startswith('js:'):
# Explicitly specified JavaScript
js_code = wait_for[3:].strip()
return await self.csp_compliant_wait(page, js_code, timeout)
elif wait_for.startswith('css:'):
# Explicitly specified CSS selector
css_selector = wait_for[4:].strip()
try:
await page.wait_for_selector(css_selector, timeout=timeout)
except Error as e:
if 'Timeout' in str(e):
raise TimeoutError(f"Timeout after {timeout}ms waiting for selector '{css_selector}'")
else:
raise ValueError(f"Invalid CSS selector: '{css_selector}'")
else:
# Auto-detect based on content
if wait_for.startswith('()') or wait_for.startswith('function'):
# It's likely a JavaScript function
return await self.csp_compliant_wait(page, wait_for, timeout)
else:
# Assume it's a CSS selector first
try:
await page.wait_for_selector(wait_for, timeout=timeout)
except Error as e:
if 'Timeout' in str(e):
raise TimeoutError(f"Timeout after {timeout}ms waiting for selector '{wait_for}'")
else:
# If it's not a timeout error, it might be an invalid selector
# Let's try to evaluate it as a JavaScript function as a fallback
try:
return await self.csp_compliant_wait(page, f"() => {{{wait_for}}}", timeout)
except Error:
raise ValueError(f"Invalid wait_for parameter: '{wait_for}'. "
"It should be either a valid CSS selector, a JavaScript function, "
"or explicitly prefixed with 'js:' or 'css:'.")
async def csp_compliant_wait(self, page: Page, user_wait_function: str, timeout: float = 30000):
wrapper_js = f"""
async () => {{
const userFunction = {user_wait_function};
const startTime = Date.now();
while (true) {{
if (await userFunction()) {{
return true;
}}
if (Date.now() - startTime > {timeout}) {{
throw new Error('Timeout waiting for condition');
}}
await new Promise(resolve => setTimeout(resolve, 100));
}}
}}
"""
try:
await page.evaluate(wrapper_js)
except TimeoutError:
raise TimeoutError(f"Timeout after {timeout}ms waiting for condition")
except Exception as e:
raise RuntimeError(f"Error in wait condition: {str(e)}")
async def process_iframes(self, page):
# Find all iframes
iframes = await page.query_selector_all('iframe')
for i, iframe in enumerate(iframes):
try:
# Add a unique identifier to the iframe
await iframe.evaluate(f'(element) => element.id = "iframe-{i}"')
# Get the frame associated with this iframe
frame = await iframe.content_frame()
if frame:
# Wait for the frame to load
await frame.wait_for_load_state('load', timeout=30000) # 30 seconds timeout
# Extract the content of the iframe's body
iframe_content = await frame.evaluate('() => document.body.innerHTML')
# Generate a unique class name for this iframe
class_name = f'extracted-iframe-content-{i}'
# Replace the iframe with a div containing the extracted content
_iframe = iframe_content.replace('`', '\\`')
await page.evaluate(f"""
() => {{
const iframe = document.getElementById('iframe-{i}');
const div = document.createElement('div');
div.innerHTML = `{_iframe}`;
div.className = '{class_name}';
iframe.replaceWith(div);
}}
""")
else:
print(f"Warning: Could not access content frame for iframe {i}")
except Exception as e:
print(f"Error processing iframe {i}: {str(e)}")
# Return the page object
return page
async def crawl(self, url: str, **kwargs) -> AsyncCrawlResponse:
response_headers = {}
status_code = None
self._cleanup_expired_sessions()
session_id = kwargs.get("session_id")
# Handle page creation differently for managed browser
if self.use_managed_browser:
if session_id:
# Reuse existing session if available
context, page, _ = self.sessions.get(session_id, (None, None, None))
if not page:
# Create new page in default context if session doesn't exist
page = await self.default_context.new_page()
self.sessions[session_id] = (self.default_context, page, time.time())
else:
# Create new page in default context for non-session requests
page = await self.default_context.new_page()
else:
if session_id:
context, page, _ = self.sessions.get(session_id, (None, None, None))
if not context:
context = await self.browser.new_context(
user_agent=self.user_agent,
viewport={"width": 1920, "height": 1080},
proxy={"server": self.proxy} if self.proxy else None,
accept_downloads=True,
java_script_enabled=True
)
await context.add_cookies([{"name": "cookiesEnabled", "value": "true", "url": url}])
await context.set_extra_http_headers(self.headers)
page = await context.new_page()
self.sessions[session_id] = (context, page, time.time())
else:
context = await self.browser.new_context(
user_agent=self.user_agent,
viewport={"width": 1920, "height": 1080},
proxy={"server": self.proxy} if self.proxy else None
)
await context.set_extra_http_headers(self.headers)
if kwargs.get("override_navigator", False) or kwargs.get("simulate_user", False) or kwargs.get("magic", False):
# Inject scripts to override navigator properties
await context.add_init_script("""
// Pass the Permissions Test.
const originalQuery = window.navigator.permissions.query;
window.navigator.permissions.query = (parameters) => (
parameters.name === 'notifications' ?
Promise.resolve({ state: Notification.permission }) :
originalQuery(parameters)
);
Object.defineProperty(navigator, 'webdriver', {
get: () => undefined
});
window.navigator.chrome = {
runtime: {},
// Add other properties if necessary
};
Object.defineProperty(navigator, 'plugins', {
get: () => [1, 2, 3, 4, 5],
});
Object.defineProperty(navigator, 'languages', {
get: () => ['en-US', 'en'],
});
Object.defineProperty(document, 'hidden', {
get: () => false
});
Object.defineProperty(document, 'visibilityState', {
get: () => 'visible'
});
""")
page = await context.new_page()
if kwargs.get("magic", False):
await stealth_async(page, stealth_config)
# Add console message and error logging
if kwargs.get("log_console", False):
page.on("console", lambda msg: print(f"Console: {msg.text}"))
page.on("pageerror", lambda exc: print(f"Page Error: {exc}"))
try:
if self.verbose:
print(f"[LOG] 🕸️ Crawling {url} using AsyncPlaywrightCrawlerStrategy...")
if self.use_cached_html:
cache_file_path = os.path.join(
Path.home(), ".crawl4ai", "cache", hashlib.md5(url.encode()).hexdigest()
)
if os.path.exists(cache_file_path):
html = ""
with open(cache_file_path, "r") as f:
html = f.read()
# retrieve response headers and status code from cache
with open(cache_file_path + ".meta", "r") as f:
meta = json.load(f)
response_headers = meta.get("response_headers", {})
status_code = meta.get("status_code")
response = AsyncCrawlResponse(
html=html, response_headers=response_headers, status_code=status_code
)
return response
if not kwargs.get("js_only", False):
await self.execute_hook('before_goto', page)
# response = await page.goto(
# url, wait_until="domcontentloaded", timeout=kwargs.get("page_timeout", 60000)
# )
# Add retry logic for HTTP2 errors
max_retries = kwargs.get("max_retries", 3)
current_try = 0
while current_try < max_retries:
try:
response = await page.goto(
url,
# wait_until=kwargs.get("wait_until", ["domcontentloaded", "networkidle"]),
wait_until=kwargs.get("wait_until", "networkidle"),
timeout=kwargs.get("page_timeout", 60000)
)
break
except Exception as e:
current_try += 1
if "ERR_HTTP2_PROTOCOL_ERROR" in str(e):
if current_try < max_retries:
# Add exponential backoff
await asyncio.sleep(2 ** current_try)
# Try with different protocol
if 'args' not in kwargs:
kwargs['args'] = []
kwargs['args'].extend(['--disable-http2'])
continue
if current_try == max_retries:
raise
# response = await page.goto("about:blank")
# await page.evaluate(f"window.location.href = '{url}'")
await self.execute_hook('after_goto', page)
# Get status code and headers
status_code = response.status
response_headers = response.headers
else:
status_code = 200
response_headers = {}
# Robust body check: wait for <body> to be attached, then for it to become visible
try:
# First wait for body to exist, regardless of visibility
await page.wait_for_selector('body', state='attached', timeout=30000)
# Then wait for it to become visible by checking CSS
await page.wait_for_function("""
() => {
const body = document.body;
const style = window.getComputedStyle(body);
return style.display !== 'none' &&
style.visibility !== 'hidden' &&
style.opacity !== '0';
}
""", timeout=30000)
except Error as e:
# If waiting fails, let's try to diagnose the issue
visibility_info = await page.evaluate("""
() => {
const body = document.body;
const style = window.getComputedStyle(body);
return {
display: style.display,
visibility: style.visibility,
opacity: style.opacity,
hasContent: body.innerHTML.length,
classList: Array.from(body.classList)
}
}
""")
if self.verbose:
print(f"Body visibility debug info: {visibility_info}")
# Even if body is hidden, we might still want to proceed
if kwargs.get('ignore_body_visibility', True):
if self.verbose:
print("Proceeding despite hidden body...")
pass
else:
raise Error(f"Body element is hidden: {visibility_info}")
await page.evaluate("window.scrollTo(0, document.body.scrollHeight)")
js_code = kwargs.get("js_code", kwargs.get("js", self.js_code))
if js_code:
if isinstance(js_code, str):
await page.evaluate(js_code)
elif isinstance(js_code, list):
for js in js_code:
await page.evaluate(js)
await page.wait_for_load_state('networkidle')
# Check for on execution event
await self.execute_hook('on_execution_started', page)
if kwargs.get("simulate_user", False) or kwargs.get("magic", False):
# Simulate user interactions
await page.mouse.move(100, 100)
await page.mouse.down()
await page.mouse.up()
await page.keyboard.press('ArrowDown')
# Handle the wait_for parameter
wait_for = kwargs.get("wait_for")
if wait_for:
try:
await self.smart_wait(page, wait_for, timeout=kwargs.get("page_timeout", 60000))
except Exception as e:
raise RuntimeError(f"Wait condition failed: {str(e)}")
# Update image dimensions
update_image_dimensions_js = """
() => {
return new Promise((resolve) => {
const filterImage = (img) => {
// Filter out images that are too small
if (img.width < 100 && img.height < 100) return false;
// Filter out images that are not visible
const rect = img.getBoundingClientRect();
if (rect.width === 0 || rect.height === 0) return false;
// Filter out images with certain class names (e.g., icons, thumbnails)
if (img.classList.contains('icon') || img.classList.contains('thumbnail')) return false;
// Filter out images with certain patterns in their src (e.g., placeholder images)
if (img.src.includes('placeholder') || img.src.includes('icon')) return false;
return true;
};
const images = Array.from(document.querySelectorAll('img')).filter(filterImage);
let imagesLeft = images.length;
if (imagesLeft === 0) {
resolve();
return;
}
const checkImage = (img) => {
if (img.complete && img.naturalWidth !== 0) {
img.setAttribute('width', img.naturalWidth);
img.setAttribute('height', img.naturalHeight);
imagesLeft--;
if (imagesLeft === 0) resolve();
}
};
images.forEach(img => {
checkImage(img);
if (!img.complete) {
img.onload = () => {
checkImage(img);
};
img.onerror = () => {
imagesLeft--;
if (imagesLeft === 0) resolve();
};
}
});
// Resolve immediately instead of waiting; images that are still loading get their
// dimensions set by the onload handlers above rather than blocking this promise.
resolve();
});
}
"""
await page.evaluate(update_image_dimensions_js)
# Wait a bit for any onload events to complete
await page.wait_for_timeout(100)
# Process iframes
if kwargs.get("process_iframes", False):
page = await self.process_iframes(page)
await self.execute_hook('before_retrieve_html', page)
# Check if delay_before_return_html is set then wait for that time
delay_before_return_html = kwargs.get("delay_before_return_html")
if delay_before_return_html:
await asyncio.sleep(delay_before_return_html)
# Check for remove_overlay_elements parameter
if kwargs.get("remove_overlay_elements", False):
await self.remove_overlay_elements(page)
html = await page.content()
await self.execute_hook('before_return_html', page, html)
# Check if kwargs has screenshot=True then take screenshot
screenshot_data = None
if kwargs.get("screenshot"):
# Check we have screenshot_wait_for parameter, if we have simply wait for that time
screenshot_wait_for = kwargs.get("screenshot_wait_for")
if screenshot_wait_for:
await asyncio.sleep(screenshot_wait_for)
screenshot_data = await self.take_screenshot(page)
if self.verbose:
print(f"[LOG] ✅ Crawled {url} successfully!")
if self.use_cached_html:
cache_file_path = os.path.join(
Path.home(), ".crawl4ai", "cache", hashlib.md5(url.encode()).hexdigest()
)
with open(cache_file_path, "w", encoding="utf-8") as f:
f.write(html)
# store response headers and status code in cache
with open(cache_file_path + ".meta", "w", encoding="utf-8") as f:
json.dump({
"response_headers": response_headers,
"status_code": status_code
}, f)
async def get_delayed_content(delay: float = 5.0) -> str:
if self.verbose:
print(f"[LOG] Waiting for {delay} seconds before retrieving content for {url}")
await asyncio.sleep(delay)
return await page.content()
response = AsyncCrawlResponse(
html=html,
response_headers=response_headers,
status_code=status_code,
screenshot=screenshot_data,
get_delayed_content=get_delayed_content
)
return response
except Error as e:
raise Error(f"[ERROR] 🚫 crawl(): Failed to crawl {url}: {str(e)}")
# finally:
# if not session_id:
# await page.close()
# await context.close()
async def crawl_many(self, urls: List[str], **kwargs) -> List[AsyncCrawlResponse]:
semaphore_count = kwargs.get('semaphore_count', 5) # Adjust as needed
semaphore = asyncio.Semaphore(semaphore_count)
async def crawl_with_semaphore(url):
async with semaphore:
return await self.crawl(url, **kwargs)
tasks = [crawl_with_semaphore(url) for url in urls]
results = await asyncio.gather(*tasks, return_exceptions=True)
return [result if not isinstance(result, Exception) else str(result) for result in results]
async def remove_overlay_elements(self, page: Page) -> None:
"""
Removes popup overlays, modals, cookie notices, and other intrusive elements from the page.
Args:
page (Page): The Playwright page instance
"""
remove_overlays_js = """
async () => {
// Function to check if element is visible
const isVisible = (elem) => {
const style = window.getComputedStyle(elem);
return style.display !== 'none' &&
style.visibility !== 'hidden' &&
style.opacity !== '0';
};
// Common selectors for popups and overlays
const commonSelectors = [
// Close buttons first
'button[class*="close" i]', 'button[class*="dismiss" i]',
'button[aria-label*="close" i]', 'button[title*="close" i]',
'a[class*="close" i]', 'span[class*="close" i]',
// Cookie notices
'[class*="cookie-banner" i]', '[id*="cookie-banner" i]',
'[class*="cookie-consent" i]', '[id*="cookie-consent" i]',
// Newsletter/subscription dialogs
'[class*="newsletter" i]', '[class*="subscribe" i]',
// Generic popups/modals
'[class*="popup" i]', '[class*="modal" i]',
'[class*="overlay" i]', '[class*="dialog" i]',
'[role="dialog"]', '[role="alertdialog"]'
];
// Try to click close buttons first
for (const selector of commonSelectors.slice(0, 6)) {
const closeButtons = document.querySelectorAll(selector);
for (const button of closeButtons) {
if (isVisible(button)) {
try {
button.click();
await new Promise(resolve => setTimeout(resolve, 100));
} catch (e) {
console.log('Error clicking button:', e);
}
}
}
}
// Remove remaining overlay elements
const removeOverlays = () => {
// Find elements with high z-index
const allElements = document.querySelectorAll('*');
for (const elem of allElements) {
const style = window.getComputedStyle(elem);
const zIndex = parseInt(style.zIndex);
const position = style.position;
if (
isVisible(elem) &&
(zIndex > 999 || position === 'fixed' || position === 'absolute') &&
(
elem.offsetWidth > window.innerWidth * 0.5 ||
elem.offsetHeight > window.innerHeight * 0.5 ||
style.backgroundColor.includes('rgba') ||
parseFloat(style.opacity) < 1
)
) {
elem.remove();
}
}
// Remove elements matching common selectors
for (const selector of commonSelectors) {
const elements = document.querySelectorAll(selector);
elements.forEach(elem => {
if (isVisible(elem)) {
elem.remove();
}
});
}
};
// Remove overlay elements
removeOverlays();
// Remove any fixed/sticky position elements at the top/bottom
const removeFixedElements = () => {
const elements = document.querySelectorAll('*');
elements.forEach(elem => {
const style = window.getComputedStyle(elem);
if (
(style.position === 'fixed' || style.position === 'sticky') &&
isVisible(elem)
) {
elem.remove();
}
});
};
removeFixedElements();
// Helper to remove empty block elements such as div, p, span, etc. (defined but not invoked below)
const removeEmptyBlockElements = () => {
const blockElements = document.querySelectorAll('div, p, span, section, article, header, footer, aside, nav, main, ul, ol, li, dl, dt, dd, h1, h2, h3, h4, h5, h6');
blockElements.forEach(elem => {
if (elem.innerText.trim() === '') {
elem.remove();
}
});
};
// Remove margin-right and padding-right from body (often added by modal scripts)
document.body.style.marginRight = '0px';
document.body.style.paddingRight = '0px';
document.body.style.overflow = 'auto';
// Wait a bit for any animations to complete
await new Promise(resolve => setTimeout(resolve, 100));
}
"""
try:
await page.evaluate(remove_overlays_js)
await page.wait_for_timeout(500) # Wait for any animations to complete
except Exception as e:
if self.verbose:
print(f"Warning: Failed to remove overlay elements: {str(e)}")
async def take_screenshot(self, page: Page) -> str:
try:
# The page is already loaded, just take the screenshot
screenshot = await page.screenshot(full_page=True)
return base64.b64encode(screenshot).decode('utf-8')
except Exception as e:
error_message = f"Failed to take screenshot: {str(e)}"
print(error_message)
# Generate an error image
img = Image.new('RGB', (800, 600), color='black')
draw = ImageDraw.Draw(img)
font = ImageFont.load_default()
draw.text((10, 10), error_message, fill=(255, 255, 255), font=font)
buffered = BytesIO()
img.save(buffered, format="JPEG")
return base64.b64encode(buffered.getvalue()).decode('utf-8')
finally:
await page.close()
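Tying the pieces of this strategy together, the following is a hedged usage sketch rather than code from the diff: it registers one of the documented hooks, uses the "css:"-prefixed wait_for form handled by smart_wait, and passes the screenshot and overlay-removal kwargs read by crawl(). The URL and function names outside this class are placeholders.

async def run_strategy_example():
    async def log_navigation(page):
        # The 'before_goto' hook receives the Playwright page before navigation
        print("about to navigate from:", page.url)

    async with AsyncPlaywrightCrawlerStrategy(headless=True, verbose=True) as strategy:
        strategy.set_hook("before_goto", log_navigation)
        response = await strategy.crawl(
            "https://example.com",
            wait_for="css:body",            # dispatched by smart_wait
            screenshot=True,
            remove_overlay_elements=True,
        )
        print(response.status_code, len(response.html))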

View File

@@ -2,94 +2,32 @@ import os
from pathlib import Path
import aiosqlite
import asyncio
from typing import Optional, Dict
from typing import Optional, Tuple, Dict
from contextlib import asynccontextmanager
import json
from .models import CrawlResult, MarkdownGenerationResult, StringCompatibleMarkdown
import aiofiles
from .async_logger import AsyncLogger
import logging
import json # Added for serialization/deserialization
from .utils import ensure_content_dirs, generate_content_hash
from .utils import VersionManager
from .utils import get_error_context, create_box_message
# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
base_directory = DB_PATH = os.path.join(
os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".crawl4ai"
)
DB_PATH = os.path.join(Path.home(), ".crawl4ai")
os.makedirs(DB_PATH, exist_ok=True)
DB_PATH = os.path.join(base_directory, "crawl4ai.db")
DB_PATH = os.path.join(DB_PATH, "crawl4ai.db")
class AsyncDatabaseManager:
def __init__(self, pool_size: int = 10, max_retries: int = 3):
self.db_path = DB_PATH
self.content_paths = ensure_content_dirs(os.path.dirname(DB_PATH))
self.pool_size = pool_size
self.max_retries = max_retries
self.connection_pool: Dict[int, aiosqlite.Connection] = {}
self.pool_lock = asyncio.Lock()
self.init_lock = asyncio.Lock()
self.connection_semaphore = asyncio.Semaphore(pool_size)
self._initialized = False
self.version_manager = VersionManager()
self.logger = AsyncLogger(
log_file=os.path.join(base_directory, ".crawl4ai", "crawler_db.log"),
verbose=False,
tag_width=10,
)
async def initialize(self):
"""Initialize the database and connection pool"""
try:
self.logger.info("Initializing database", tag="INIT")
# Ensure the database file exists
os.makedirs(os.path.dirname(self.db_path), exist_ok=True)
# Check if version update is needed
needs_update = self.version_manager.needs_update()
# Always ensure base table exists
await self.ainit_db()
# Verify the table exists
async with aiosqlite.connect(self.db_path, timeout=30.0) as db:
async with db.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND name='crawled_data'"
) as cursor:
result = await cursor.fetchone()
if not result:
raise Exception("crawled_data table was not created")
# If version changed or fresh install, run updates
if needs_update:
self.logger.info("New version detected, running updates", tag="INIT")
await self.update_db_schema()
from .migrations import (
run_migration,
) # Import here to avoid circular imports
await run_migration()
self.version_manager.update_version() # Update stored version after successful migration
self.logger.success(
"Version update completed successfully", tag="COMPLETE"
)
else:
self.logger.success(
"Database initialization completed successfully", tag="COMPLETE"
)
except Exception as e:
self.logger.error(
message="Database initialization error: {error}",
tag="ERROR",
params={"error": str(e)},
)
self.logger.info(
message="Database will be initialized on first use", tag="INIT"
)
raise
await self.ainit_db()
async def cleanup(self):
"""Cleanup connections when shutting down"""
async with self.pool_lock:
@@ -99,111 +37,30 @@ class AsyncDatabaseManager:
@asynccontextmanager
async def get_connection(self):
"""Connection pool manager with enhanced error handling"""
if not self._initialized:
async with self.init_lock:
if not self._initialized:
try:
await self.initialize()
self._initialized = True
except Exception as e:
import sys
error_context = get_error_context(sys.exc_info())
self.logger.error(
message="Database initialization failed:\n{error}\n\nContext:\n{context}\n\nTraceback:\n{traceback}",
tag="ERROR",
force_verbose=True,
params={
"error": str(e),
"context": error_context["code_context"],
"traceback": error_context["full_traceback"],
},
"""Connection pool manager"""
async with self.connection_semaphore:
task_id = id(asyncio.current_task())
try:
async with self.pool_lock:
if task_id not in self.connection_pool:
conn = await aiosqlite.connect(
self.db_path,
timeout=30.0
)
raise
await self.connection_semaphore.acquire()
task_id = id(asyncio.current_task())
try:
async with self.pool_lock:
if task_id not in self.connection_pool:
try:
conn = await aiosqlite.connect(self.db_path, timeout=30.0)
await conn.execute("PRAGMA journal_mode = WAL")
await conn.execute("PRAGMA busy_timeout = 5000")
# Verify database structure
async with conn.execute(
"PRAGMA table_info(crawled_data)"
) as cursor:
columns = await cursor.fetchall()
column_names = [col[1] for col in columns]
expected_columns = {
"url",
"html",
"cleaned_html",
"markdown",
"extracted_content",
"success",
"media",
"links",
"metadata",
"screenshot",
"response_headers",
"downloaded_files",
}
missing_columns = expected_columns - set(column_names)
if missing_columns:
raise ValueError(
f"Database missing columns: {missing_columns}"
)
await conn.execute('PRAGMA journal_mode = WAL')
await conn.execute('PRAGMA busy_timeout = 5000')
self.connection_pool[task_id] = conn
except Exception as e:
import sys
error_context = get_error_context(sys.exc_info())
error_message = (
f"Unexpected error in db get_connection at line {error_context['line_no']} "
f"in {error_context['function']} ({error_context['filename']}):\n"
f"Error: {str(e)}\n\n"
f"Code context:\n{error_context['code_context']}"
)
self.logger.error(
message="{error}",
tag="ERROR",
params={"error": str(error_message)},
boxes=["error"],
)
raise
yield self.connection_pool[task_id]
except Exception as e:
import sys
error_context = get_error_context(sys.exc_info())
error_message = (
f"Unexpected error in db get_connection at line {error_context['line_no']} "
f"in {error_context['function']} ({error_context['filename']}):\n"
f"Error: {str(e)}\n\n"
f"Code context:\n{error_context['code_context']}"
)
self.logger.error(
message="{error}",
tag="ERROR",
params={"error": str(error_message)},
boxes=["error"],
)
raise
finally:
async with self.pool_lock:
if task_id in self.connection_pool:
await self.connection_pool[task_id].close()
del self.connection_pool[task_id]
self.connection_semaphore.release()
yield self.connection_pool[task_id]
except Exception as e:
logger.error(f"Connection error: {e}")
raise
finally:
async with self.pool_lock:
if task_id in self.connection_pool:
await self.connection_pool[task_id].close()
del self.connection_pool[task_id]
async def execute_with_retry(self, operation, *args):
"""Execute database operations with retry logic"""
@@ -215,20 +72,14 @@ class AsyncDatabaseManager:
return result
except Exception as e:
if attempt == self.max_retries - 1:
self.logger.error(
message="Operation failed after {retries} attempts: {error}",
tag="ERROR",
force_verbose=True,
params={"retries": self.max_retries, "error": str(e)},
)
logger.error(f"Operation failed after {self.max_retries} attempts: {e}")
raise
await asyncio.sleep(1 * (attempt + 1))  # Linear backoff between retries
async def ainit_db(self):
"""Initialize database schema"""
async with aiosqlite.connect(self.db_path, timeout=30.0) as db:
await db.execute(
"""
async def _init(db):
await db.execute('''
CREATE TABLE IF NOT EXISTS crawled_data (
url TEXT PRIMARY KEY,
html TEXT,
@@ -240,325 +91,145 @@ class AsyncDatabaseManager:
links TEXT DEFAULT "{}",
metadata TEXT DEFAULT "{}",
screenshot TEXT DEFAULT "",
response_headers TEXT DEFAULT "{}",
downloaded_files TEXT DEFAULT "{}" -- New column added
response_headers TEXT DEFAULT "{}" -- New column added
)
"""
)
await db.commit()
''')
await self.execute_with_retry(_init)
await self.update_db_schema()
async def update_db_schema(self):
"""Update database schema if needed"""
async with aiosqlite.connect(self.db_path, timeout=30.0) as db:
async def _check_columns(db):
cursor = await db.execute("PRAGMA table_info(crawled_data)")
columns = await cursor.fetchall()
column_names = [column[1] for column in columns]
return [column[1] for column in columns]
# List of new columns to add
new_columns = [
"media",
"links",
"metadata",
"screenshot",
"response_headers",
"downloaded_files",
]
column_names = await self.execute_with_retry(_check_columns)
# List of new columns to add
new_columns = ['media', 'links', 'metadata', 'screenshot', 'response_headers']
for column in new_columns:
if column not in column_names:
await self.aalter_db_add_column(column)
for column in new_columns:
if column not in column_names:
await self.aalter_db_add_column(column, db)
await db.commit()
async def aalter_db_add_column(self, new_column: str, db):
async def aalter_db_add_column(self, new_column: str):
"""Add new column to the database"""
if new_column == "response_headers":
await db.execute(
f'ALTER TABLE crawled_data ADD COLUMN {new_column} TEXT DEFAULT "{{}}"'
)
else:
await db.execute(
f'ALTER TABLE crawled_data ADD COLUMN {new_column} TEXT DEFAULT ""'
)
self.logger.info(
message="Added column '{column}' to the database",
tag="INIT",
params={"column": new_column},
)
async def _alter(db):
if new_column == 'response_headers':
await db.execute(f'ALTER TABLE crawled_data ADD COLUMN {new_column} TEXT DEFAULT "{{}}"')
else:
await db.execute(f'ALTER TABLE crawled_data ADD COLUMN {new_column} TEXT DEFAULT ""')
logger.info(f"Added column '{new_column}' to the database.")
async def aget_cached_url(self, url: str) -> Optional[CrawlResult]:
"""Retrieve cached URL data as CrawlResult"""
await self.execute_with_retry(_alter)
async def aget_cached_url(self, url: str) -> Optional[Tuple[str, str, str, str, str, bool, str, str, str, str]]:
"""Retrieve cached URL data"""
async def _get(db):
async with db.execute(
"SELECT * FROM crawled_data WHERE url = ?", (url,)
'''
SELECT url, html, cleaned_html, markdown, extracted_content, success, media, links, metadata, screenshot, response_headers
FROM crawled_data WHERE url = ?
''',
(url,)
) as cursor:
row = await cursor.fetchone()
if not row:
return None
# Get column names
columns = [description[0] for description in cursor.description]
# Create dict from row data
row_dict = dict(zip(columns, row))
# Load content from files using stored hashes
content_fields = {
"html": row_dict["html"],
"cleaned_html": row_dict["cleaned_html"],
"markdown": row_dict["markdown"],
"extracted_content": row_dict["extracted_content"],
"screenshot": row_dict["screenshot"],
"screenshots": row_dict["screenshot"],
}
for field, hash_value in content_fields.items():
if hash_value:
content = await self._load_content(
hash_value,
field.split("_")[0], # Get content type from field name
)
row_dict[field] = content or ""
else:
row_dict[field] = ""
# Parse JSON fields
json_fields = [
"media",
"links",
"metadata",
"response_headers",
"markdown",
]
for field in json_fields:
try:
row_dict[field] = (
json.loads(row_dict[field]) if row_dict[field] else {}
)
except json.JSONDecodeError:
# Very UGLY, never mention it to me please
if field == "markdown" and isinstance(row_dict[field], str):
row_dict[field] = MarkdownGenerationResult(
raw_markdown=row_dict[field] or "",
markdown_with_citations="",
references_markdown="",
fit_markdown="",
fit_html="",
)
else:
row_dict[field] = {}
if isinstance(row_dict["markdown"], Dict):
if row_dict["markdown"].get("raw_markdown"):
row_dict["markdown"] = row_dict["markdown"]["raw_markdown"]
# Parse downloaded_files
try:
row_dict["downloaded_files"] = (
json.loads(row_dict["downloaded_files"])
if row_dict["downloaded_files"]
else []
if row:
# Deserialize JSON fields
return (
row[0], # url
row[1], # html
row[2], # cleaned_html
row[3], # markdown
row[4], # extracted_content
row[5], # success
json.loads(row[6] or '{}'), # media
json.loads(row[7] or '{}'), # links
json.loads(row[8] or '{}'), # metadata
row[9], # screenshot
json.loads(row[10] or '{}') # response_headers
)
except json.JSONDecodeError:
row_dict["downloaded_files"] = []
# Remove any fields not in CrawlResult model
valid_fields = CrawlResult.__annotations__.keys()
filtered_dict = {k: v for k, v in row_dict.items() if k in valid_fields}
filtered_dict["markdown"] = row_dict["markdown"]
return CrawlResult(**filtered_dict)
return None
try:
return await self.execute_with_retry(_get)
except Exception as e:
self.logger.error(
message="Error retrieving cached URL: {error}",
tag="ERROR",
force_verbose=True,
params={"error": str(e)},
)
logger.error(f"Error retrieving cached URL: {e}")
return None
async def acache_url(self, result: CrawlResult):
"""Cache CrawlResult data"""
# Store content files and get hashes
content_map = {
"html": (result.html, "html"),
"cleaned_html": (result.cleaned_html or "", "cleaned"),
"markdown": None,
"extracted_content": (result.extracted_content or "", "extracted"),
"screenshot": (result.screenshot or "", "screenshots"),
}
try:
if isinstance(result.markdown, StringCompatibleMarkdown):
content_map["markdown"] = (
result.markdown,
"markdown",
)
elif isinstance(result.markdown, MarkdownGenerationResult):
content_map["markdown"] = (
result.markdown.model_dump_json(),
"markdown",
)
elif isinstance(result.markdown, str):
markdown_result = MarkdownGenerationResult(raw_markdown=result.markdown)
content_map["markdown"] = (
markdown_result.model_dump_json(),
"markdown",
)
else:
content_map["markdown"] = (
MarkdownGenerationResult().model_dump_json(),
"markdown",
)
except Exception as e:
self.logger.warning(
message=f"Error processing markdown content: {str(e)}", tag="WARNING"
)
# Fallback to empty markdown result
content_map["markdown"] = (
MarkdownGenerationResult().model_dump_json(),
"markdown",
)
content_hashes = {}
for field, (content, content_type) in content_map.items():
content_hashes[field] = await self._store_content(content, content_type)
async def acache_url(
self,
url: str,
html: str,
cleaned_html: str,
markdown: str,
extracted_content: str,
success: bool,
media: str = "{}",
links: str = "{}",
metadata: str = "{}",
screenshot: str = "",
response_headers: str = "{}" # New parameter added
):
"""Cache URL data with retry logic"""
async def _cache(db):
await db.execute(
"""
await db.execute('''
INSERT INTO crawled_data (
url, html, cleaned_html, markdown,
extracted_content, success, media, links, metadata,
screenshot, response_headers, downloaded_files
url, html, cleaned_html, markdown, extracted_content, success, media, links, metadata, screenshot, response_headers
)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
ON CONFLICT(url) DO UPDATE SET
html = excluded.html,
cleaned_html = excluded.cleaned_html,
markdown = excluded.markdown,
extracted_content = excluded.extracted_content,
success = excluded.success,
media = excluded.media,
links = excluded.links,
metadata = excluded.metadata,
media = excluded.media,
links = excluded.links,
metadata = excluded.metadata,
screenshot = excluded.screenshot,
response_headers = excluded.response_headers,
downloaded_files = excluded.downloaded_files
""",
(
result.url,
content_hashes["html"],
content_hashes["cleaned_html"],
content_hashes["markdown"],
content_hashes["extracted_content"],
result.success,
json.dumps(result.media),
json.dumps(result.links),
json.dumps(result.metadata or {}),
content_hashes["screenshot"],
json.dumps(result.response_headers or {}),
json.dumps(result.downloaded_files or []),
),
)
response_headers = excluded.response_headers -- Update response_headers
''', (url, html, cleaned_html, markdown, extracted_content, success, media, links, metadata, screenshot, response_headers))
try:
await self.execute_with_retry(_cache)
except Exception as e:
self.logger.error(
message="Error caching URL: {error}",
tag="ERROR",
force_verbose=True,
params={"error": str(e)},
)
logger.error(f"Error caching URL: {e}")
async def aget_total_count(self) -> int:
"""Get total number of cached URLs"""
async def _count(db):
async with db.execute("SELECT COUNT(*) FROM crawled_data") as cursor:
async with db.execute('SELECT COUNT(*) FROM crawled_data') as cursor:
result = await cursor.fetchone()
return result[0] if result else 0
try:
return await self.execute_with_retry(_count)
except Exception as e:
self.logger.error(
message="Error getting total count: {error}",
tag="ERROR",
force_verbose=True,
params={"error": str(e)},
)
logger.error(f"Error getting total count: {e}")
return 0
async def aclear_db(self):
"""Clear all data from the database"""
async def _clear(db):
await db.execute("DELETE FROM crawled_data")
await db.execute('DELETE FROM crawled_data')
try:
await self.execute_with_retry(_clear)
except Exception as e:
self.logger.error(
message="Error clearing database: {error}",
tag="ERROR",
force_verbose=True,
params={"error": str(e)},
)
logger.error(f"Error clearing database: {e}")
async def aflush_db(self):
"""Drop the entire table"""
async def _flush(db):
await db.execute("DROP TABLE IF EXISTS crawled_data")
await db.execute('DROP TABLE IF EXISTS crawled_data')
try:
await self.execute_with_retry(_flush)
except Exception as e:
self.logger.error(
message="Error flushing database: {error}",
tag="ERROR",
force_verbose=True,
params={"error": str(e)},
)
async def _store_content(self, content: str, content_type: str) -> str:
"""Store content in filesystem and return hash"""
if not content:
return ""
content_hash = generate_content_hash(content)
file_path = os.path.join(self.content_paths[content_type], content_hash)
# Only write if file doesn't exist
if not os.path.exists(file_path):
async with aiofiles.open(file_path, "w", encoding="utf-8") as f:
await f.write(content)
return content_hash
async def _load_content(
self, content_hash: str, content_type: str
) -> Optional[str]:
"""Load content from filesystem by hash"""
if not content_hash:
return None
file_path = os.path.join(self.content_paths[content_type], content_hash)
try:
async with aiofiles.open(file_path, "r", encoding="utf-8") as f:
return await f.read()
except:
self.logger.error(
message="Failed to load content: {file_path}",
tag="ERROR",
force_verbose=True,
params={"file_path": file_path},
)
return None
logger.error(f"Error flushing database: {e}")
# Create a singleton instance
async_db_manager = AsyncDatabaseManager()
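As context for the two versions interleaved above (note that acache_url differs between them: the newer code accepts a CrawlResult while the older one takes individual string fields), a minimal sketch of driving the shared singleton with the methods present in both versions might look like this; it is illustrative only and the function name is hypothetical.

async def demo_cache_lookup():
    await async_db_manager.ainit_db()
    cached = await async_db_manager.aget_cached_url("https://example.com")
    if cached is None:
        print("not cached yet")
    print("total cached URLs:", await async_db_manager.aget_total_count())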

View File

@@ -1,771 +0,0 @@
from typing import Dict, Optional, List, Tuple, Union
from .async_configs import CrawlerRunConfig
from .models import (
CrawlResult,
CrawlerTaskResult,
CrawlStatus,
DomainState,
)
from .components.crawler_monitor import CrawlerMonitor
from .types import AsyncWebCrawler
from collections.abc import AsyncGenerator
import time
import psutil
import asyncio
import uuid
from urllib.parse import urlparse
import random
from abc import ABC, abstractmethod
from .utils import get_true_memory_usage_percent
class RateLimiter:
def __init__(
self,
base_delay: Tuple[float, float] = (1.0, 3.0),
max_delay: float = 60.0,
max_retries: int = 3,
rate_limit_codes: List[int] = None,
):
self.base_delay = base_delay
self.max_delay = max_delay
self.max_retries = max_retries
self.rate_limit_codes = rate_limit_codes or [429, 503]
self.domains: Dict[str, DomainState] = {}
def get_domain(self, url: str) -> str:
return urlparse(url).netloc
async def wait_if_needed(self, url: str) -> None:
domain = self.get_domain(url)
state = self.domains.get(domain)
if not state:
self.domains[domain] = DomainState()
state = self.domains[domain]
now = time.time()
if state.last_request_time:
wait_time = max(0, state.current_delay - (now - state.last_request_time))
if wait_time > 0:
await asyncio.sleep(wait_time)
# Random delay within base range if no current delay
if state.current_delay == 0:
state.current_delay = random.uniform(*self.base_delay)
state.last_request_time = time.time()
def update_delay(self, url: str, status_code: int) -> bool:
domain = self.get_domain(url)
state = self.domains[domain]
if status_code in self.rate_limit_codes:
state.fail_count += 1
if state.fail_count > self.max_retries:
return False
# Exponential backoff with random jitter
state.current_delay = min(
state.current_delay * 2 * random.uniform(0.75, 1.25), self.max_delay
)
else:
# Gradually reduce delay on success
state.current_delay = max(
random.uniform(*self.base_delay), state.current_delay * 0.75
)
state.fail_count = 0
return True
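To illustrate the policy above: on a 429/503 the per-domain delay roughly doubles (with about 25% jitter) up to max_delay, and on success it decays back toward the base range, with update_delay returning False once max_retries is exceeded. The snippet below is a standalone sketch, not part of the removed file; the fake status code and function name are placeholders.

async def rate_limited_fetch(urls):
    limiter = RateLimiter(base_delay=(1.0, 2.0), max_delay=30.0, max_retries=3)
    for url in urls:
        await limiter.wait_if_needed(url)   # sleeps if the domain is still cooling down
        status_code = 429                    # pretend the server throttled this request
        if not limiter.update_delay(url, status_code):
            print(f"giving up on {urlparse(url).netloc} after repeated rate limits")
            break
        # current_delay grows roughly 1.5 -> 3 -> 6 -> ... and is capped at 30.0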
class BaseDispatcher(ABC):
def __init__(
self,
rate_limiter: Optional[RateLimiter] = None,
monitor: Optional[CrawlerMonitor] = None,
):
self.crawler = None
self._domain_last_hit: Dict[str, float] = {}
self.concurrent_sessions = 0
self.rate_limiter = rate_limiter
self.monitor = monitor
def select_config(self, url: str, configs: Union[CrawlerRunConfig, List[CrawlerRunConfig]]) -> Optional[CrawlerRunConfig]:
"""Select the appropriate config for a given URL.
Args:
url: The URL to match against
configs: Single config or list of configs to choose from
Returns:
The matching config, or None if no match found
"""
# Single config - return as is
if isinstance(configs, CrawlerRunConfig):
return configs
# Empty list - return None
if not configs:
return None
# Find first matching config
for config in configs:
if config.is_match(url):
return config
# No match found - return None to indicate URL should be skipped
return None
@abstractmethod
async def crawl_url(
self,
url: str,
config: Union[CrawlerRunConfig, List[CrawlerRunConfig]],
task_id: str,
monitor: Optional[CrawlerMonitor] = None,
) -> CrawlerTaskResult:
pass
@abstractmethod
async def run_urls(
self,
urls: List[str],
crawler: AsyncWebCrawler, # noqa: F821
config: Union[CrawlerRunConfig, List[CrawlerRunConfig]],
monitor: Optional[CrawlerMonitor] = None,
) -> List[CrawlerTaskResult]:
pass
class MemoryAdaptiveDispatcher(BaseDispatcher):
def __init__(
self,
memory_threshold_percent: float = 90.0,
critical_threshold_percent: float = 95.0, # New critical threshold
recovery_threshold_percent: float = 85.0, # New recovery threshold
check_interval: float = 1.0,
max_session_permit: int = 20,
fairness_timeout: float = 600.0, # 10 minutes before prioritizing long-waiting URLs
memory_wait_timeout: Optional[float] = 600.0,
rate_limiter: Optional[RateLimiter] = None,
monitor: Optional[CrawlerMonitor] = None,
):
super().__init__(rate_limiter, monitor)
self.memory_threshold_percent = memory_threshold_percent
self.critical_threshold_percent = critical_threshold_percent
self.recovery_threshold_percent = recovery_threshold_percent
self.check_interval = check_interval
self.max_session_permit = max_session_permit
self.fairness_timeout = fairness_timeout
self.memory_wait_timeout = memory_wait_timeout
self.result_queue = asyncio.Queue()
self.task_queue = asyncio.PriorityQueue() # Priority queue for better management
self.memory_pressure_mode = False # Flag to indicate when we're in memory pressure mode
self.current_memory_percent = 0.0 # Track current memory usage
self._high_memory_start_time: Optional[float] = None
async def _memory_monitor_task(self):
"""Background task to continuously monitor memory usage and update state"""
while True:
self.current_memory_percent = get_true_memory_usage_percent()
# Enter memory pressure mode if we cross the threshold
if self.current_memory_percent >= self.memory_threshold_percent:
if not self.memory_pressure_mode:
self.memory_pressure_mode = True
self._high_memory_start_time = time.time()
if self.monitor:
self.monitor.update_memory_status("PRESSURE")
else:
if self._high_memory_start_time is None:
self._high_memory_start_time = time.time()
if (
self.memory_wait_timeout is not None
and self._high_memory_start_time is not None
and time.time() - self._high_memory_start_time >= self.memory_wait_timeout
):
raise MemoryError(
"Memory usage exceeded threshold for"
f" {self.memory_wait_timeout} seconds"
)
# Exit memory pressure mode if we go below recovery threshold
elif self.memory_pressure_mode and self.current_memory_percent <= self.recovery_threshold_percent:
self.memory_pressure_mode = False
self._high_memory_start_time = None
if self.monitor:
self.monitor.update_memory_status("NORMAL")
elif self.current_memory_percent < self.memory_threshold_percent:
self._high_memory_start_time = None
# In critical mode, we might need to take more drastic action
if self.current_memory_percent >= self.critical_threshold_percent:
if self.monitor:
self.monitor.update_memory_status("CRITICAL")
# We could implement additional memory-saving measures here
await asyncio.sleep(self.check_interval)
def _get_priority_score(self, wait_time: float, retry_count: int) -> float:
"""Calculate priority score (lower is higher priority)
- URLs waiting longer than fairness_timeout get higher priority
- More retry attempts decreases priority
"""
if wait_time > self.fairness_timeout:
# High priority for long-waiting URLs
return -wait_time
# Standard priority based on retries
return retry_count
async def crawl_url(
self,
url: str,
config: Union[CrawlerRunConfig, List[CrawlerRunConfig]],
task_id: str,
retry_count: int = 0,
) -> CrawlerTaskResult:
start_time = time.time()
error_message = ""
memory_usage = peak_memory = 0.0
# Select appropriate config for this URL
selected_config = self.select_config(url, config)
# If no config matches, return failed result
if selected_config is None:
error_message = f"No matching configuration found for URL: {url}"
if self.monitor:
self.monitor.update_task(
task_id,
status=CrawlStatus.FAILED,
error_message=error_message
)
return CrawlerTaskResult(
task_id=task_id,
url=url,
result=CrawlResult(
url=url,
html="",
metadata={"status": "no_config_match"},
success=False,
error_message=error_message
),
memory_usage=0,
peak_memory=0,
start_time=start_time,
end_time=time.time(),
error_message=error_message,
retry_count=retry_count
)
# Get starting memory for accurate measurement
process = psutil.Process()
start_memory = process.memory_info().rss / (1024 * 1024)
try:
if self.monitor:
self.monitor.update_task(
task_id,
status=CrawlStatus.IN_PROGRESS,
start_time=start_time,
retry_count=retry_count
)
self.concurrent_sessions += 1
if self.rate_limiter:
await self.rate_limiter.wait_if_needed(url)
# Check if we're in critical memory state
if self.current_memory_percent >= self.critical_threshold_percent:
# Requeue this task with increased priority and retry count
enqueue_time = time.time()
priority = self._get_priority_score(enqueue_time - start_time, retry_count + 1)
await self.task_queue.put((priority, (url, task_id, retry_count + 1, enqueue_time)))
# Update monitoring
if self.monitor:
self.monitor.update_task(
task_id,
status=CrawlStatus.QUEUED,
error_message="Requeued due to critical memory pressure"
)
# Return placeholder result with requeued status
return CrawlerTaskResult(
task_id=task_id,
url=url,
result=CrawlResult(
url=url, html="", metadata={"status": "requeued"},
success=False, error_message="Requeued due to critical memory pressure"
),
memory_usage=0,
peak_memory=0,
start_time=start_time,
end_time=time.time(),
error_message="Requeued due to critical memory pressure",
retry_count=retry_count + 1
)
# Execute the crawl with selected config
result = await self.crawler.arun(url, config=selected_config, session_id=task_id)
# Measure memory usage
end_memory = process.memory_info().rss / (1024 * 1024)
memory_usage = peak_memory = end_memory - start_memory
# Handle rate limiting
if self.rate_limiter and result.status_code:
if not self.rate_limiter.update_delay(url, result.status_code):
error_message = f"Rate limit retry count exceeded for domain {urlparse(url).netloc}"
if self.monitor:
self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
# Update status based on result
if not result.success:
error_message = result.error_message
if self.monitor:
self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
elif self.monitor:
self.monitor.update_task(task_id, status=CrawlStatus.COMPLETED)
except Exception as e:
error_message = str(e)
if self.monitor:
self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
result = CrawlResult(
url=url, html="", metadata={}, success=False, error_message=str(e)
)
finally:
end_time = time.time()
if self.monitor:
self.monitor.update_task(
task_id,
end_time=end_time,
memory_usage=memory_usage,
peak_memory=peak_memory,
error_message=error_message,
retry_count=retry_count
)
self.concurrent_sessions -= 1
return CrawlerTaskResult(
task_id=task_id,
url=url,
result=result,
memory_usage=memory_usage,
peak_memory=peak_memory,
start_time=start_time,
end_time=end_time,
error_message=error_message,
retry_count=retry_count
)
async def run_urls(
self,
urls: List[str],
crawler: AsyncWebCrawler,
config: Union[CrawlerRunConfig, List[CrawlerRunConfig]],
) -> List[CrawlerTaskResult]:
self.crawler = crawler
# Start the memory monitor task
memory_monitor = asyncio.create_task(self._memory_monitor_task())
if self.monitor:
self.monitor.start()
results = []
try:
# Initialize task queue
for url in urls:
task_id = str(uuid.uuid4())
if self.monitor:
self.monitor.add_task(task_id, url)
# Add to queue with initial priority 0, retry count 0, and current time
await self.task_queue.put((0, (url, task_id, 0, time.time())))
active_tasks = []
# Process until both queues are empty
while not self.task_queue.empty() or active_tasks:
if memory_monitor.done():
exc = memory_monitor.exception()
if exc:
for t in active_tasks:
t.cancel()
raise exc
# If memory pressure is low, greedily fill all available slots
if not self.memory_pressure_mode:
slots = self.max_session_permit - len(active_tasks)
while slots > 0:
try:
# Use get_nowait() to immediately get tasks without blocking
priority, (url, task_id, retry_count, enqueue_time) = self.task_queue.get_nowait()
# Create and start the task
task = asyncio.create_task(
self.crawl_url(url, config, task_id, retry_count)
)
active_tasks.append(task)
# Update waiting time in monitor
if self.monitor:
wait_time = time.time() - enqueue_time
self.monitor.update_task(
task_id,
wait_time=wait_time,
status=CrawlStatus.IN_PROGRESS
)
slots -= 1
except asyncio.QueueEmpty:
# No more tasks in queue, exit the loop
break
# Wait for completion even if queue is starved
if active_tasks:
done, pending = await asyncio.wait(
active_tasks, timeout=0.1, return_when=asyncio.FIRST_COMPLETED
)
# Process completed tasks
for completed_task in done:
result = await completed_task
results.append(result)
# Update active tasks list
active_tasks = list(pending)
else:
# If no active tasks but still waiting, sleep briefly
await asyncio.sleep(self.check_interval / 2)
# Update priorities for waiting tasks if needed
await self._update_queue_priorities()
except Exception as e:
if self.monitor:
self.monitor.update_memory_status(f"QUEUE_ERROR: {str(e)}")
finally:
# Clean up
memory_monitor.cancel()
if self.monitor:
self.monitor.stop()
return results
async def _update_queue_priorities(self):
"""Periodically update priorities of items in the queue to prevent starvation"""
# Skip if queue is empty
if self.task_queue.empty():
return
# Use a drain-and-refill approach to update all priorities
temp_items = []
# Drain the queue (with a safety timeout to prevent blocking)
try:
drain_start = time.time()
while not self.task_queue.empty() and time.time() - drain_start < 5.0: # 5 second safety timeout
try:
# Get item from queue with timeout
priority, (url, task_id, retry_count, enqueue_time) = await asyncio.wait_for(
self.task_queue.get(), timeout=0.1
)
# Calculate new priority based on current wait time
current_time = time.time()
wait_time = current_time - enqueue_time
new_priority = self._get_priority_score(wait_time, retry_count)
# Store with updated priority
temp_items.append((new_priority, (url, task_id, retry_count, enqueue_time)))
# Update monitoring stats for this task
if self.monitor and task_id in self.monitor.stats:
self.monitor.update_task(task_id, wait_time=wait_time)
except asyncio.TimeoutError:
# Queue might be empty or very slow
break
except Exception as e:
# If anything goes wrong, log it and still fall through so the queue is refilled with whatever was drained
if self.monitor: self.monitor.update_memory_status(f"QUEUE_ERROR: {str(e)}")
# Calculate queue statistics
if temp_items and self.monitor:
total_queued = len(temp_items)
wait_times = [item[1][3] for item in temp_items]
highest_wait_time = time.time() - min(wait_times) if wait_times else 0
avg_wait_time = sum(time.time() - t for t in wait_times) / len(wait_times) if wait_times else 0
# Update queue statistics in monitor
self.monitor.update_queue_statistics(
total_queued=total_queued,
highest_wait_time=highest_wait_time,
avg_wait_time=avg_wait_time
)
# Sort by priority (lowest number = highest priority)
temp_items.sort(key=lambda x: x[0])
# Refill the queue with updated priorities
for item in temp_items:
await self.task_queue.put(item)
async def run_urls_stream(
self,
urls: List[str],
crawler: AsyncWebCrawler,
config: Union[CrawlerRunConfig, List[CrawlerRunConfig]],
) -> AsyncGenerator[CrawlerTaskResult, None]:
self.crawler = crawler
# Start the memory monitor task
memory_monitor = asyncio.create_task(self._memory_monitor_task())
if self.monitor:
self.monitor.start()
try:
# Initialize task queue
for url in urls:
task_id = str(uuid.uuid4())
if self.monitor:
self.monitor.add_task(task_id, url)
# Add to queue with initial priority 0, retry count 0, and current time
await self.task_queue.put((0, (url, task_id, 0, time.time())))
active_tasks = []
completed_count = 0
total_urls = len(urls)
while completed_count < total_urls:
if memory_monitor.done():
exc = memory_monitor.exception()
if exc:
for t in active_tasks:
t.cancel()
raise exc
# If memory pressure is low, greedily fill all available slots
if not self.memory_pressure_mode:
slots = self.max_session_permit - len(active_tasks)
while slots > 0:
try:
# Use get_nowait() to immediately get tasks without blocking
priority, (url, task_id, retry_count, enqueue_time) = self.task_queue.get_nowait()
# Create and start the task
task = asyncio.create_task(
self.crawl_url(url, config, task_id, retry_count)
)
active_tasks.append(task)
# Update waiting time in monitor
if self.monitor:
wait_time = time.time() - enqueue_time
self.monitor.update_task(
task_id,
wait_time=wait_time,
status=CrawlStatus.IN_PROGRESS
)
slots -= 1
except asyncio.QueueEmpty:
# No more tasks in queue, exit the loop
break
# Process completed tasks and yield results
if active_tasks:
done, pending = await asyncio.wait(
active_tasks, timeout=0.1, return_when=asyncio.FIRST_COMPLETED
)
for completed_task in done:
result = await completed_task
# Only count as completed if it wasn't requeued
if "requeued" not in result.error_message:
completed_count += 1
yield result
# Update active tasks list
active_tasks = list(pending)
else:
# If no active tasks but still waiting, sleep briefly
await asyncio.sleep(self.check_interval / 2)
# Update priorities for waiting tasks if needed
await self._update_queue_priorities()
finally:
# Clean up
memory_monitor.cancel()
if self.monitor:
self.monitor.stop()
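# Minimal streaming usage sketch (illustrative only; `dispatcher` is an instance of the
# enclosing class, `urls` is a list of URL strings, and a default CrawlerRunConfig() is
# assumed to be acceptable):
#
#     async with AsyncWebCrawler() as crawler:
#         async for task_result in dispatcher.run_urls_stream(urls, crawler, CrawlerRunConfig()):
#             print(task_result.url, task_result.result.success)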
class SemaphoreDispatcher(BaseDispatcher):
def __init__(
self,
semaphore_count: int = 5,
max_session_permit: int = 20,
rate_limiter: Optional[RateLimiter] = None,
monitor: Optional[CrawlerMonitor] = None,
):
super().__init__(rate_limiter, monitor)
self.semaphore_count = semaphore_count
self.max_session_permit = max_session_permit
async def crawl_url(
self,
url: str,
config: Union[CrawlerRunConfig, List[CrawlerRunConfig]],
task_id: str,
semaphore: asyncio.Semaphore = None,
) -> CrawlerTaskResult:
start_time = time.time()
error_message = ""
memory_usage = peak_memory = 0.0
# Select appropriate config for this URL
selected_config = self.select_config(url, config)
# If no config matches, return failed result
if selected_config is None:
error_message = f"No matching configuration found for URL: {url}"
if self.monitor:
self.monitor.update_task(
task_id,
status=CrawlStatus.FAILED,
error_message=error_message
)
return CrawlerTaskResult(
task_id=task_id,
url=url,
result=CrawlResult(
url=url,
html="",
metadata={"status": "no_config_match"},
success=False,
error_message=error_message
),
memory_usage=0,
peak_memory=0,
start_time=start_time,
end_time=time.time(),
error_message=error_message
)
try:
if self.monitor:
self.monitor.update_task(
task_id, status=CrawlStatus.IN_PROGRESS, start_time=start_time
)
if self.rate_limiter:
await self.rate_limiter.wait_if_needed(url)
async with semaphore:
process = psutil.Process()
start_memory = process.memory_info().rss / (1024 * 1024)
result = await self.crawler.arun(url, config=selected_config, session_id=task_id)
end_memory = process.memory_info().rss / (1024 * 1024)
memory_usage = peak_memory = end_memory - start_memory
if self.rate_limiter and result.status_code:
if not self.rate_limiter.update_delay(url, result.status_code):
error_message = f"Rate limit retry count exceeded for domain {urlparse(url).netloc}"
if self.monitor:
self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
return CrawlerTaskResult(
task_id=task_id,
url=url,
result=result,
memory_usage=memory_usage,
peak_memory=peak_memory,
start_time=start_time,
end_time=time.time(),
error_message=error_message,
)
if not result.success:
error_message = result.error_message
if self.monitor:
self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
elif self.monitor:
self.monitor.update_task(task_id, status=CrawlStatus.COMPLETED)
except Exception as e:
error_message = str(e)
if self.monitor:
self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
result = CrawlResult(
url=url, html="", metadata={}, success=False, error_message=str(e)
)
finally:
end_time = time.time()
if self.monitor:
self.monitor.update_task(
task_id,
end_time=end_time,
memory_usage=memory_usage,
peak_memory=peak_memory,
error_message=error_message,
)
return CrawlerTaskResult(
task_id=task_id,
url=url,
result=result,
memory_usage=memory_usage,
peak_memory=peak_memory,
start_time=start_time,
end_time=end_time,
error_message=error_message,
)
async def run_urls(
self,
crawler: AsyncWebCrawler, # noqa: F821
urls: List[str],
config: Union[CrawlerRunConfig, List[CrawlerRunConfig]],
) -> List[CrawlerTaskResult]:
self.crawler = crawler
if self.monitor:
self.monitor.start()
try:
semaphore = asyncio.Semaphore(self.semaphore_count)
tasks = []
for url in urls:
task_id = str(uuid.uuid4())
if self.monitor:
self.monitor.add_task(task_id, url)
task = asyncio.create_task(
self.crawl_url(url, config, task_id, semaphore)
)
tasks.append(task)
return await asyncio.gather(*tasks, return_exceptions=True)
finally:
if self.monitor:
self.monitor.stop()
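# Illustrative usage sketch for SemaphoreDispatcher (not part of this module's API; the
# import path, URL list, and default CrawlerRunConfig() below are placeholder assumptions):
async def _demo_semaphore_dispatcher():
    from crawl4ai import AsyncWebCrawler, CrawlerRunConfig  # assumed import path
    dispatcher = SemaphoreDispatcher(semaphore_count=3)
    config = CrawlerRunConfig()
    async with AsyncWebCrawler() as crawler:
        results = await dispatcher.run_urls(crawler, ["https://example.com"], config)
        for task_result in results:
            if isinstance(task_result, Exception):
                continue  # gather(return_exceptions=True) can yield raw exceptions
            print(task_result.url, task_result.result.success)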

741
crawl4ai/async_executor.py Normal file
View File

@@ -0,0 +1,741 @@
from __future__ import annotations
import asyncio
import psutil
import logging
import time
import sqlite3
import aiosqlite
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Callable, Any, Set, Type
from typing import Awaitable
from pathlib import Path
import json
from datetime import datetime
from typing import ClassVar, Type, Union
import inspect
# Imports from your crawler package
from .async_crawler_strategy import AsyncCrawlerStrategy, AsyncPlaywrightCrawlerStrategy
from .chunking_strategy import ChunkingStrategy, RegexChunking
from .extraction_strategy import ExtractionStrategy
from .models import CrawlResult
from .config import MIN_WORD_THRESHOLD
from .async_webcrawler import AsyncWebCrawler
from .config import MAX_METRICS_HISTORY
# Configure logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s [%(levelname)s] %(name)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S'
)
# self.logger.error(f"Executor {self.__class__.__name__}: Error message", exc_info=True)
# self.logger.info(f"Executor {self.__class__.__name__}: Info message")
# self.logger.warning(f"Executor {self.__class__.__name__}: Warning message")
# Enums and Constants
class ExecutionMode(Enum):
"""Execution mode for the crawler executor."""
SPEED = "speed"
RESOURCE = "resource"
class TaskState(Enum):
"""Possible states for a crawling task."""
PENDING = "pending"
SCHEDULED = "scheduled"
RUNNING = "running"
COMPLETED = "completed"
FAILED = "failed"
RETRYING = "retrying"
# Types of callbacks we should support
class CallbackType(Enum):
PRE_EXECUTION = "pre_execution" # Before processing a URL
POST_EXECUTION = "post_execution" # After successful processing
ON_ERROR = "on_error" # When an error occurs
ON_RETRY = "on_retry" # Before retrying a failed URL
ON_BATCH_START = "on_batch_start" # Before starting a batch
ON_BATCH_END = "on_batch_end" # After completing a batch
ON_COMPLETE = "on_complete" # After all URLs are processed
@dataclass
class SystemMetrics:
"""System resource metrics."""
cpu_percent: float
memory_percent: float
available_memory: int
timestamp: float
@classmethod
def capture(cls) -> 'SystemMetrics':
"""Capture current system metrics."""
return cls(
cpu_percent=psutil.cpu_percent(),
memory_percent=psutil.virtual_memory().percent,
available_memory=psutil.virtual_memory().available,
timestamp=time.time()
)
@dataclass
class TaskMetadata:
"""Metadata for a crawling task."""
url: str
state: TaskState
attempts: int = 0
last_attempt: Optional[float] = None
error: Optional[str] = None
result: Optional[Any] = None
@dataclass
class ExecutorMetrics:
"""Performance and resource metrics for the executor."""
# Performance metrics
total_urls: int = 0
completed_urls: int = 0
failed_urls: int = 0
start_time: Optional[float] = None
total_retries: int = 0
response_times: List[float] = field(default_factory=list)
# Resource metrics
system_metrics: List[SystemMetrics] = field(default_factory=list)
active_connections: int = 0
def capture_system_metrics(self):
"""Capture system metrics and enforce history size limit."""
metrics = SystemMetrics.capture()
self.system_metrics.append(metrics)
if len(self.system_metrics) > MAX_METRICS_HISTORY:
self.system_metrics.pop(0) # Remove the oldest metric
@property
def urls_per_second(self) -> float:
"""Calculate URLs processed per second."""
if not self.start_time or not self.completed_urls:
return 0.0
duration = time.time() - self.start_time
return self.completed_urls / duration if duration > 0 else 0
@property
def success_rate(self) -> float:
"""Calculate success rate as percentage."""
if not self.total_urls:
return 0.0
return (self.completed_urls / self.total_urls) * 100
@property
def retry_rate(self) -> float:
"""Calculate retry rate as percentage."""
if not self.total_urls:
return 0.0
return (self.total_retries / self.total_urls) * 100
@property
def average_response_time(self) -> float:
"""Calculate average response time in seconds."""
if not self.response_times:
return 0.0
return sum(self.response_times) / len(self.response_times)
def to_dict(self) -> Dict[str, Any]:
"""Convert metrics to dictionary format."""
return {
"performance": {
"urls_per_second": self.urls_per_second,
"success_rate": self.success_rate,
"retry_rate": self.retry_rate,
"average_response_time": self.average_response_time,
"total_urls": self.total_urls,
"completed_urls": self.completed_urls,
"failed_urls": self.failed_urls
},
"resources": {
"cpu_utilization": self.system_metrics[-1].cpu_percent if self.system_metrics else 0,
"memory_usage": self.system_metrics[-1].memory_percent if self.system_metrics else 0,
"active_connections": self.active_connections
}
}
class ResourceMonitor:
"""Monitors and manages system resources."""
def __init__(self, mode: ExecutionMode):
self.mode = mode
self.metrics_history: List[SystemMetrics] = []
self._setup_thresholds()
def _setup_thresholds(self):
"""Set up resource thresholds based on execution mode."""
if self.mode == ExecutionMode.SPEED:
self.memory_threshold = 80 # 80% memory usage limit
self.cpu_threshold = 90 # 90% CPU usage limit
else:
self.memory_threshold = 40 # 40% memory usage limit
self.cpu_threshold = 30 # 30% CPU usage limit
async def check_resources(self) -> bool:
"""Check if system resources are within acceptable limits."""
metrics = SystemMetrics.capture()
self.metrics_history.append(metrics)
# Keep only last hour of metrics
cutoff_time = time.time() - 3600
self.metrics_history = [m for m in self.metrics_history if m.timestamp > cutoff_time]
return (metrics.cpu_percent < self.cpu_threshold and
metrics.memory_percent < self.memory_threshold)
def get_optimal_batch_size(self, total_urls: int) -> int:
metrics = SystemMetrics.capture()
if self.mode == ExecutionMode.SPEED:
base_size = min(1000, total_urls)
# Adjust based on resource usage
cpu_factor = max(0.0, (self.cpu_threshold - metrics.cpu_percent) / self.cpu_threshold)
mem_factor = max(0.0, (self.memory_threshold - metrics.memory_percent) / self.memory_threshold)
min_factor = min(cpu_factor, mem_factor)
adjusted_size = max(1, int(base_size * min_factor))
return min(total_urls, adjusted_size)
else:
# For resource optimization, use a conservative batch size based on resource usage
cpu_factor = max(0.1, (self.cpu_threshold - metrics.cpu_percent) / self.cpu_threshold)
mem_factor = max(0.1, (self.memory_threshold - metrics.memory_percent) / self.memory_threshold)
min_factor = min(cpu_factor, mem_factor)
adjusted_size = max(1, int(50 * min_factor))
return min(total_urls, adjusted_size)
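# Worked example (illustrative): in SPEED mode with cpu_threshold=90 and 45% CPU in use,
# cpu_factor = (90 - 45) / 90 = 0.5; if memory gives an equal or larger factor, a
# 1000-URL base batch is scaled down to int(1000 * 0.5) = 500 URLs.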
class ExecutorControl:
"""Control interface for the executor."""
def __init__(self):
self._paused = False
self._cancelled = False
self._pause_event = asyncio.Event()
self._pause_event.set() # Not paused initially
self._lock = asyncio.Lock() # Lock to protect shared state
self.logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}")
async def pause(self):
"""Pause the execution."""
async with self._lock:
self._paused = True
self._pause_event.clear()
async def resume(self):
"""Resume the execution."""
async with self._lock:
self._paused = False
self._pause_event.set()
async def cancel(self):
"""Cancel all pending operations."""
async with self._lock:
self._cancelled = True
self._pause_event.set() # Release any paused operations
async def is_paused(self) -> bool:
"""Check if execution is paused."""
async with self._lock:
return self._paused
async def is_cancelled(self) -> bool:
"""Check if execution is cancelled."""
async with self._lock:
return self._cancelled
async def wait_if_paused(self, timeout: Optional[float] = None):
"""Wait if execution is paused, with an optional timeout."""
try:
await asyncio.wait_for(self._pause_event.wait(), timeout=timeout)
except asyncio.TimeoutError:
# Timeout occurred, handle as needed
async with self._lock:
self._paused = False # Optionally reset the paused state
self._pause_event.set()
# Optionally log a warning
self.logger.warning(f"ExecutorControl: wait_if_paused() timed out after {timeout} seconds. Proceeding with execution.")
async def reset(self):
"""Reset control state."""
async with self._lock:
self._paused = False
self._cancelled = False
self._pause_event.set()
class ExecutorStrategy(ABC):
"""Abstract Base class for executor strategies.
Callbacks:
- PRE_EXECUTION: Callable[[str, Dict[str, Any]], None]
- POST_EXECUTION: Callable[[str, Any, Dict[str, Any]], None]
- ON_ERROR: Callable[[str, Exception, Dict[str, Any]], None]
- ON_RETRY: Callable[[str, int, Dict[str, Any]], None]
- ON_BATCH_START: Callable[[List[str], Dict[str, Any]], None]
- ON_BATCH_END: Callable[[List[str], Dict[str, Any]], None]
- ON_COMPLETE: Callable[[Dict[str, Any], Dict[str, Any]], None]
"""
def __init__(
self,
crawler: AsyncWebCrawler,
mode: ExecutionMode,
# callbacks: Optional[Dict[CallbackType, Callable]] = None,
callbacks: Optional[Dict[CallbackType, Callable[[Any], Union[Awaitable[None], None]]]] = None,
persistence_path: Optional[Path] = None,
**crawl_config_kwargs
):
self.crawler = crawler
self.mode = mode
self.callbacks = callbacks or {}
self.resource_monitor = ResourceMonitor(mode)
self.tasks: Dict[str, TaskMetadata] = {}
self.active_tasks: Set[str] = set()
self.logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}")
self.metrics = ExecutorMetrics()
self.control = ExecutorControl()
self.crawl_config_kwargs = crawl_config_kwargs # Store parameters for arun
async def get_status(self) -> Dict[str, Any]:
"""Get current executor status and metrics."""
return {
"status": {
"paused": await self.control.is_paused(),
"cancelled": await self.control.is_cancelled(),
"active_tasks": len(self.active_tasks)
},
"metrics": self.metrics.to_dict()
}
async def clear_state(self):
"""Reset executor state."""
self.tasks.clear()
self.active_tasks.clear()
self.metrics = ExecutorMetrics()
await self.control.reset()
if getattr(self, "persistence", None): await self.persistence.clear()  # Optional persistence backend; not wired up yet
async def _execute_callback(
self,
callback_type: CallbackType,
*args,
**kwargs
):
"""Execute callback if it exists."""
if callback := self.callbacks.get(callback_type):
try:
if inspect.iscoroutinefunction(callback):
await callback(*args, **kwargs)
else:
callback(*args, **kwargs)
except Exception as e:
# self.logger.error(f"Callback {callback_type} failed: {e}")
self.logger.error(f"Executor {self.__class__.__name__}: Callback {callback_type.value} failed: {e}", exc_info=True)
async def _process_url(self, url: str) -> CrawlResult:
"""Process a single URL using the crawler."""
max_retries = self.crawl_config_kwargs.get('max_retries', 3)
backoff_factor = self.crawl_config_kwargs.get('backoff_factor', 1)
attempts = 0
while attempts <= max_retries:
# Invoke PRE_EXECUTION callback
await self._execute_callback(CallbackType.PRE_EXECUTION, url, self.metrics.to_dict())
# Wait if execution is paused
await self.control.wait_if_paused(timeout=300)
# Check if cancelled
if await self.control.is_cancelled():
raise asyncio.CancelledError("Execution was cancelled")
start_time = time.time()
self.metrics.active_connections += 1
try:
result = await self.crawler.arun(url, **self.crawl_config_kwargs)
self.metrics.completed_urls += 1
self.metrics.response_times.append(time.time() - start_time)
# Invoke POST_EXECUTION callback
await self._execute_callback(CallbackType.POST_EXECUTION, url, result, self.metrics.to_dict())
return result
except Exception as e:
attempts += 1
self.metrics.failed_urls += 1
# self.logger.error(f"Error processing URL {url}: {e}")
self.logger.error(f"Executor {self.__class__.__name__}: Error processing URL {url}: {e}", exc_info=True)
# Invoke ON_ERROR callback
await self._execute_callback(CallbackType.ON_ERROR, url, e, self.metrics.to_dict())
if attempts <= max_retries:
# Invoke ON_RETRY callback
await self._execute_callback(CallbackType.ON_RETRY, url, attempts, self.metrics.to_dict())
# Wait before retrying
await asyncio.sleep(backoff_factor * attempts)
else:
raise e
finally:
self.metrics.active_connections -= 1
# Update system metrics
# INFO: Uncomment this line if you want to capture system metrics after each URL, but it causes a performance hit
# self.metrics.system_metrics.append(SystemMetrics.capture())
# Exit the loop if successful or retries exceeded
if attempts > max_retries:
break
async def execute(self, urls: List[str]) -> Dict[str, Any]:
"""Execute crawling tasks."""
# Initialize metrics
self.metrics.total_urls = len(urls)
self.metrics.start_time = time.time()
# Create context with metrics (used for callbacks)
context = {
"mode": self.mode,
"start_time": self.metrics.start_time,
"total_urls": self.metrics.total_urls
}
# Invoke ON_BATCH_START callback
await self._execute_callback(CallbackType.ON_BATCH_START, urls, context)
results = {}
batch_errors = []
# Use the crawler within an async context manager
async with self.crawler:
# Check for cancellation before starting
if await self.control.is_cancelled():
raise asyncio.CancelledError("Execution was cancelled")
# Wait if paused
await self.control.wait_if_paused(timeout=300)
# Prepare list of batches
batches = []
total_urls_remaining = len(urls)
index = 0
while index < len(urls):
batch_size = self.resource_monitor.get_optimal_batch_size(total_urls_remaining)
batch_urls = urls[index:index + batch_size]
batches.append(batch_urls)
index += batch_size
total_urls_remaining -= batch_size
# Process each batch
for batch_urls in batches:
# Check for cancellation
if await self.control.is_cancelled():
raise asyncio.CancelledError("Execution was cancelled")
# Wait if paused
await self.control.wait_if_paused(timeout=300)
try:
# Process the batch
batch_results = await self.process_batch(batch_urls)
# Update results
results.update(batch_results)
# Capture system metrics after each batch
self.metrics.capture_system_metrics()
# Update system metrics after each batch
# self.metrics.system_metrics.append(SystemMetrics.capture()) # Has memory leak issue
# Invoke ON_BATCH_END callback
await self._execute_callback(CallbackType.ON_BATCH_END, batch_urls, context)
except Exception as e:
# Handle batch-level exceptions
self.logger.error(f"Error processing batch: {e}")
await self._execute_callback(CallbackType.ON_ERROR, "batch", e, context)
# Collect the error
batch_errors.append((batch_urls, e))
# Continue to next batch instead of raising
continue
# Execution complete
await self._execute_callback(CallbackType.ON_COMPLETE, results, context)
# Log final metrics and batch errors if any
final_status = await self.get_status()
# self.logger.info(f"Execution completed. Metrics: {final_status}")
self.logger.info(f"Executor {self.__class__.__name__}: Execution completed. Metrics: {final_status}")
if batch_errors:
# self.logger.warning(f"Execution completed with errors in {len(batch_errors)} batches.")
self.logger.warning(f"Executor {self.__class__.__name__}: Execution completed with errors in {len(batch_errors)} batches.")
return results
@abstractmethod
async def process_batch(self, batch_urls: List[str]) -> Dict[str, Any]:
"""Process a batch of URLs."""
pass
class SpeedOptimizedExecutor(ExecutorStrategy):
"""Executor optimized for speed."""
def __init__(
self,
crawler: AsyncWebCrawler,
callbacks: Optional[Dict[CallbackType, Callable]] = None,
persistence_path: Optional[Path] = None,
word_count_threshold=MIN_WORD_THRESHOLD,
extraction_strategy: ExtractionStrategy = None,
chunking_strategy: ChunkingStrategy = None,
bypass_cache: bool = False,
css_selector: str = None,
screenshot: bool = False,
user_agent: str = None,
verbose=True,
connection_pool_size: int = 1000,
dns_cache_size: int = 10000,
backoff_factor: int = 1,
**kwargs
):
if chunking_strategy is None:
chunking_strategy = RegexChunking()
super().__init__(
crawler=crawler,
mode=ExecutionMode.SPEED,
callbacks=callbacks,
persistence_path=persistence_path,
word_count_threshold=word_count_threshold,
extraction_strategy=extraction_strategy,
chunking_strategy=chunking_strategy,
bypass_cache=bypass_cache,
css_selector=css_selector,
screenshot=screenshot,
user_agent=user_agent,
verbose=verbose,
**kwargs
)
self.connection_pool_size = connection_pool_size
self.dns_cache_size = dns_cache_size
self.backoff_factor = backoff_factor
self.logger.info(
# "Initialized speed-optimized executor with:"
f"Executor {self.__class__.__name__}: Initialized with:"
f" connection_pool_size={self.connection_pool_size},"
f" dns_cache_size={self.dns_cache_size}"
)
async def process_batch(self, batch_urls: List[str]) -> Dict[str, Any]:
"""Process a batch of URLs concurrently."""
batch_tasks = [self._process_url(url) for url in batch_urls]
# Execute batch with concurrency control
batch_results_list = await asyncio.gather(*batch_tasks, return_exceptions=True)
batch_results = {}
for url, result in zip(batch_urls, batch_results_list):
if isinstance(result, Exception):
batch_results[url] = {"success": False, "error": str(result)}
else:
batch_results[url] = {"success": True, "result": result}
return batch_results
class ResourceOptimizedExecutor(ExecutorStrategy):
"""Executor optimized for resource usage."""
def __init__(
self,
crawler: AsyncWebCrawler,
callbacks: Optional[Dict[CallbackType, Callable]] = None,
persistence_path: Optional[Path] = None,
word_count_threshold=MIN_WORD_THRESHOLD,
extraction_strategy: ExtractionStrategy = None,
chunking_strategy: ChunkingStrategy = None,
bypass_cache: bool = False,
css_selector: str = None,
screenshot: bool = False,
user_agent: str = None,
verbose=True,
connection_pool_size: int = 50,
dns_cache_size: int = 1000,
backoff_factor: int = 5,
max_concurrent_tasks: int = 5,
**kwargs
):
if chunking_strategy is None:
chunking_strategy = RegexChunking()
super().__init__(
crawler=crawler,
mode=ExecutionMode.RESOURCE,
callbacks=callbacks,
persistence_path=persistence_path,
word_count_threshold=word_count_threshold,
extraction_strategy=extraction_strategy,
chunking_strategy=chunking_strategy,
bypass_cache=bypass_cache,
css_selector=css_selector,
screenshot=screenshot,
user_agent=user_agent,
verbose=verbose,
**kwargs
)
self.connection_pool_size = connection_pool_size
self.dns_cache_size = dns_cache_size
self.backoff_factor = backoff_factor
self.max_concurrent_tasks = max_concurrent_tasks
self.logger.info(
# "Initialized resource-optimized executor with:"
f"Executor {self.__class__.__name__}: Initialized with:"
f" connection_pool_size={self.connection_pool_size},"
f" dns_cache_size={self.dns_cache_size},"
f" max_concurrent_tasks={self.max_concurrent_tasks}"
)
async def process_batch(self, batch_urls: List[str]) -> Dict[str, Any]:
"""Process a batch of URLs with resource optimization."""
batch_results = {}
semaphore = asyncio.Semaphore(self.max_concurrent_tasks)
# Wait until resources are available before processing batch
while not await self.resource_monitor.check_resources():
# self.logger.warning("Resource limits reached, waiting...")
self.logger.warning(f"Executor {self.__class__.__name__}: Resource limits reached, waiting...")
await asyncio.sleep(self.backoff_factor)
# Check for cancellation
if await self.control.is_cancelled():
raise asyncio.CancelledError("Execution was cancelled")
async def process_url_with_semaphore(url):
async with semaphore:
# Check for cancellation
if await self.control.is_cancelled():
raise asyncio.CancelledError("Execution was cancelled")
# Wait if paused
await self.control.wait_if_paused(timeout=300)
try:
result = await self._process_url(url)
batch_results[url] = {"success": True, "result": result}
except Exception as e:
batch_results[url] = {"success": False, "error": str(e)}
finally:
# Update system metrics after each URL
# INFO: Uncomment this line if you want to capture system metrics after each URL, but it causes a performance hit
# self.metrics.system_metrics.append(SystemMetrics.capture())
# Controlled delay between URLs
await asyncio.sleep(0.1) # Small delay for resource management
tasks = [process_url_with_semaphore(url) for url in batch_urls]
await asyncio.gather(*tasks)
return batch_results
async def main():
# Sample callback functions
async def pre_execution_callback(url: str, context: Dict[str, Any]):
print(f"Pre-execution callback: About to process URL {url}")
async def post_execution_callback(url: str, result: Any, context: Dict[str, Any]):
print(f"Post-execution callback: Successfully processed URL {url}")
async def on_error_callback(url: str, error: Exception, context: Dict[str, Any]):
print(f"Error callback: Error processing URL {url}: {error}")
async def on_retry_callback(url: str, attempt: int, context: Dict[str, Any]):
print(f"Retry callback: Retrying URL {url}, attempt {attempt}")
async def on_batch_start_callback(urls: List[str], context: Dict[str, Any]):
print(f"Batch start callback: Starting batch with {len(urls)} URLs")
async def on_batch_end_callback(urls: List[str], context: Dict[str, Any]):
print(f"Batch end callback: Completed batch with {len(urls)} URLs")
async def on_complete_callback(results: Dict[str, Any], context: Dict[str, Any]):
print(f"Complete callback: Execution completed with {len(results)} results")
# Sample URLs to crawl
urls = [
"https://www.example.com",
"https://www.python.org",
"https://www.asyncio.org",
# Add more URLs as needed
]
# Instantiate the crawler
crawler = AsyncWebCrawler()
# Set up callbacks
callbacks = {
CallbackType.PRE_EXECUTION: pre_execution_callback,
CallbackType.POST_EXECUTION: post_execution_callback,
CallbackType.ON_ERROR: on_error_callback,
CallbackType.ON_RETRY: on_retry_callback,
CallbackType.ON_BATCH_START: on_batch_start_callback,
CallbackType.ON_BATCH_END: on_batch_end_callback,
CallbackType.ON_COMPLETE: on_complete_callback,
}
# Instantiate the executors
speed_executor = SpeedOptimizedExecutor(
crawler=crawler,
callbacks=callbacks,
max_retries=2, # Example additional config
)
resource_executor = ResourceOptimizedExecutor(
crawler=crawler,
callbacks=callbacks,
max_concurrent_tasks=3, # Limit concurrency
max_retries=2, # Example additional config
)
# Choose which executor to use
executor = speed_executor # Or resource_executor
# Start the execution in a background task
execution_task = asyncio.create_task(executor.execute(urls))
# Simulate control operations
await asyncio.sleep(2) # Let it run for a bit
print("Pausing execution...")
await executor.control.pause()
await asyncio.sleep(2) # Wait while paused
print("Resuming execution...")
await executor.control.resume()
# Wait for execution to complete
results = await execution_task
# Print the results
print("Execution results:")
for url, result in results.items():
print(f"{url}: {result}")
# Get and print final metrics
final_status = await executor.get_status()
print("Final executor status and metrics:")
print(final_status)
# Run the main function
if __name__ == "__main__":
asyncio.run(main())

View File

@@ -1,374 +0,0 @@
from abc import ABC, abstractmethod
from enum import Enum
from typing import Optional, Dict, Any, List
import os
from datetime import datetime
from urllib.parse import unquote
from rich.console import Console
from rich.text import Text
from .utils import create_box_message
class LogLevel(Enum):
DEFAULT = 0
DEBUG = 1
INFO = 2
SUCCESS = 3
WARNING = 4
ERROR = 5
CRITICAL = 6
ALERT = 7
NOTICE = 8
EXCEPTION = 9
FATAL = 10
def __str__(self):
return self.name.lower()
class LogColor(str, Enum):
"""Enum for log colors."""
DEBUG = "bright_black"
INFO = "cyan"
SUCCESS = "green"
WARNING = "yellow"
ERROR = "red"
CYAN = "cyan"
GREEN = "green"
YELLOW = "yellow"
MAGENTA = "magenta"
DIM_MAGENTA = "dim magenta"
RED = "red"
def __str__(self):
"""Automatically convert rich color to string."""
return self.value
class AsyncLoggerBase(ABC):
@abstractmethod
def debug(self, message: str, tag: str = "DEBUG", **kwargs):
pass
@abstractmethod
def info(self, message: str, tag: str = "INFO", **kwargs):
pass
@abstractmethod
def success(self, message: str, tag: str = "SUCCESS", **kwargs):
pass
@abstractmethod
def warning(self, message: str, tag: str = "WARNING", **kwargs):
pass
@abstractmethod
def error(self, message: str, tag: str = "ERROR", **kwargs):
pass
@abstractmethod
def url_status(self, url: str, success: bool, timing: float, tag: str = "FETCH", url_length: int = 100):
pass
@abstractmethod
def error_status(self, url: str, error: str, tag: str = "ERROR", url_length: int = 100):
pass
class AsyncLogger(AsyncLoggerBase):
"""
Asynchronous logger with support for colored console output and file logging.
Supports templated messages with colored components.
"""
DEFAULT_ICONS = {
"INIT": "",
"READY": "",
"FETCH": "",
"SCRAPE": "",
"EXTRACT": "",
"COMPLETE": "",
"ERROR": "×",
"DEBUG": "",
"INFO": "",
"WARNING": "",
"SUCCESS": "",
"CRITICAL": "",
"ALERT": "",
"NOTICE": "",
"EXCEPTION": "",
"FATAL": "",
"DEFAULT": "",
}
DEFAULT_COLORS = {
LogLevel.DEBUG: LogColor.DEBUG,
LogLevel.INFO: LogColor.INFO,
LogLevel.SUCCESS: LogColor.SUCCESS,
LogLevel.WARNING: LogColor.WARNING,
LogLevel.ERROR: LogColor.ERROR,
}
def __init__(
self,
log_file: Optional[str] = None,
log_level: LogLevel = LogLevel.DEBUG,
tag_width: int = 10,
icons: Optional[Dict[str, str]] = None,
colors: Optional[Dict[LogLevel, LogColor]] = None,
verbose: bool = True,
):
"""
Initialize the logger.
Args:
log_file: Optional file path for logging
log_level: Minimum log level to display
tag_width: Width for tag formatting
icons: Custom icons for different tags
colors: Custom colors for different log levels
verbose: Whether to output to console
"""
self.log_file = log_file
self.log_level = log_level
self.tag_width = tag_width
self.icons = icons or self.DEFAULT_ICONS
self.colors = colors or self.DEFAULT_COLORS
self.verbose = verbose
self.console = Console()
# Create log file directory if needed
if log_file:
os.makedirs(os.path.dirname(os.path.abspath(log_file)), exist_ok=True)
def _format_tag(self, tag: str) -> str:
"""Format a tag with consistent width."""
return f"[{tag}]".ljust(self.tag_width, ".")
def _get_icon(self, tag: str) -> str:
"""Get the icon for a tag, defaulting to info icon if not found."""
return self.icons.get(tag, self.icons["INFO"])
def _shorten(self, text, length, placeholder="..."):
"""Truncate text in the middle if longer than length, or pad if shorter."""
if len(text) <= length:
return text.ljust(length) # Pad with spaces to reach desired length
half = (length - len(placeholder)) // 2
shortened = text[:half] + placeholder + text[-half:]
return shortened.ljust(length) # Also pad shortened text to consistent length
def _write_to_file(self, message: str):
"""Write a message to the log file if configured."""
if self.log_file:
text = Text.from_markup(message)
plain_text = text.plain
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
with open(self.log_file, "a", encoding="utf-8") as f:
f.write(f"[{timestamp}] {plain_text}\n")
def _log(
self,
level: LogLevel,
message: str,
tag: str,
params: Optional[Dict[str, Any]] = None,
colors: Optional[Dict[str, LogColor]] = None,
boxes: Optional[List[str]] = None,
base_color: Optional[LogColor] = None,
**kwargs,
):
"""
Core logging method that handles message formatting and output.
Args:
level: Log level for this message
message: Message template string
tag: Tag for the message
params: Parameters to format into the message
colors: Color overrides for specific parameters
boxes: Box overrides for specific parameters
base_color: Base color for the entire message
"""
if level.value < self.log_level.value:
return
# avoid conflict with rich formatting
parsed_message = message.replace("[", "[[").replace("]", "]]")
if params:
# FIXME: If a parameter uses a format spec (e.g. {value:.2f}), the formatted text
# ("0.23") no longer matches str(value) ("0.23333"), so the color/box replacement
# below may silently fail to apply.
formatted_message = parsed_message.format(**params)
for key, value in params.items():
# Escape `[` and `]` in the value so rich does not treat them as markup.
value_str = str(value).replace("[", "[[").replace("]", "]]")
# Apply a color override if one is configured for this parameter
if colors and key in colors:
color_str = f"[{colors[key]}]{value_str}[/{colors[key]}]"
formatted_message = formatted_message.replace(value_str, color_str)
value_str = color_str
# Wrap the value in a box if one is requested for this parameter
if boxes and key in boxes:
formatted_message = formatted_message.replace(value_str,
create_box_message(value_str, type=str(level)))
else:
formatted_message = parsed_message
# Construct the full log line
color: LogColor = base_color or self.colors[level]
log_line = f"[{color}]{self._format_tag(tag)} {self._get_icon(tag)} {formatted_message} [/{color}]"
# Output to console if verbose
if self.verbose or kwargs.get("force_verbose", False):
self.console.print(log_line)
# Write to file if configured
self._write_to_file(log_line)
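# Example call (illustrative), highlighting one parameter with a color override:
#   self._log(LogLevel.INFO, "Fetched {url} in {timing:.2f}s", "FETCH",
#             params={"url": url, "timing": 1.23}, colors={"timing": LogColor.WARNING})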
def debug(self, message: str, tag: str = "DEBUG", **kwargs):
"""Log a debug message."""
self._log(LogLevel.DEBUG, message, tag, **kwargs)
def info(self, message: str, tag: str = "INFO", **kwargs):
"""Log an info message."""
self._log(LogLevel.INFO, message, tag, **kwargs)
def success(self, message: str, tag: str = "SUCCESS", **kwargs):
"""Log a success message."""
self._log(LogLevel.SUCCESS, message, tag, **kwargs)
def warning(self, message: str, tag: str = "WARNING", **kwargs):
"""Log a warning message."""
self._log(LogLevel.WARNING, message, tag, **kwargs)
def critical(self, message: str, tag: str = "CRITICAL", **kwargs):
"""Log a critical message."""
self._log(LogLevel.ERROR, message, tag, **kwargs)
def exception(self, message: str, tag: str = "EXCEPTION", **kwargs):
"""Log an exception message."""
self._log(LogLevel.ERROR, message, tag, **kwargs)
def fatal(self, message: str, tag: str = "FATAL", **kwargs):
"""Log a fatal message."""
self._log(LogLevel.ERROR, message, tag, **kwargs)
def alert(self, message: str, tag: str = "ALERT", **kwargs):
"""Log an alert message."""
self._log(LogLevel.ERROR, message, tag, **kwargs)
def notice(self, message: str, tag: str = "NOTICE", **kwargs):
"""Log a notice message."""
self._log(LogLevel.INFO, message, tag, **kwargs)
def error(self, message: str, tag: str = "ERROR", **kwargs):
"""Log an error message."""
self._log(LogLevel.ERROR, message, tag, **kwargs)
def url_status(
self,
url: str,
success: bool,
timing: float,
tag: str = "FETCH",
url_length: int = 100,
):
"""
Convenience method for logging URL fetch status.
Args:
url: The URL being processed
success: Whether the operation was successful
timing: Time taken for the operation
tag: Tag for the message
url_length: Maximum length for URL in log
"""
decoded_url = unquote(url)
readable_url = self._shorten(decoded_url, url_length)
self._log(
level=LogLevel.SUCCESS if success else LogLevel.ERROR,
message="{url} | {status} | ⏱: {timing:.2f}s",
tag=tag,
params={
"url": readable_url,
"status": "" if success else "",
"timing": timing,
},
colors={
"status": LogColor.SUCCESS if success else LogColor.ERROR,
"timing": LogColor.WARNING,
},
)
def error_status(
self, url: str, error: str, tag: str = "ERROR", url_length: int = 50
):
"""
Convenience method for logging error status.
Args:
url: The URL being processed
error: Error message
tag: Tag for the message
url_length: Maximum length for URL in log
"""
decoded_url = unquote(url)
readable_url = self._shorten(decoded_url, url_length)
self._log(
level=LogLevel.ERROR,
message="{url} | Error: {error}",
tag=tag,
params={"url": readable_url, "error": error},
)
class AsyncFileLogger(AsyncLoggerBase):
"""
File-only asynchronous logger that writes logs to a specified file.
"""
def __init__(self, log_file: str):
"""
Initialize the file logger.
Args:
log_file: File path for logging
"""
self.log_file = log_file
os.makedirs(os.path.dirname(os.path.abspath(log_file)), exist_ok=True)
def _write_to_file(self, level: str, message: str, tag: str):
"""Write a message to the log file."""
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
with open(self.log_file, "a", encoding="utf-8") as f:
f.write(f"[{timestamp}] [{level}] [{tag}] {message}\n")
def debug(self, message: str, tag: str = "DEBUG", **kwargs):
"""Log a debug message to file."""
self._write_to_file("DEBUG", message, tag)
def info(self, message: str, tag: str = "INFO", **kwargs):
"""Log an info message to file."""
self._write_to_file("INFO", message, tag)
def success(self, message: str, tag: str = "SUCCESS", **kwargs):
"""Log a success message to file."""
self._write_to_file("SUCCESS", message, tag)
def warning(self, message: str, tag: str = "WARNING", **kwargs):
"""Log a warning message to file."""
self._write_to_file("WARNING", message, tag)
def error(self, message: str, tag: str = "ERROR", **kwargs):
"""Log an error message to file."""
self._write_to_file("ERROR", message, tag)
def url_status(self, url: str, success: bool, timing: float, tag: str = "FETCH", url_length: int = 100):
"""Log URL fetch status to file."""
status = "SUCCESS" if success else "FAILED"
message = f"{url[:url_length]}... | Status: {status} | Time: {timing:.2f}s"
self._write_to_file("URL_STATUS", message, tag)
def error_status(self, url: str, error: str, tag: str = "ERROR", url_length: int = 100):
"""Log error status to file."""
message = f"{url[:url_length]}... | Error: {error}"
self._write_to_file("ERROR", message, tag)

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,421 +0,0 @@
# browser_adapter.py
"""
Browser adapter for Crawl4AI to support both Playwright and undetected browsers
with minimal changes to existing codebase.
"""
from abc import ABC, abstractmethod
from typing import List, Dict, Any, Optional, Callable
import time
import json
# Import both, but use conditionally
try:
from playwright.async_api import Page
except ImportError:
Page = Any
try:
from patchright.async_api import Page as UndetectedPage
except ImportError:
UndetectedPage = Any
class BrowserAdapter(ABC):
"""Abstract adapter for browser-specific operations"""
@abstractmethod
async def evaluate(self, page: Page, expression: str, arg: Any = None) -> Any:
"""Execute JavaScript in the page"""
pass
@abstractmethod
async def setup_console_capture(self, page: Page, captured_console: List[Dict]) -> Optional[Callable]:
"""Setup console message capturing, returns handler function if needed"""
pass
@abstractmethod
async def setup_error_capture(self, page: Page, captured_console: List[Dict]) -> Optional[Callable]:
"""Setup error capturing, returns handler function if needed"""
pass
@abstractmethod
async def retrieve_console_messages(self, page: Page) -> List[Dict]:
"""Retrieve captured console messages (for undetected browsers)"""
pass
@abstractmethod
async def cleanup_console_capture(self, page: Page, handle_console: Optional[Callable], handle_error: Optional[Callable]):
"""Clean up console event listeners"""
pass
@abstractmethod
def get_imports(self) -> tuple:
"""Get the appropriate imports for this adapter"""
pass
class PlaywrightAdapter(BrowserAdapter):
"""Adapter for standard Playwright"""
async def evaluate(self, page: Page, expression: str, arg: Any = None) -> Any:
"""Standard Playwright evaluate"""
if arg is not None:
return await page.evaluate(expression, arg)
return await page.evaluate(expression)
async def setup_console_capture(self, page: Page, captured_console: List[Dict]) -> Optional[Callable]:
"""Setup console capture using Playwright's event system"""
def handle_console_capture(msg):
try:
message_type = "unknown"
try:
message_type = msg.type
except:
pass
message_text = "unknown"
try:
message_text = msg.text
except:
pass
entry = {
"type": message_type,
"text": message_text,
"timestamp": time.time()
}
captured_console.append(entry)
except Exception as e:
captured_console.append({
"type": "console_capture_error",
"error": str(e),
"timestamp": time.time()
})
page.on("console", handle_console_capture)
return handle_console_capture
async def setup_error_capture(self, page: Page, captured_console: List[Dict]) -> Optional[Callable]:
"""Setup error capture using Playwright's event system"""
def handle_pageerror_capture(err):
try:
error_message = "Unknown error"
try:
error_message = err.message
except:
pass
error_stack = ""
try:
error_stack = err.stack
except:
pass
captured_console.append({
"type": "error",
"text": error_message,
"stack": error_stack,
"timestamp": time.time()
})
except Exception as e:
captured_console.append({
"type": "pageerror_capture_error",
"error": str(e),
"timestamp": time.time()
})
page.on("pageerror", handle_pageerror_capture)
return handle_pageerror_capture
async def retrieve_console_messages(self, page: Page) -> List[Dict]:
"""Not needed for Playwright - messages are captured via events"""
return []
async def cleanup_console_capture(self, page: Page, handle_console: Optional[Callable], handle_error: Optional[Callable]):
"""Remove event listeners"""
if handle_console:
page.remove_listener("console", handle_console)
if handle_error:
page.remove_listener("pageerror", handle_error)
def get_imports(self) -> tuple:
"""Return Playwright imports"""
from playwright.async_api import Page, Error
from playwright.async_api import TimeoutError as PlaywrightTimeoutError
return Page, Error, PlaywrightTimeoutError
class StealthAdapter(BrowserAdapter):
"""Adapter for Playwright with stealth features using playwright_stealth"""
def __init__(self):
self._console_script_injected = {}
self._stealth_available = self._check_stealth_availability()
def _check_stealth_availability(self) -> bool:
"""Check if playwright_stealth is available and get the correct function"""
try:
from playwright_stealth import stealth_async
self._stealth_function = stealth_async
return True
except ImportError:
try:
from playwright_stealth import stealth_sync
self._stealth_function = stealth_sync
return True
except ImportError:
self._stealth_function = None
return False
async def apply_stealth(self, page: Page):
"""Apply stealth to a page if available"""
if self._stealth_available and self._stealth_function:
try:
if hasattr(self._stealth_function, '__call__'):
if 'async' in getattr(self._stealth_function, '__name__', ''):
await self._stealth_function(page)
else:
self._stealth_function(page)
except Exception as e:
# Fail silently or log error depending on requirements
pass
async def evaluate(self, page: Page, expression: str, arg: Any = None) -> Any:
"""Standard Playwright evaluate with stealth applied"""
if arg is not None:
return await page.evaluate(expression, arg)
return await page.evaluate(expression)
async def setup_console_capture(self, page: Page, captured_console: List[Dict]) -> Optional[Callable]:
"""Setup console capture using Playwright's event system with stealth"""
# Apply stealth to the page first
await self.apply_stealth(page)
def handle_console_capture(msg):
try:
message_type = "unknown"
try:
message_type = msg.type
except:
pass
message_text = "unknown"
try:
message_text = msg.text
except:
pass
entry = {
"type": message_type,
"text": message_text,
"timestamp": time.time()
}
captured_console.append(entry)
except Exception as e:
captured_console.append({
"type": "console_capture_error",
"error": str(e),
"timestamp": time.time()
})
page.on("console", handle_console_capture)
return handle_console_capture
async def setup_error_capture(self, page: Page, captured_console: List[Dict]) -> Optional[Callable]:
"""Setup error capture using Playwright's event system"""
def handle_pageerror_capture(err):
try:
error_message = "Unknown error"
try:
error_message = err.message
except:
pass
error_stack = ""
try:
error_stack = err.stack
except:
pass
captured_console.append({
"type": "error",
"text": error_message,
"stack": error_stack,
"timestamp": time.time()
})
except Exception as e:
captured_console.append({
"type": "pageerror_capture_error",
"error": str(e),
"timestamp": time.time()
})
page.on("pageerror", handle_pageerror_capture)
return handle_pageerror_capture
async def retrieve_console_messages(self, page: Page) -> List[Dict]:
"""Not needed for Playwright - messages are captured via events"""
return []
async def cleanup_console_capture(self, page: Page, handle_console: Optional[Callable], handle_error: Optional[Callable]):
"""Remove event listeners"""
if handle_console:
page.remove_listener("console", handle_console)
if handle_error:
page.remove_listener("pageerror", handle_error)
def get_imports(self) -> tuple:
"""Return Playwright imports"""
from playwright.async_api import Page, Error
from playwright.async_api import TimeoutError as PlaywrightTimeoutError
return Page, Error, PlaywrightTimeoutError
class UndetectedAdapter(BrowserAdapter):
"""Adapter for undetected browser automation with stealth features"""
def __init__(self):
self._console_script_injected = {}
async def evaluate(self, page: UndetectedPage, expression: str, arg: Any = None) -> Any:
"""Undetected browser evaluate with isolated context"""
# For most evaluations, use isolated context for stealth
# Only use non-isolated when we need to access our injected console capture
isolated = not (
"__console" in expression or
"__captured" in expression or
"__error" in expression or
"window.__" in expression
)
if arg is not None:
return await page.evaluate(expression, arg, isolated_context=isolated)
return await page.evaluate(expression, isolated_context=isolated)
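# Example (illustrative): "document.title" is evaluated in an isolated world for stealth,
# while "window.__capturedConsole.length" matches the heuristic above and runs in the
# main world so it can reach the injected capture buffers.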
async def setup_console_capture(self, page: UndetectedPage, captured_console: List[Dict]) -> Optional[Callable]:
"""Setup console capture using JavaScript injection for undetected browsers"""
if not self._console_script_injected.get(page, False):
await page.add_init_script("""
// Initialize console capture
window.__capturedConsole = [];
window.__capturedErrors = [];
// Store original console methods
const originalConsole = {};
['log', 'info', 'warn', 'error', 'debug'].forEach(method => {
originalConsole[method] = console[method];
console[method] = function(...args) {
try {
window.__capturedConsole.push({
type: method,
text: args.map(arg => {
try {
if (typeof arg === 'object') {
return JSON.stringify(arg);
}
return String(arg);
} catch (e) {
return '[Object]';
}
}).join(' '),
timestamp: Date.now()
});
} catch (e) {
// Fail silently to avoid detection
}
// Call original method
originalConsole[method].apply(console, args);
};
});
""")
self._console_script_injected[page] = True
return None # No handler function needed for undetected browser
async def setup_error_capture(self, page: UndetectedPage, captured_console: List[Dict]) -> Optional[Callable]:
"""Setup error capture using JavaScript injection for undetected browsers"""
if not self._console_script_injected.get(page, False):
await page.add_init_script("""
// Capture errors
window.addEventListener('error', (event) => {
try {
window.__capturedErrors.push({
type: 'error',
text: event.message,
stack: event.error ? event.error.stack : '',
filename: event.filename,
lineno: event.lineno,
colno: event.colno,
timestamp: Date.now()
});
} catch (e) {
// Fail silently
}
});
// Capture unhandled promise rejections
window.addEventListener('unhandledrejection', (event) => {
try {
window.__capturedErrors.push({
type: 'unhandledrejection',
text: event.reason ? String(event.reason) : 'Unhandled Promise Rejection',
stack: event.reason && event.reason.stack ? event.reason.stack : '',
timestamp: Date.now()
});
} catch (e) {
// Fail silently
}
});
""")
self._console_script_injected[page] = True
return None # No handler function needed for undetected browser
async def retrieve_console_messages(self, page: UndetectedPage) -> List[Dict]:
"""Retrieve captured console messages and errors from the page"""
messages = []
try:
# Get console messages
console_messages = await page.evaluate(
"() => { const msgs = window.__capturedConsole || []; window.__capturedConsole = []; return msgs; }",
isolated_context=False
)
messages.extend(console_messages)
# Get errors
errors = await page.evaluate(
"() => { const errs = window.__capturedErrors || []; window.__capturedErrors = []; return errs; }",
isolated_context=False
)
messages.extend(errors)
# Convert timestamps from JS to Python format
for msg in messages:
if 'timestamp' in msg and isinstance(msg['timestamp'], (int, float)):
msg['timestamp'] = msg['timestamp'] / 1000.0 # Convert from ms to seconds
except Exception:
# If retrieval fails, return empty list
pass
return messages
async def cleanup_console_capture(self, page: UndetectedPage, handle_console: Optional[Callable], handle_error: Optional[Callable]):
"""Clean up for undetected browser - retrieve final messages"""
# For undetected browser, we don't have event listeners to remove
# but we should retrieve any final messages
final_messages = await self.retrieve_console_messages(page)
return final_messages
def get_imports(self) -> tuple:
"""Return undetected browser imports"""
from patchright.async_api import Page, Error
from patchright.async_api import TimeoutError as PlaywrightTimeoutError
return Page, Error, PlaywrightTimeoutError
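# Illustrative adapter selection sketch (the strategy object that consumes these
# adapters lives elsewhere in the codebase; `use_undetected` is a hypothetical flag):
#   adapter = UndetectedAdapter() if use_undetected else PlaywrightAdapter()
#   Page, Error, PlaywrightTimeoutError = adapter.get_imports()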

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,117 +0,0 @@
from enum import Enum
class CacheMode(Enum):
"""
Defines the caching behavior for web crawling operations.
Modes:
- ENABLED: Normal caching behavior (read and write)
- DISABLED: No caching at all
- READ_ONLY: Only read from cache, don't write
- WRITE_ONLY: Only write to cache, don't read
- BYPASS: Bypass cache for this operation
"""
ENABLED = "enabled"
DISABLED = "disabled"
READ_ONLY = "read_only"
WRITE_ONLY = "write_only"
BYPASS = "bypass"
class CacheContext:
"""
Encapsulates cache-related decisions and URL handling.
This class centralizes all cache-related logic and URL type checking,
making the caching behavior more predictable and maintainable.
Attributes:
url (str): The URL being processed.
cache_mode (CacheMode): The cache mode for the current operation.
always_bypass (bool): If True, bypasses caching for this operation.
is_cacheable (bool): True if the URL is cacheable, False otherwise.
is_web_url (bool): True if the URL is a web URL, False otherwise.
is_local_file (bool): True if the URL is a local file, False otherwise.
is_raw_html (bool): True if the URL is raw HTML, False otherwise.
_url_display (str): The display name for the URL (web, local file, or raw HTML).
"""
def __init__(self, url: str, cache_mode: CacheMode, always_bypass: bool = False):
"""
Initializes the CacheContext with the provided URL and cache mode.
Args:
url (str): The URL being processed.
cache_mode (CacheMode): The cache mode for the current operation.
always_bypass (bool): If True, bypasses caching for this operation.
"""
self.url = url
self.cache_mode = cache_mode
self.always_bypass = always_bypass
self.is_cacheable = url.startswith(("http://", "https://", "file://"))
self.is_web_url = url.startswith(("http://", "https://"))
self.is_local_file = url.startswith("file://")
self.is_raw_html = url.startswith("raw:")
self._url_display = url if not self.is_raw_html else "Raw HTML"
def should_read(self) -> bool:
"""
Determines if cache should be read based on context.
How it works:
1. If always_bypass is True or is_cacheable is False, return False.
2. If cache_mode is ENABLED or READ_ONLY, return True.
Returns:
bool: True if cache should be read, False otherwise.
"""
if self.always_bypass or not self.is_cacheable:
return False
return self.cache_mode in [CacheMode.ENABLED, CacheMode.READ_ONLY]
def should_write(self) -> bool:
"""
Determines if cache should be written based on context.
How it works:
1. If always_bypass is True or is_cacheable is False, return False.
2. If cache_mode is ENABLED or WRITE_ONLY, return True.
Returns:
bool: True if cache should be written, False otherwise.
"""
if self.always_bypass or not self.is_cacheable:
return False
return self.cache_mode in [CacheMode.ENABLED, CacheMode.WRITE_ONLY]
@property
def display_url(self) -> str:
"""Returns the URL in display format."""
return self._url_display
def _legacy_to_cache_mode(
disable_cache: bool = False,
bypass_cache: bool = False,
no_cache_read: bool = False,
no_cache_write: bool = False,
) -> CacheMode:
"""
Converts legacy cache parameters to the new CacheMode enum.
This is an internal function to help transition from the old boolean flags
to the new CacheMode system.
"""
if disable_cache:
return CacheMode.DISABLED
if bypass_cache:
return CacheMode.BYPASS
if no_cache_read and no_cache_write:
return CacheMode.DISABLED
if no_cache_read:
return CacheMode.WRITE_ONLY
if no_cache_write:
return CacheMode.READ_ONLY
return CacheMode.ENABLED
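A minimal, self-contained sketch of how these pieces combine, using only names defined in this module:
ctx = CacheContext("https://example.com/page", CacheMode.ENABLED)
assert ctx.should_read() and ctx.should_write()
raw_ctx = CacheContext("raw:<html>...</html>", CacheMode.ENABLED)
assert not raw_ctx.is_cacheable and not raw_ctx.should_write()   # raw HTML is never cached
assert raw_ctx.display_url == "Raw HTML"
# The legacy boolean flags map onto the enum:
assert _legacy_to_cache_mode(no_cache_read=True) is CacheMode.WRITE_ONLY
assert _legacy_to_cache_mode(bypass_cache=True) is CacheMode.BYPASS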

View File

@@ -3,52 +3,23 @@ import re
from collections import Counter
import string
from .model_loader import load_nltk_punkt
from .utils import *
# Define the abstract base class for chunking strategies
class ChunkingStrategy(ABC):
"""
Abstract base class for chunking strategies.
"""
@abstractmethod
def chunk(self, text: str) -> list:
"""
Abstract method to chunk the given text.
Args:
text (str): The text to chunk.
Returns:
list: A list of chunks.
"""
pass
# Create an identity chunking strategy f(x) = [x]
class IdentityChunking(ChunkingStrategy):
"""
Chunking strategy that returns the input text as a single chunk.
"""
def chunk(self, text: str) -> list:
return [text]
# Regex-based chunking
class RegexChunking(ChunkingStrategy):
"""
Chunking strategy that splits text based on regular expression patterns.
"""
def __init__(self, patterns=None, **kwargs):
"""
Initialize the RegexChunking object.
Args:
patterns (list): A list of regular expression patterns to split text.
"""
if patterns is None:
patterns = [r"\n\n"] # Default split pattern
patterns = [r'\n\n'] # Default split pattern
self.patterns = patterns
def chunk(self, text: str) -> list:
@@ -59,20 +30,12 @@ class RegexChunking(ChunkingStrategy):
new_paragraphs.extend(re.split(pattern, paragraph))
paragraphs = new_paragraphs
return paragraphs
# NLP-based sentence chunking
# NLP-based sentence chunking
class NlpSentenceChunking(ChunkingStrategy):
"""
Chunking strategy that splits text into sentences using NLTK's sentence tokenizer.
"""
def __init__(self, **kwargs):
"""
Initialize the NlpSentenceChunking object.
"""
from crawl4ai.le.legacy.model_loader import load_nltk_punkt
load_nltk_punkt()
pass
def chunk(self, text: str) -> list:
# Improved regex for sentence splitting
@@ -80,34 +43,18 @@ class NlpSentenceChunking(ChunkingStrategy):
# r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<![A-Z][A-Z]\.)(?<![A-Za-z]\.)(?<=\.|\?|\!|\n)\s'
# )
# sentences = sentence_endings.split(text)
# sens = [sent.strip() for sent in sentences if sent]
# sens = [sent.strip() for sent in sentences if sent]
from nltk.tokenize import sent_tokenize
sentences = sent_tokenize(text)
sens = [sent.strip() for sent in sentences]
sens = [sent.strip() for sent in sentences]
return list(set(sens))
# Topic-based segmentation using TextTiling
class TopicSegmentationChunking(ChunkingStrategy):
"""
Chunking strategy that segments text into topics using NLTK's TextTilingTokenizer.
How it works:
1. Segment the text into topics using TextTilingTokenizer
2. Extract keywords for each topic segment
"""
def __init__(self, num_keywords=3, **kwargs):
"""
Initialize the TopicSegmentationChunking object.
Args:
num_keywords (int): The number of keywords to extract for each topic segment.
"""
import nltk as nl
self.tokenizer = nl.tokenize.TextTilingTokenizer()
self.num_keywords = num_keywords
@@ -119,14 +66,8 @@ class TopicSegmentationChunking(ChunkingStrategy):
def extract_keywords(self, text: str) -> list:
# Tokenize and remove stopwords and punctuation
import nltk as nl
tokens = nl.tokenize.word_tokenize(text)
tokens = [
token.lower()
for token in tokens
if token not in nl.corpus.stopwords.words("english")
and token not in string.punctuation
]
tokens = [token.lower() for token in tokens if token not in nl.corpus.stopwords.words('english') and token not in string.punctuation]
# Calculate frequency distribution
freq_dist = Counter(tokens)
@@ -137,27 +78,15 @@ class TopicSegmentationChunking(ChunkingStrategy):
# Segment the text into topics
segments = self.chunk(text)
# Extract keywords for each topic segment
segments_with_topics = [
(segment, self.extract_keywords(segment)) for segment in segments
]
segments_with_topics = [(segment, self.extract_keywords(segment)) for segment in segments]
return segments_with_topics
# Fixed-length word chunks
class FixedLengthWordChunking(ChunkingStrategy):
"""
Chunking strategy that splits text into fixed-length word chunks.
How it works:
1. Split the text into words
2. Create chunks of fixed length
3. Return the list of chunks
"""
def __init__(self, chunk_size=100, **kwargs):
"""
Initialize the fixed-length word chunking strategy with the given chunk size.
Args:
chunk_size (int): The size of each chunk in words.
"""
@@ -165,28 +94,15 @@ class FixedLengthWordChunking(ChunkingStrategy):
def chunk(self, text: str) -> list:
words = text.split()
return [
" ".join(words[i : i + self.chunk_size])
for i in range(0, len(words), self.chunk_size)
]
return [' '.join(words[i:i + self.chunk_size]) for i in range(0, len(words), self.chunk_size)]
# Sliding window chunking
class SlidingWindowChunking(ChunkingStrategy):
"""
Chunking strategy that splits text into overlapping word chunks.
How it works:
1. Split the text into words
2. Slide a window of window_size words over the text, advancing by step words
3. Return the list of (overlapping) chunks
"""
def __init__(self, window_size=100, step=50, **kwargs):
"""
Initialize the sliding window chunking strategy with the given window size and
step size.
Args:
window_size (int): The size of the sliding window in words.
step (int): The step size for sliding the window in words.
@@ -197,37 +113,27 @@ class SlidingWindowChunking(ChunkingStrategy):
def chunk(self, text: str) -> list:
words = text.split()
chunks = []
if len(words) <= self.window_size:
return [text]
for i in range(0, len(words) - self.window_size + 1, self.step):
chunk = " ".join(words[i : i + self.window_size])
chunk = ' '.join(words[i:i + self.window_size])
chunks.append(chunk)
# Handle the last chunk if it doesn't align perfectly
if i + self.window_size < len(words):
chunks.append(" ".join(words[-self.window_size :]))
chunks.append(' '.join(words[-self.window_size:]))
return chunks
class OverlappingWindowChunking(ChunkingStrategy):
"""
Chunking strategy that splits text into overlapping word chunks.
How it works:
1. Split the text into words using whitespace
2. Create chunks of fixed length equal to the window size
3. Advance the window by window_size minus overlap words, so consecutive chunks share `overlap` words
4. Return the list of chunks
"""
def __init__(self, window_size=1000, overlap=100, **kwargs):
"""
Initialize the overlapping window chunking strategy with the given window size and
overlap size.
Args:
window_size (int): The size of the window in words.
overlap (int): The size of the overlap between consecutive chunks in words.
@@ -238,19 +144,19 @@ class OverlappingWindowChunking(ChunkingStrategy):
def chunk(self, text: str) -> list:
words = text.split()
chunks = []
if len(words) <= self.window_size:
return [text]
start = 0
while start < len(words):
end = start + self.window_size
chunk = " ".join(words[start:end])
chunk = ' '.join(words[start:end])
chunks.append(chunk)
if end >= len(words):
break
start = end - self.overlap
return chunks
return chunks
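To make the differences between the word-based chunkers concrete, a small self-contained comparison (the counts assume exactly the arguments shown):
text = " ".join(f"word{i}" for i in range(250))
fixed = FixedLengthWordChunking(chunk_size=100).chunk(text)                        # 3 chunks
sliding = SlidingWindowChunking(window_size=100, step=50).chunk(text)              # 4 chunks
overlapping = OverlappingWindowChunking(window_size=100, overlap=20).chunk(text)   # 3 chunks
assert len(fixed) == 3 and len(sliding) == 4 and len(overlapping) == 3
assert len(sliding[1].split()) == 100   # every sliding window holds exactly 100 words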

File diff suppressed because it is too large Load Diff

View File

@@ -1,837 +0,0 @@
import time
import uuid
import threading
import psutil
from datetime import datetime, timedelta
from typing import Dict, Optional, List
import threading
from rich.console import Console
from rich.layout import Layout
from rich.panel import Panel
from rich.table import Table
from rich.text import Text
from rich.live import Live
from rich import box
from ..models import CrawlStatus
class TerminalUI:
"""Terminal user interface for CrawlerMonitor using rich library."""
def __init__(self, refresh_rate: float = 1.0, max_width: int = 120):
"""
Initialize the terminal UI.
Args:
refresh_rate: How often to refresh the UI (in seconds)
max_width: Maximum width of the UI in characters
"""
self.console = Console(width=max_width)
self.layout = Layout()
self.refresh_rate = refresh_rate
self.stop_event = threading.Event()
self.ui_thread = None
self.monitor = None # Will be set by CrawlerMonitor
self.max_width = max_width
# Setup layout - vertical layout (top to bottom)
self.layout.split(
Layout(name="header", size=3),
Layout(name="pipeline_status", size=10),
Layout(name="task_details", ratio=1),
Layout(name="footer", size=3) # Increased footer size to fit all content
)
def start(self, monitor):
"""Start the UI thread."""
self.monitor = monitor
self.stop_event.clear()
self.ui_thread = threading.Thread(target=self._ui_loop)
self.ui_thread.daemon = True
self.ui_thread.start()
def stop(self):
"""Stop the UI thread."""
if self.ui_thread and self.ui_thread.is_alive():
self.stop_event.set()
# Only try to join if we're not in the UI thread
# This prevents "cannot join current thread" errors
if threading.current_thread() != self.ui_thread:
self.ui_thread.join(timeout=5.0)
def _ui_loop(self):
"""Main UI rendering loop."""
import sys
import select
import termios
import tty
# Setup terminal for non-blocking input
old_settings = termios.tcgetattr(sys.stdin)
try:
tty.setcbreak(sys.stdin.fileno())
# Use Live display to render the UI
with Live(self.layout, refresh_per_second=1/self.refresh_rate, screen=True) as live:
self.live = live # Store the live display for updates
# Main UI loop
while not self.stop_event.is_set():
self._update_display()
# Check for key press (non-blocking)
if select.select([sys.stdin], [], [], 0)[0]:
key = sys.stdin.read(1)
# Check for 'q' to quit
if key == 'q':
# Signal stop but don't call monitor.stop() from UI thread
# as it would cause the thread to try to join itself
self.stop_event.set()
self.monitor.is_running = False
break
time.sleep(self.refresh_rate)
# Just check if the monitor was stopped
if not self.monitor.is_running:
break
finally:
# Restore terminal settings
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_settings)
def _update_display(self):
"""Update the terminal display with current statistics."""
if not self.monitor:
return
# Update crawler status panel
self.layout["header"].update(self._create_status_panel())
# Update pipeline status panel and task details panel
self.layout["pipeline_status"].update(self._create_pipeline_panel())
self.layout["task_details"].update(self._create_task_details_panel())
# Update footer
self.layout["footer"].update(self._create_footer())
def _create_status_panel(self) -> Panel:
"""Create the crawler status panel."""
summary = self.monitor.get_summary()
# Format memory status with icon
memory_status = self.monitor.get_memory_status()
memory_icon = "🟢" # Default NORMAL
if memory_status == "PRESSURE":
memory_icon = "🟠"
elif memory_status == "CRITICAL":
memory_icon = "🔴"
# Get current memory usage
current_memory = psutil.Process().memory_info().rss / (1024 * 1024) # MB
memory_percent = (current_memory / psutil.virtual_memory().total) * 100
# Format runtime
runtime = self.monitor._format_time(time.time() - self.monitor.start_time if self.monitor.start_time else 0)
# Create the status text
status_text = Text()
status_text.append(f"Web Crawler Dashboard | Runtime: {runtime} | Memory: {memory_percent:.1f}% {memory_icon}\n")
status_text.append(f"Status: {memory_status} | URLs: {summary['urls_completed']}/{summary['urls_total']} | ")
status_text.append(f"Peak Mem: {summary['peak_memory_percent']:.1f}% at {self.monitor._format_time(summary['peak_memory_time'])}")
return Panel(status_text, title="Crawler Status", border_style="blue")
def _create_pipeline_panel(self) -> Panel:
"""Create the pipeline status panel."""
summary = self.monitor.get_summary()
queue_stats = self.monitor.get_queue_stats()
# Create a table for status counts
table = Table(show_header=True, box=None)
table.add_column("Status", style="cyan")
table.add_column("Count", justify="right")
table.add_column("Percentage", justify="right")
table.add_column("Stat", style="cyan")
table.add_column("Value", justify="right")
# Calculate overall progress
progress = f"{summary['urls_completed']}/{summary['urls_total']}"
progress_percent = f"{summary['completion_percentage']:.1f}%"
# Add rows for each status
table.add_row(
"Overall Progress",
progress,
progress_percent,
"Est. Completion",
summary.get('estimated_completion_time', "N/A")
)
# Add rows for each status
status_counts = summary['status_counts']
total = summary['urls_total'] or 1 # Avoid division by zero
# Status rows
table.add_row(
"Completed",
str(status_counts.get(CrawlStatus.COMPLETED.name, 0)),
f"{status_counts.get(CrawlStatus.COMPLETED.name, 0) / total * 100:.1f}%",
"Avg. Time/URL",
f"{summary.get('avg_task_duration', 0):.2f}s"
)
table.add_row(
"Failed",
str(status_counts.get(CrawlStatus.FAILED.name, 0)),
f"{status_counts.get(CrawlStatus.FAILED.name, 0) / total * 100:.1f}%",
"Concurrent Tasks",
str(status_counts.get(CrawlStatus.IN_PROGRESS.name, 0))
)
table.add_row(
"In Progress",
str(status_counts.get(CrawlStatus.IN_PROGRESS.name, 0)),
f"{status_counts.get(CrawlStatus.IN_PROGRESS.name, 0) / total * 100:.1f}%",
"Queue Size",
str(queue_stats['total_queued'])
)
table.add_row(
"Queued",
str(status_counts.get(CrawlStatus.QUEUED.name, 0)),
f"{status_counts.get(CrawlStatus.QUEUED.name, 0) / total * 100:.1f}%",
"Max Wait Time",
f"{queue_stats['highest_wait_time']:.1f}s"
)
# Requeued is a special case as it's not a status
requeued_count = summary.get('requeued_count', 0)
table.add_row(
"Requeued",
str(requeued_count),
f"{summary.get('requeue_rate', 0):.1f}%",
"Avg Wait Time",
f"{queue_stats['avg_wait_time']:.1f}s"
)
# Add empty row for spacing
table.add_row(
"",
"",
"",
"Requeue Rate",
f"{summary.get('requeue_rate', 0):.1f}%"
)
return Panel(table, title="Pipeline Status", border_style="green")
def _create_task_details_panel(self) -> Panel:
"""Create the task details panel."""
# Create a table for task details
table = Table(show_header=True, expand=True)
table.add_column("Task ID", style="cyan", no_wrap=True, width=10)
table.add_column("URL", style="blue", ratio=3)
table.add_column("Status", style="green", width=15)
table.add_column("Memory", justify="right", width=8)
table.add_column("Peak", justify="right", width=8)
table.add_column("Duration", justify="right", width=10)
# Get all task stats
task_stats = self.monitor.get_all_task_stats()
# Add summary row
active_tasks = sum(1 for stats in task_stats.values()
if stats['status'] == CrawlStatus.IN_PROGRESS.name)
total_memory = sum(stats['memory_usage'] for stats in task_stats.values())
total_peak = sum(stats['peak_memory'] for stats in task_stats.values())
# Summary row with separators
table.add_row(
"SUMMARY",
f"Total: {len(task_stats)}",
f"Active: {active_tasks}",
f"{total_memory:.1f}",
f"{total_peak:.1f}",
"N/A"
)
# Add a separator
table.add_row("" * 10, "" * 20, "" * 10, "" * 8, "" * 8, "" * 10)
# Status icons
status_icons = {
CrawlStatus.QUEUED.name: "⏳",
CrawlStatus.IN_PROGRESS.name: "🔄",
CrawlStatus.COMPLETED.name: "✅",
CrawlStatus.FAILED.name: "❌"
}
# Calculate how many rows we can display based on available space
# We can display more rows now that we have a dedicated panel
display_count = min(len(task_stats), 20) # Display up to 20 tasks
# Add rows for each task
for task_id, stats in sorted(
list(task_stats.items())[:display_count],
# Sort: 1. IN_PROGRESS first, 2. QUEUED, 3. COMPLETED/FAILED by recency
key=lambda x: (
0 if x[1]['status'] == CrawlStatus.IN_PROGRESS.name else
1 if x[1]['status'] == CrawlStatus.QUEUED.name else
2,
-1 * (x[1].get('end_time', 0) or 0) # Most recent first
)
):
# Truncate task_id and URL for display
short_id = task_id[:8]
url = stats['url']
if len(url) > 50: # Allow longer URLs in the dedicated panel
url = url[:47] + "..."
# Format status with icon
status = f"{status_icons.get(stats['status'], '?')} {stats['status']}"
# Add row
table.add_row(
short_id,
url,
status,
f"{stats['memory_usage']:.1f}",
f"{stats['peak_memory']:.1f}",
stats['duration'] if 'duration' in stats else "0:00"
)
return Panel(table, title="Task Details", border_style="yellow")
def _create_footer(self) -> Panel:
"""Create the footer panel."""
from rich.columns import Columns
from rich.align import Align
memory_status = self.monitor.get_memory_status()
memory_icon = "🟢" # Default NORMAL
if memory_status == "PRESSURE":
memory_icon = "🟠"
elif memory_status == "CRITICAL":
memory_icon = "🔴"
# Left section - memory status
left_text = Text()
left_text.append("Memory Status: ", style="bold")
status_style = "green" if memory_status == "NORMAL" else "yellow" if memory_status == "PRESSURE" else "red bold"
left_text.append(f"{memory_icon} {memory_status}", style=status_style)
# Center section - copyright
center_text = Text("© Crawl4AI 2025 | Made by UnclecCode", style="cyan italic")
# Right section - quit instruction
right_text = Text()
right_text.append("Press ", style="bold")
right_text.append("q", style="white on blue")
right_text.append(" to quit", style="bold")
# Create columns with the three sections
footer_content = Columns(
[
Align.left(left_text),
Align.center(center_text),
Align.right(right_text)
],
expand=True
)
# Create a more visible footer panel
return Panel(
footer_content,
border_style="white",
padding=(0, 1) # Add padding for better visibility
)
class CrawlerMonitor:
"""
Comprehensive monitoring and visualization system for tracking web crawler operations in real-time.
Provides a terminal-based dashboard that displays task statuses, memory usage, queue statistics,
and performance metrics.
"""
def __init__(
self,
urls_total: int = 0,
refresh_rate: float = 1.0,
enable_ui: bool = True,
max_width: int = 120
):
"""
Initialize the CrawlerMonitor.
Args:
urls_total: Total number of URLs to be crawled
refresh_rate: How often to refresh the UI (in seconds)
enable_ui: Whether to display the terminal UI
max_width: Maximum width of the UI in characters
"""
# Core monitoring attributes
self.stats = {} # Task ID -> stats dict
self.memory_status = "NORMAL"
self.start_time = None
self.end_time = None
self.is_running = False
self.queue_stats = {
"total_queued": 0,
"highest_wait_time": 0.0,
"avg_wait_time": 0.0
}
self.urls_total = urls_total
self.urls_completed = 0
self.peak_memory_percent = 0.0
self.peak_memory_time = 0.0
# Status counts
self.status_counts = {
CrawlStatus.QUEUED.name: 0,
CrawlStatus.IN_PROGRESS.name: 0,
CrawlStatus.COMPLETED.name: 0,
CrawlStatus.FAILED.name: 0
}
# Requeue tracking
self.requeued_count = 0
# Thread-safety
self._lock = threading.RLock()
# Terminal UI
self.enable_ui = enable_ui
self.terminal_ui = TerminalUI(
refresh_rate=refresh_rate,
max_width=max_width
) if enable_ui else None
def start(self):
"""
Start the monitoring session.
- Initializes the start_time
- Sets is_running to True
- Starts the terminal UI if enabled
"""
with self._lock:
self.start_time = time.time()
self.is_running = True
# Start the terminal UI
if self.enable_ui and self.terminal_ui:
self.terminal_ui.start(self)
def stop(self):
"""
Stop the monitoring session.
- Records end_time
- Sets is_running to False
- Stops the terminal UI
- Generates final summary statistics
"""
with self._lock:
self.end_time = time.time()
self.is_running = False
# Stop the terminal UI
if self.enable_ui and self.terminal_ui:
self.terminal_ui.stop()
def add_task(self, task_id: str, url: str):
"""
Register a new task with the monitor.
Args:
task_id: Unique identifier for the task
url: URL being crawled
The task is initialized with:
- status: QUEUED
- url: The URL to crawl
- enqueue_time: Current time
- memory_usage: 0
- peak_memory: 0
- wait_time: 0
- retry_count: 0
"""
with self._lock:
self.stats[task_id] = {
"task_id": task_id,
"url": url,
"status": CrawlStatus.QUEUED.name,
"enqueue_time": time.time(),
"start_time": None,
"end_time": None,
"memory_usage": 0.0,
"peak_memory": 0.0,
"error_message": "",
"wait_time": 0.0,
"retry_count": 0,
"duration": "0:00",
"counted_requeue": False
}
# Update status counts
self.status_counts[CrawlStatus.QUEUED.name] += 1
def update_task(
self,
task_id: str,
status: Optional[CrawlStatus] = None,
start_time: Optional[float] = None,
end_time: Optional[float] = None,
memory_usage: Optional[float] = None,
peak_memory: Optional[float] = None,
error_message: Optional[str] = None,
retry_count: Optional[int] = None,
wait_time: Optional[float] = None
):
"""
Update statistics for a specific task.
Args:
task_id: Unique identifier for the task
status: New status (QUEUED, IN_PROGRESS, COMPLETED, FAILED)
start_time: When task execution started
end_time: When task execution ended
memory_usage: Current memory usage in MB
peak_memory: Maximum memory usage in MB
error_message: Error description if failed
retry_count: Number of retry attempts
wait_time: Time spent in queue
Updates task statistics and updates status counts.
If status changes, decrements old status count and
increments new status count.
"""
with self._lock:
# Check if task exists
if task_id not in self.stats:
return
task_stats = self.stats[task_id]
# Update status counts if status is changing
old_status = task_stats["status"]
if status and status.name != old_status:
self.status_counts[old_status] -= 1
self.status_counts[status.name] += 1
# Track completion
if status == CrawlStatus.COMPLETED:
self.urls_completed += 1
# Track requeues
if old_status in [CrawlStatus.COMPLETED.name, CrawlStatus.FAILED.name] and not task_stats.get("counted_requeue", False):
self.requeued_count += 1
task_stats["counted_requeue"] = True
# Update task statistics
if status:
task_stats["status"] = status.name
if start_time is not None:
task_stats["start_time"] = start_time
if end_time is not None:
task_stats["end_time"] = end_time
if memory_usage is not None:
task_stats["memory_usage"] = memory_usage
# Update peak memory if necessary
current_percent = (memory_usage / psutil.virtual_memory().total) * 100
if current_percent > self.peak_memory_percent:
self.peak_memory_percent = current_percent
self.peak_memory_time = time.time()
if peak_memory is not None:
task_stats["peak_memory"] = peak_memory
if error_message is not None:
task_stats["error_message"] = error_message
if retry_count is not None:
task_stats["retry_count"] = retry_count
if wait_time is not None:
task_stats["wait_time"] = wait_time
# Calculate duration
if task_stats["start_time"]:
end = task_stats["end_time"] or time.time()
duration = end - task_stats["start_time"]
task_stats["duration"] = self._format_time(duration)
def update_memory_status(self, status: str):
"""
Update the current memory status.
Args:
status: Memory status (NORMAL, PRESSURE, CRITICAL, or custom)
Also updates the UI to reflect the new status.
"""
with self._lock:
self.memory_status = status
def update_queue_statistics(
self,
total_queued: int,
highest_wait_time: float,
avg_wait_time: float
):
"""
Update statistics related to the task queue.
Args:
total_queued: Number of tasks currently in queue
highest_wait_time: Longest wait time of any queued task
avg_wait_time: Average wait time across all queued tasks
"""
with self._lock:
self.queue_stats = {
"total_queued": total_queued,
"highest_wait_time": highest_wait_time,
"avg_wait_time": avg_wait_time
}
def get_task_stats(self, task_id: str) -> Dict:
"""
Get statistics for a specific task.
Args:
task_id: Unique identifier for the task
Returns:
Dictionary containing all task statistics
"""
with self._lock:
return self.stats.get(task_id, {}).copy()
def get_all_task_stats(self) -> Dict[str, Dict]:
"""
Get statistics for all tasks.
Returns:
Dictionary mapping task_ids to their statistics
"""
with self._lock:
return self.stats.copy()
def get_memory_status(self) -> str:
"""
Get the current memory status.
Returns:
Current memory status string
"""
with self._lock:
return self.memory_status
def get_queue_stats(self) -> Dict:
"""
Get current queue statistics.
Returns:
Dictionary with queue statistics including:
- total_queued: Number of tasks in queue
- highest_wait_time: Longest wait time
- avg_wait_time: Average wait time
"""
with self._lock:
return self.queue_stats.copy()
def get_summary(self) -> Dict:
"""
Get a summary of all crawler statistics.
Returns:
Dictionary containing:
- runtime: Total runtime in seconds
- urls_total: Total URLs to process
- urls_completed: Number of completed URLs
- completion_percentage: Percentage complete
- status_counts: Count of tasks in each status
- memory_status: Current memory status
- peak_memory_percent: Highest memory usage
- peak_memory_time: When peak memory occurred
- avg_task_duration: Average task processing time
- estimated_completion_time: Projected finish time
- requeue_rate: Percentage of tasks requeued
"""
with self._lock:
# Calculate runtime
current_time = time.time()
runtime = current_time - (self.start_time or current_time)
# Calculate completion percentage
completion_percentage = 0
if self.urls_total > 0:
completion_percentage = (self.urls_completed / self.urls_total) * 100
# Calculate average task duration for completed tasks
completed_tasks = [
task for task in self.stats.values()
if task["status"] == CrawlStatus.COMPLETED.name and task.get("start_time") and task.get("end_time")
]
avg_task_duration = 0
if completed_tasks:
total_duration = sum(task["end_time"] - task["start_time"] for task in completed_tasks)
avg_task_duration = total_duration / len(completed_tasks)
# Calculate requeue rate
requeue_rate = 0
if len(self.stats) > 0:
requeue_rate = (self.requeued_count / len(self.stats)) * 100
# Calculate estimated completion time
estimated_completion_time = "N/A"
if avg_task_duration > 0 and self.urls_total > 0 and self.urls_completed > 0:
remaining_tasks = self.urls_total - self.urls_completed
estimated_seconds = remaining_tasks * avg_task_duration
estimated_completion_time = self._format_time(estimated_seconds)
return {
"runtime": runtime,
"urls_total": self.urls_total,
"urls_completed": self.urls_completed,
"completion_percentage": completion_percentage,
"status_counts": self.status_counts.copy(),
"memory_status": self.memory_status,
"peak_memory_percent": self.peak_memory_percent,
"peak_memory_time": self.peak_memory_time,
"avg_task_duration": avg_task_duration,
"estimated_completion_time": estimated_completion_time,
"requeue_rate": requeue_rate,
"requeued_count": self.requeued_count
}
def render(self):
"""
Render the terminal UI.
This is the main UI rendering loop that:
1. Updates all statistics
2. Formats the display
3. Renders the ASCII interface
4. Handles keyboard input
Note: The actual rendering is handled by the TerminalUI class
which uses the rich library's Live display.
"""
if self.enable_ui and self.terminal_ui:
# Force an update of the UI
if hasattr(self.terminal_ui, '_update_display'):
self.terminal_ui._update_display()
def _format_time(self, seconds: float) -> str:
"""
Format time in hours:minutes:seconds.
Args:
seconds: Time in seconds
Returns:
Formatted time string (e.g., "1:23:45")
"""
delta = timedelta(seconds=int(seconds))
hours, remainder = divmod(delta.seconds, 3600)
minutes, seconds = divmod(remainder, 60)
if hours > 0:
return f"{hours}:{minutes:02}:{seconds:02}"
else:
return f"{minutes}:{seconds:02}"
def _calculate_estimated_completion(self) -> str:
"""
Calculate estimated completion time based on current progress.
Returns:
Formatted time string
"""
summary = self.get_summary()
return summary.get("estimated_completion_time", "N/A")
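For reference, the formatting and summary defaults above behave like this (a standalone check with the terminal UI disabled so no TTY is needed):
monitor = CrawlerMonitor(enable_ui=False)
assert monitor._format_time(75) == "1:15"
assert monitor._format_time(3671) == "1:01:11"
assert monitor.get_summary()["urls_total"] == 0   # nothing registered yet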
# Example code for testing
if __name__ == "__main__":
# Initialize the monitor
monitor = CrawlerMonitor(urls_total=100)
# Start monitoring
monitor.start()
try:
# Simulate some tasks
for i in range(20):
task_id = str(uuid.uuid4())
url = f"https://example.com/page{i}"
monitor.add_task(task_id, url)
# Simulate 20% of tasks are already running
if i < 4:
monitor.update_task(
task_id=task_id,
status=CrawlStatus.IN_PROGRESS,
start_time=time.time() - 30, # Started 30 seconds ago
memory_usage=10.5
)
# Simulate 10% of tasks are completed
if i >= 4 and i < 6:
start_time = time.time() - 60
end_time = time.time() - 15
monitor.update_task(
task_id=task_id,
status=CrawlStatus.IN_PROGRESS,
start_time=start_time,
memory_usage=8.2
)
monitor.update_task(
task_id=task_id,
status=CrawlStatus.COMPLETED,
end_time=end_time,
memory_usage=0,
peak_memory=15.7
)
# Simulate 5% of tasks fail
if i >= 6 and i < 7:
start_time = time.time() - 45
end_time = time.time() - 20
monitor.update_task(
task_id=task_id,
status=CrawlStatus.IN_PROGRESS,
start_time=start_time,
memory_usage=12.3
)
monitor.update_task(
task_id=task_id,
status=CrawlStatus.FAILED,
end_time=end_time,
memory_usage=0,
peak_memory=18.2,
error_message="Connection timeout"
)
# Simulate memory pressure
monitor.update_memory_status("PRESSURE")
# Simulate queue statistics
monitor.update_queue_statistics(
total_queued=16, # 20 - 4 (in progress)
highest_wait_time=120.5,
avg_wait_time=60.2
)
# Keep the monitor running for a demonstration
print("Crawler Monitor is running. Press 'q' to exit.")
while monitor.is_running:
time.sleep(0.1)
except KeyboardInterrupt:
print("\nExiting crawler monitor...")
finally:
# Stop the monitor
monitor.stop()
print("Crawler monitor exited successfully.")

View File

@@ -4,84 +4,43 @@ from dotenv import load_dotenv
load_dotenv() # Load environment variables from .env file
# Default provider, ONLY used when the extraction strategy is LLMExtractionStrategy
DEFAULT_PROVIDER = "openai/gpt-4o"
DEFAULT_PROVIDER_API_KEY = "OPENAI_API_KEY"
DEFAULT_PROVIDER = "openai/gpt-4o-mini"
MODEL_REPO_BRANCH = "new-release-0.0.2"
# Provider-model dictionary, ONLY used when the extraction strategy is LLMExtractionStrategy
PROVIDER_MODELS = {
"ollama/llama3": "no-token-needed", # Any model from Ollama no need for API token
"ollama/llama3": "no-token-needed", # Any model from Ollama no need for API token
"groq/llama3-70b-8192": os.getenv("GROQ_API_KEY"),
"groq/llama3-8b-8192": os.getenv("GROQ_API_KEY"),
"openai/gpt-4o-mini": os.getenv("OPENAI_API_KEY"),
"openai/gpt-4o": os.getenv("OPENAI_API_KEY"),
"openai/o1-mini": os.getenv("OPENAI_API_KEY"),
"openai/o1-preview": os.getenv("OPENAI_API_KEY"),
"openai/o3-mini": os.getenv("OPENAI_API_KEY"),
"openai/o3-mini-high": os.getenv("OPENAI_API_KEY"),
"anthropic/claude-3-haiku-20240307": os.getenv("ANTHROPIC_API_KEY"),
"anthropic/claude-3-opus-20240229": os.getenv("ANTHROPIC_API_KEY"),
"anthropic/claude-3-sonnet-20240229": os.getenv("ANTHROPIC_API_KEY"),
"anthropic/claude-3-5-sonnet-20240620": os.getenv("ANTHROPIC_API_KEY"),
"gemini/gemini-pro": os.getenv("GEMINI_API_KEY"),
'gemini/gemini-1.5-pro': os.getenv("GEMINI_API_KEY"),
'gemini/gemini-2.0-flash': os.getenv("GEMINI_API_KEY"),
'gemini/gemini-2.0-flash-exp': os.getenv("GEMINI_API_KEY"),
'gemini/gemini-2.0-flash-lite-preview-02-05': os.getenv("GEMINI_API_KEY"),
"deepseek/deepseek-chat": os.getenv("DEEPSEEK_API_KEY"),
}
PROVIDER_MODELS_PREFIXES = {
"ollama": "no-token-needed", # Any model from Ollama no need for API token
"groq": os.getenv("GROQ_API_KEY"),
"openai": os.getenv("OPENAI_API_KEY"),
"anthropic": os.getenv("ANTHROPIC_API_KEY"),
"gemini": os.getenv("GEMINI_API_KEY"),
"deepseek": os.getenv("DEEPSEEK_API_KEY"),
}
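A small illustrative helper (not part of this module) showing how a caller might resolve an API token from the two tables above:
def resolve_api_token(provider):
    # Exact "provider/model" match first, then fall back to the bare provider prefix.
    if provider in PROVIDER_MODELS:
        return PROVIDER_MODELS[provider]
    prefix = provider.split("/", 1)[0]
    return PROVIDER_MODELS_PREFIXES.get(prefix)
# resolve_api_token("ollama/llama3")       -> "no-token-needed"
# resolve_api_token("openai/gpt-4o-mini")  -> whatever OPENAI_API_KEY is set to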
# Chunk token threshold
CHUNK_TOKEN_THRESHOLD = 2**11 # 2048 tokens
CHUNK_TOKEN_THRESHOLD = 2 ** 11 # 2048 tokens
OVERLAP_RATE = 0.1
WORD_TOKEN_RATE = 1.3
# Threshold for the minimum number of words in an HTML tag to be considered
# Threshold for the minimum number of words in an HTML tag to be considered
MIN_WORD_THRESHOLD = 1
IMAGE_DESCRIPTION_MIN_WORD_THRESHOLD = 1
IMPORTANT_ATTRS = ["src", "href", "alt", "title", "width", "height"]
ONLY_TEXT_ELIGIBLE_TAGS = [
"b",
"i",
"u",
"span",
"del",
"ins",
"sub",
"sup",
"strong",
"em",
"code",
"kbd",
"var",
"s",
"q",
"abbr",
"cite",
"dfn",
"time",
"small",
"mark",
]
IMPORTANT_ATTRS = ['src', 'href', 'alt', 'title', 'width', 'height']
ONLY_TEXT_ELIGIBLE_TAGS = ['b', 'i', 'u', 'span', 'del', 'ins', 'sub', 'sup', 'strong', 'em', 'code', 'kbd', 'var', 's', 'q', 'abbr', 'cite', 'dfn', 'time', 'small', 'mark']
SOCIAL_MEDIA_DOMAINS = [
"facebook.com",
"twitter.com",
"x.com",
"linkedin.com",
"instagram.com",
"pinterest.com",
"tiktok.com",
"snapchat.com",
"reddit.com",
]
'facebook.com',
'twitter.com',
'x.com',
'linkedin.com',
'instagram.com',
'pinterest.com',
'tiktok.com',
'snapchat.com',
'reddit.com',
]
# Threshold for the Image extraction - Range is 1 to 6
# Images are scored on a point-based system to filter them by usefulness. Points are assigned
@@ -93,54 +52,4 @@ SOCIAL_MEDIA_DOMAINS = [
# If image is in the first half of the total images extracted from the page
IMAGE_SCORE_THRESHOLD = 2
MAX_METRICS_HISTORY = 1000
NEED_MIGRATION = True
URL_LOG_SHORTEN_LENGTH = 30
SHOW_DEPRECATION_WARNINGS = True
SCREENSHOT_HEIGHT_TRESHOLD = 10000
PAGE_TIMEOUT = 60000
DOWNLOAD_PAGE_TIMEOUT = 60000
# Global user settings with descriptions and default values
USER_SETTINGS = {
"DEFAULT_LLM_PROVIDER": {
"default": "openai/gpt-4o",
"description": "Default LLM provider in 'company/model' format (e.g., 'openai/gpt-4o', 'anthropic/claude-3-sonnet')",
"type": "string"
},
"DEFAULT_LLM_PROVIDER_TOKEN": {
"default": "",
"description": "API token for the default LLM provider",
"type": "string",
"secret": True
},
"VERBOSE": {
"default": False,
"description": "Enable verbose output for all commands",
"type": "boolean"
},
"BROWSER_HEADLESS": {
"default": True,
"description": "Run browser in headless mode by default",
"type": "boolean"
},
"BROWSER_TYPE": {
"default": "chromium",
"description": "Default browser type (chromium or firefox)",
"type": "string",
"options": ["chromium", "firefox"]
},
"CACHE_MODE": {
"default": "bypass",
"description": "Default cache mode (bypass, use, or refresh)",
"type": "string",
"options": ["bypass", "use", "refresh"]
},
"USER_AGENT_MODE": {
"default": "default",
"description": "Default user agent mode (default, random, or mobile)",
"type": "string",
"options": ["default", "random", "mobile"]
}
}
MAX_METRICS_HISTORY = 1000

View File

@@ -0,0 +1,198 @@
from bs4 import BeautifulSoup, Tag
import re
from typing import Optional
class ContentCleaningStrategy:
def __init__(self):
# Precompile regex patterns for performance
self.negative_patterns = re.compile(r'nav|footer|header|sidebar|ads|comment', re.I)
self.positive_patterns = re.compile(r'content|article|main|post', re.I)
self.priority_tags = {'article', 'main', 'section', 'div'}
self.non_content_tags = {'nav', 'footer', 'header', 'aside'}
# Thresholds
self.text_density_threshold = 9.0
self.min_word_count = 50
self.link_density_threshold = 0.2
self.max_dom_depth = 10 # To prevent excessive DOM traversal
def clean(self, clean_html: str, soup = None) -> str:
"""
Main function that takes cleaned HTML and returns super cleaned HTML.
Args:
clean_html (str): The cleaned HTML content.
Returns:
str: The super cleaned HTML containing only the main content.
"""
try:
if not clean_html or not isinstance(clean_html, str):
return ''
if not soup:
# soup = BeautifulSoup(clean_html, 'html.parser')
soup = BeautifulSoup(clean_html, 'lxml')
main_content = self.extract_main_content(soup)
if main_content:
super_clean_element = self.clean_element(main_content)
return super_clean_element.encode_contents().decode('utf-8')
else:
return ''
except Exception:
# Handle exceptions silently or log them as needed
return ''
def extract_main_content(self, soup) -> Optional[Tag]:
"""
Identifies and extracts the main content element from the HTML.
Args:
soup (BeautifulSoup): The parsed HTML soup.
Returns:
Optional[Tag]: The Tag object containing the main content, or None if not found.
"""
candidates = []
for element in soup.find_all(self.priority_tags):
if self.is_non_content_tag(element):
continue
if self.has_negative_class_id(element):
continue
score = self.calculate_content_score(element)
candidates.append((score, element))
if not candidates:
return None
# Sort candidates by score in descending order
candidates.sort(key=lambda x: x[0], reverse=True)
# Select the element with the highest score
best_element = candidates[0][1]
return best_element
def calculate_content_score(self, element: Tag) -> float:
"""
Calculates a score for an element based on various heuristics.
Args:
element (Tag): The HTML element to score.
Returns:
float: The content score of the element.
"""
score = 0.0
if self.is_priority_tag(element):
score += 5.0
if self.has_positive_class_id(element):
score += 3.0
if self.has_negative_class_id(element):
score -= 3.0
if self.is_high_text_density(element):
score += 2.0
if self.is_low_link_density(element):
score += 2.0
if self.has_sufficient_content(element):
score += 2.0
if self.has_headings(element):
score += 3.0
dom_depth = self.calculate_dom_depth(element)
score += min(dom_depth, self.max_dom_depth) * 0.5 # Adjust weight as needed
return score
def is_priority_tag(self, element: Tag) -> bool:
"""Checks if the element is a priority tag."""
return element.name in self.priority_tags
def is_non_content_tag(self, element: Tag) -> bool:
"""Checks if the element is a non-content tag."""
return element.name in self.non_content_tags
def has_negative_class_id(self, element: Tag) -> bool:
"""Checks if the element has negative indicators in its class or id."""
class_id = ' '.join(filter(None, [
self.get_attr_str(element.get('class')),
element.get('id', '')
]))
return bool(self.negative_patterns.search(class_id))
def has_positive_class_id(self, element: Tag) -> bool:
"""Checks if the element has positive indicators in its class or id."""
class_id = ' '.join(filter(None, [
self.get_attr_str(element.get('class')),
element.get('id', '')
]))
return bool(self.positive_patterns.search(class_id))
@staticmethod
def get_attr_str(attr) -> str:
"""Converts an attribute value to a string."""
if isinstance(attr, list):
return ' '.join(attr)
elif isinstance(attr, str):
return attr
else:
return ''
def is_high_text_density(self, element: Tag) -> bool:
"""Determines if the element has high text density."""
text_density = self.calculate_text_density(element)
return text_density > self.text_density_threshold
def calculate_text_density(self, element: Tag) -> float:
"""Calculates the text density of an element."""
text_length = len(element.get_text(strip=True))
tag_count = len(element.find_all())
tag_count = tag_count or 1 # Prevent division by zero
return text_length / tag_count
def is_low_link_density(self, element: Tag) -> bool:
"""Determines if the element has low link density."""
link_density = self.calculate_link_density(element)
return link_density < self.link_density_threshold
def calculate_link_density(self, element: Tag) -> float:
"""Calculates the link density of an element."""
text = element.get_text(strip=True)
if not text:
return 0.0
link_text = ' '.join(a.get_text(strip=True) for a in element.find_all('a'))
return len(link_text) / len(text) if text else 0.0
def has_sufficient_content(self, element: Tag) -> bool:
"""Checks if the element has sufficient word count."""
word_count = len(element.get_text(strip=True).split())
return word_count >= self.min_word_count
def calculate_dom_depth(self, element: Tag) -> int:
"""Calculates the depth of an element in the DOM tree."""
depth = 0
current_element = element
while current_element.parent and depth < self.max_dom_depth:
depth += 1
current_element = current_element.parent
return depth
def has_headings(self, element: Tag) -> bool:
"""Checks if the element contains heading tags."""
return bool(element.find(['h1', 'h2', 'h3']))
def clean_element(self, element: Tag) -> Tag:
"""
Cleans the selected element by removing unnecessary attributes and nested non-content elements.
Args:
element (Tag): The HTML element to clean.
Returns:
Tag: The cleaned HTML element.
"""
for tag in element.find_all(['script', 'style', 'aside']):
tag.decompose()
for tag in element.find_all():
attrs = dict(tag.attrs)
for attr in attrs:
if attr in ['style', 'onclick', 'onmouseover', 'align', 'bgcolor']:
del tag.attrs[attr]
return element
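A minimal end-to-end sketch for the strategy above (requires bs4 and lxml; the markup is invented for illustration):
body_text = "meaningful article text " * 20   # well past the 50-word minimum
html = (
    "<html><body>"
    "<nav class='nav'>home about contact</nav>"
    f"<article class='post-content'><h1>Title</h1><p>{body_text}</p></article>"
    "<footer>footer links</footer>"
    "</body></html>"
)
cleaner = ContentCleaningStrategy()
main_html = cleaner.clean(html)
# <nav>/<footer> are not priority tags and never become candidates, so the <article> wins
# on priority tag + positive class + headings + text density and its contents are returned alone.
assert "Title" in main_html and "footer links" not in main_html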

File diff suppressed because it is too large Load Diff

View File

@@ -1,900 +0,0 @@
import re
from itertools import chain
from abc import ABC, abstractmethod
from typing import Dict, Any, Optional
from bs4 import BeautifulSoup
import asyncio
import requests
from .config import (
MIN_WORD_THRESHOLD,
IMAGE_DESCRIPTION_MIN_WORD_THRESHOLD,
IMAGE_SCORE_THRESHOLD,
ONLY_TEXT_ELIGIBLE_TAGS,
IMPORTANT_ATTRS,
SOCIAL_MEDIA_DOMAINS,
)
from bs4 import NavigableString, Comment
from bs4 import PageElement, Tag
from urllib.parse import urljoin
from requests.exceptions import InvalidSchema
from .utils import (
extract_metadata,
normalize_url,
is_external_url,
get_base_domain,
extract_metadata_using_lxml,
extract_page_context,
calculate_link_intrinsic_score,
)
from lxml import etree
from lxml import html as lhtml
from typing import List
from .models import ScrapingResult, MediaItem, Link, Media, Links
import copy
# Pre-compile regular expressions for Open Graph and Twitter metadata
OG_REGEX = re.compile(r"^og:")
TWITTER_REGEX = re.compile(r"^twitter:")
DIMENSION_REGEX = re.compile(r"(\d+)(\D*)")
# Function to parse srcset
def parse_srcset(s: str) -> List[Dict]:
if not s:
return []
variants = []
for part in s.split(","):
part = part.strip()
if not part:
continue
parts = part.split()
if len(parts) >= 1:
url = parts[0]
width = (
parts[1].rstrip("w").split('.')[0]
if len(parts) > 1 and parts[1].endswith("w")
else None
)
variants.append({"url": url, "width": width})
return variants
# Function to parse image height/width value and units
def parse_dimension(dimension):
if dimension:
# match = re.match(r"(\d+)(\D*)", dimension)
match = DIMENSION_REGEX.match(dimension)
if match:
number = int(match.group(1))
unit = match.group(2) or "px" # Default unit is 'px' if not specified
return number, unit
return None, None
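Concretely, the two helpers above behave like this (standalone asserts):
assert parse_srcset("a.jpg 480w, b.jpg 800w") == [
    {"url": "a.jpg", "width": "480"},
    {"url": "b.jpg", "width": "800"},
]
assert parse_dimension("640px") == (640, "px")
assert parse_dimension("75%") == (75, "%")
assert parse_dimension(None) == (None, None)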
# Fetch image file metadata to extract size and extension
def fetch_image_file_size(img, base_url):
# If src is relative path construct full URL, if not it may be CDN URL
img_url = urljoin(base_url, img.get("src"))
try:
response = requests.head(img_url)
if response.status_code == 200:
return response.headers.get("Content-Length", None)
else:
print(f"Failed to retrieve file size for {img_url}")
return None
except InvalidSchema:
return None
finally:
return
class ContentScrapingStrategy(ABC):
@abstractmethod
def scrap(self, url: str, html: str, **kwargs) -> ScrapingResult:
pass
@abstractmethod
async def ascrap(self, url: str, html: str, **kwargs) -> ScrapingResult:
pass
class LXMLWebScrapingStrategy(ContentScrapingStrategy):
"""
LXML-based implementation for fast web content scraping.
This is the primary scraping strategy in Crawl4AI, providing high-performance
HTML parsing and content extraction using the lxml library.
Note: WebScrapingStrategy is now an alias for this class to maintain
backward compatibility.
"""
def __init__(self, logger=None):
self.logger = logger
self.DIMENSION_REGEX = re.compile(r"(\d+)(\D*)")
self.BASE64_PATTERN = re.compile(r'data:image/[^;]+;base64,([^"]+)')
def _log(self, level, message, tag="SCRAPE", **kwargs):
"""Helper method to safely use logger."""
if self.logger:
log_method = getattr(self.logger, level)
log_method(message=message, tag=tag, **kwargs)
def scrap(self, url: str, html: str, **kwargs) -> ScrapingResult:
"""
Main entry point for content scraping.
Args:
url (str): The URL of the page to scrape.
html (str): The HTML content of the page.
**kwargs: Additional keyword arguments.
Returns:
ScrapingResult: A structured result containing the scraped content.
"""
actual_url = kwargs.get("redirected_url", url)
raw_result = self._scrap(actual_url, html, **kwargs)
if raw_result is None:
return ScrapingResult(
cleaned_html="",
success=False,
media=Media(),
links=Links(),
metadata={},
)
# Convert media items
media = Media(
images=[
MediaItem(**img)
for img in raw_result.get("media", {}).get("images", [])
if img
],
videos=[
MediaItem(**vid)
for vid in raw_result.get("media", {}).get("videos", [])
if vid
],
audios=[
MediaItem(**aud)
for aud in raw_result.get("media", {}).get("audios", [])
if aud
],
tables=raw_result.get("media", {}).get("tables", [])
)
# Convert links
links = Links(
internal=[
Link(**link)
for link in raw_result.get("links", {}).get("internal", [])
if link
],
external=[
Link(**link)
for link in raw_result.get("links", {}).get("external", [])
if link
],
)
return ScrapingResult(
cleaned_html=raw_result.get("cleaned_html", ""),
success=raw_result.get("success", False),
media=media,
links=links,
metadata=raw_result.get("metadata", {}),
)
async def ascrap(self, url: str, html: str, **kwargs) -> ScrapingResult:
"""
Main entry point for asynchronous content scraping.
Args:
url (str): The URL of the page to scrape.
html (str): The HTML content of the page.
**kwargs: Additional keyword arguments.
Returns:
ScrapingResult: A structured result containing the scraped content.
"""
return await asyncio.to_thread(self.scrap, url, html, **kwargs)
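How the strategy is typically driven; a hedged sketch only: the result fields follow the Link/MediaItem/ScrapingResult models imported above, and the exact contents depend on parts of `_scrap` that continue beyond this excerpt.
strategy = LXMLWebScrapingStrategy()
html = (
    "<html><body><h1>Hello</h1>"
    "<p>Some introductory body text for the page.</p>"
    "<a href='/about' title='About us'>About</a>"
    "</body></html>"
)
result = strategy.scrap("https://example.com", html)
if result.success:
    internal_hrefs = [link.href for link in result.links.internal]
    image_srcs = [image.src for image in result.media.images]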
def process_element(self, url, element: lhtml.HtmlElement, **kwargs) -> Dict[str, Any]:
"""
Process an HTML element.
How it works:
1. Check if the element is an image, video, or audio.
2. Extract the element's attributes and content.
3. Process the element based on its type.
4. Return the processed element information.
Args:
url (str): The URL of the page containing the element.
element (lhtml.HtmlElement): The HTML element to process.
**kwargs: Additional keyword arguments.
Returns:
dict: A dictionary containing the processed element information.
"""
media = {"images": [], "videos": [], "audios": [], "tables": []}
internal_links_dict = {}
external_links_dict = {}
self._process_element(
url, element, media, internal_links_dict, external_links_dict, **kwargs
)
return {
"media": media,
"internal_links_dict": internal_links_dict,
"external_links_dict": external_links_dict,
}
def _process_element(
self,
url: str,
element: lhtml.HtmlElement,
media: Dict[str, List],
internal_links_dict: Dict[str, Any],
external_links_dict: Dict[str, Any],
page_context: dict = None,
**kwargs,
) -> bool:
base_domain = kwargs.get("base_domain", get_base_domain(url))
exclude_domains = set(kwargs.get("exclude_domains", []))
# Process links
try:
base_element = element.xpath("//head/base[@href]")
if base_element:
base_href = base_element[0].get("href", "").strip()
if base_href:
url = base_href
except Exception as e:
self._log("error", f"Error extracting base URL: {str(e)}", "SCRAPE")
pass
for link in element.xpath(".//a[@href]"):
href = link.get("href", "").strip()
if not href:
continue
try:
normalized_href = normalize_url(
href, url,
preserve_https=kwargs.get('preserve_https_for_internal_links', False),
original_scheme=kwargs.get('original_scheme')
)
link_data = {
"href": normalized_href,
"text": link.text_content().strip(),
"title": link.get("title", "").strip(),
"base_domain": base_domain,
}
# Add intrinsic scoring if enabled
if kwargs.get("score_links", False) and page_context is not None:
try:
intrinsic_score = calculate_link_intrinsic_score(
link_text=link_data["text"],
url=normalized_href,
title_attr=link_data["title"],
class_attr=link.get("class", ""),
rel_attr=link.get("rel", ""),
page_context=page_context
)
link_data["intrinsic_score"] = intrinsic_score
except Exception:
# Fail gracefully - assign default score
link_data["intrinsic_score"] = 0
else:
# No scoring enabled - assign infinity (all links equal priority)
link_data["intrinsic_score"] = 0
is_external = is_external_url(normalized_href, base_domain)
if is_external:
link_base_domain = get_base_domain(normalized_href)
link_data["base_domain"] = link_base_domain
if (
kwargs.get("exclude_external_links", False)
or link_base_domain in exclude_domains
):
link.getparent().remove(link)
continue
if normalized_href not in external_links_dict:
external_links_dict[normalized_href] = link_data
else:
if normalized_href not in internal_links_dict:
internal_links_dict[normalized_href] = link_data
except Exception as e:
self._log("error", f"Error processing link: {str(e)}", "SCRAPE")
continue
# Process images
images = element.xpath(".//img")
total_images = len(images)
for idx, img in enumerate(images):
src = img.get("src") or ""
img_domain = get_base_domain(src)
# Decide if we need to exclude this image
# 1) If its domain is in exclude_domains, remove.
# 2) Or if exclude_external_images=True and it's an external domain, remove.
if (img_domain in exclude_domains) or (
kwargs.get("exclude_external_images", False)
and is_external_url(src, base_domain)
):
parent = img.getparent()
if parent is not None:
parent.remove(img)
continue
# Otherwise, process the image as usual.
try:
processed_images = self.process_image(
img, url, idx, total_images, **kwargs
)
if processed_images:
media["images"].extend(processed_images)
except Exception as e:
self._log("error", f"Error processing image: {str(e)}", "SCRAPE")
# Process videos and audios
for media_type in ["video", "audio"]:
for elem in element.xpath(f".//{media_type}"):
media_info = {
"src": elem.get("src"),
"alt": elem.get("alt"),
"type": media_type,
"description": self.find_closest_parent_with_useful_text(
elem, **kwargs
),
}
media[f"{media_type}s"].append(media_info)
# Process source tags within media elements
for source in elem.xpath(".//source"):
if src := source.get("src"):
media[f"{media_type}s"].append({**media_info, "src": src})
# Clean up unwanted elements
if kwargs.get("remove_forms", False):
for form in element.xpath(".//form"):
form.getparent().remove(form)
if excluded_tags := kwargs.get("excluded_tags", []):
for tag in excluded_tags:
for elem in element.xpath(f".//{tag}"):
elem.getparent().remove(elem)
if excluded_selector := kwargs.get("excluded_selector", ""):
try:
for elem in element.cssselect(excluded_selector):
elem.getparent().remove(elem)
except Exception:
pass # Invalid selector
return True
def find_closest_parent_with_useful_text(
self, element: lhtml.HtmlElement, **kwargs
) -> Optional[str]:
image_description_min_word_threshold = kwargs.get(
"image_description_min_word_threshold", IMAGE_DESCRIPTION_MIN_WORD_THRESHOLD
)
current = element
while current is not None:
if (
current.text
and len(current.text_content().split())
>= image_description_min_word_threshold
):
return current.text_content().strip()
current = current.getparent()
return None
def flatten_nested_elements(self, element: lhtml.HtmlElement) -> lhtml.HtmlElement:
"""Flatten nested elements of the same type in LXML tree"""
if len(element) == 1 and element.tag == element[0].tag:
return self.flatten_nested_elements(element[0])
for child in element:
child_idx = element.index(child)
flattened_child = self.flatten_nested_elements(child)
if flattened_child is not child: # Only replace if actually flattened
element[child_idx] = flattened_child
return element
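A one-line illustration of the flattening behaviour, using a throwaway fragment:
from lxml import html as lhtml
nested = lhtml.fromstring("<div><div><div><p>text</p></div></div></div>")
flat = LXMLWebScrapingStrategy().flatten_nested_elements(nested)
# Chains of identical wrappers collapse to the innermost one that holds real content.
assert flat.tag == "div" and flat[0].tag == "p"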
def process_image(
self, img: lhtml.HtmlElement, url: str, index: int, total_images: int, **kwargs
) -> Optional[List[Dict]]:
# Quick validation checks
style = img.get("style", "")
alt = img.get("alt", "")
src = img.get("src", "")
data_src = img.get("data-src", "")
srcset = img.get("srcset", "")
data_srcset = img.get("data-srcset", "")
if "display:none" in style:
return None
parent = img.getparent()
if parent.tag in ["button", "input"]:
return None
parent_classes = parent.get("class", "").split()
if any(
"button" in cls or "icon" in cls or "logo" in cls for cls in parent_classes
):
return None
# If src is in class or alt, likely an icon
if (src and any(c in src for c in ["button", "icon", "logo"])) or (
alt and any(c in alt for c in ["button", "icon", "logo"])
):
return None
# Score calculation
score = 0
if (width := img.get("width")) and width.isdigit():
score += 1 if int(width) > 150 else 0
if (height := img.get("height")) and height.isdigit():
score += 1 if int(height) > 150 else 0
if alt:
score += 1
score += index / total_images < 0.5
# Check formats in all possible sources
image_formats = {"jpg", "jpeg", "png", "webp", "avif", "gif"}
detected_format = None
for url in [src, data_src, srcset, data_srcset]:
if url:
format_matches = [fmt for fmt in image_formats if fmt in url.lower()]
if format_matches:
detected_format = format_matches[0]
score += 1
break
if srcset or data_srcset:
score += 1
if picture := img.xpath("./ancestor::picture[1]"):
score += 1
if score <= kwargs.get("image_score_threshold", IMAGE_SCORE_THRESHOLD):
return None
# Process image variants
unique_urls = set()
image_variants = []
base_info = {
"alt": alt,
"desc": self.find_closest_parent_with_useful_text(img, **kwargs),
"score": score,
"type": "image",
"group_id": index,
"format": detected_format,
}
def add_variant(src: str, width: Optional[str] = None):
if src and not src.startswith("data:") and src not in unique_urls:
unique_urls.add(src)
variant = {**base_info, "src": src}
if width:
variant["width"] = width
image_variants.append(variant)
# Add variants from different sources
add_variant(src)
add_variant(data_src)
for srcset_attr in [srcset, data_srcset]:
if srcset_attr:
for source in parse_srcset(srcset_attr):
add_variant(source["url"], source["width"])
# Handle picture element
if picture:
for source in picture[0].xpath(".//source[@srcset]"):
if source_srcset := source.get("srcset"):
for src_data in parse_srcset(source_srcset):
add_variant(src_data["url"], src_data["width"])
# Check framework-specific attributes
for attr, value in img.attrib.items():
if (
attr.startswith("data-")
and ("src" in attr or "srcset" in attr)
and "http" in value
):
add_variant(value)
return image_variants if image_variants else None
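The scoring path above can be exercised in isolation; a rough sketch in which the URLs and sizes are made up and only this strategy's own code is relied on:
from lxml import html as lhtml
img_html = (
    '<img src="https://cdn.example.com/photo.jpg" alt="A product photo" '
    'width="800" height="600" srcset="photo-480.jpg 480w, photo-800.jpg 800w">'
)
div = lhtml.fragment_fromstring(img_html, create_parent="div")
img = div.find("img")
strategy = LXMLWebScrapingStrategy()
variants = strategy.process_image(img, "https://example.com/products", 0, 4)
# width > 150, height > 150, alt text, early position, recognised format and srcset
# each add a point -> score 6, above the default IMAGE_SCORE_THRESHOLD of 2,
# so three variants come back: the src plus the two srcset entries.
assert len(variants) == 3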
def remove_empty_elements_fast(self, root, word_count_threshold=5):
"""
Remove elements that fall below the desired word threshold in a single pass from the bottom up.
Skips non-element nodes like HtmlComment and bypasses certain tags that are allowed to have no content.
"""
bypass_tags = {
"a",
"img",
"br",
"hr",
"input",
"meta",
"link",
"source",
"track",
"wbr",
"tr",
"td",
"th",
}
for el in reversed(list(root.iterdescendants())):
if not isinstance(el, lhtml.HtmlElement):
continue
if el.tag in bypass_tags:
continue
text_content = (el.text_content() or "").strip()
if (
len(text_content.split()) < word_count_threshold
and not el.getchildren()
):
parent = el.getparent()
if parent is not None:
parent.remove(el)
return root
def remove_unwanted_attributes_fast(
self, root: lhtml.HtmlElement, important_attrs=None, keep_data_attributes=False
) -> lhtml.HtmlElement:
"""
Removes all attributes from each element (including root) except those in `important_attrs`.
If `keep_data_attributes=True`, also retain any attribute starting with 'data-'.
Returns the same root element, mutated in-place, for fluent usage.
"""
if important_attrs is None:
important_attrs = set(IMPORTANT_ATTRS)
# If you want to handle the root as well, use 'include_self=True'
# so you don't miss attributes on the top-level element.
# Manually include the root, then all its descendants
for el in chain((root,), root.iterdescendants()):
# We only remove attributes on HtmlElement nodes, skip comments or text nodes
if not isinstance(el, lhtml.HtmlElement):
continue
old_attribs = dict(el.attrib)
new_attribs = {}
for attr_name, attr_val in old_attribs.items():
# If it's an important attribute, keep it
if attr_name in important_attrs:
new_attribs[attr_name] = attr_val
# Or if keep_data_attributes is True and it's a 'data-*' attribute
elif keep_data_attributes and attr_name.startswith("data-"):
new_attribs[attr_name] = attr_val
# Clear old attributes and set the filtered set
el.attrib.clear()
el.attrib.update(new_attribs)
return root
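A compact check of the two fast cleaners above (the sample markup is invented):
from lxml import html as lhtml
root = lhtml.fromstring(
    "<div id='page' style='color:red'>"
    "<p class='note' onclick='track()'>short</p>"
    "<p>this second paragraph easily clears the five word threshold</p>"
    "</div>"
)
strategy = LXMLWebScrapingStrategy()
strategy.remove_empty_elements_fast(root, word_count_threshold=5)  # drops the one-word <p>
strategy.remove_unwanted_attributes_fast(root)                     # strips id/style/class/onclick
assert len(root.findall("p")) == 1 and not root.attrib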
def _scrap(
self,
url: str,
html: str,
word_count_threshold: int = MIN_WORD_THRESHOLD,
css_selector: str = None,
target_elements: List[str] = None,
**kwargs,
) -> Dict[str, Any]:
if not html:
return None
success = True
try:
doc = lhtml.document_fromstring(html)
# Match BeautifulSoup's behavior of using body or full doc
# body = doc.xpath('//body')[0] if doc.xpath('//body') else doc
body = doc
base_domain = get_base_domain(url)
# Extract page context for link scoring (if enabled) - do this BEFORE any removals
page_context = None
if kwargs.get("score_links", False):
try:
# Extract title
title_elements = doc.xpath('//title')
page_title = title_elements[0].text_content() if title_elements else ""
# Extract headlines
headlines = []
for tag in ['h1', 'h2', 'h3']:
elements = doc.xpath(f'//{tag}')
for el in elements:
text = el.text_content().strip()
if text:
headlines.append(text)
headlines_text = ' '.join(headlines)
# Extract meta description
meta_desc_elements = doc.xpath('//meta[@name="description"]/@content')
meta_description = meta_desc_elements[0] if meta_desc_elements else ""
# Create page context
page_context = extract_page_context(page_title, headlines_text, meta_description, url)
except Exception:
page_context = {} # Fail gracefully
# Early removal of all images if exclude_all_images is set
# This is more efficient in lxml as we remove elements before any processing
if kwargs.get("exclude_all_images", False):
for img in body.xpath('//img'):
if img.getparent() is not None:
img.getparent().remove(img)
# Add comment removal
if kwargs.get("remove_comments", False):
comments = body.xpath("//comment()")
for comment in comments:
comment.getparent().remove(comment)
# Handle tag-based removal first
excluded_tags = set(kwargs.get("excluded_tags", []) or [])
if excluded_tags:
for tag in excluded_tags:
for element in body.xpath(f".//{tag}"):
if element.getparent() is not None:
element.getparent().remove(element)
# Handle CSS selector-based exclusion
excluded_selector = kwargs.get("excluded_selector", "")
if excluded_selector:
try:
for element in body.cssselect(excluded_selector):
if element.getparent() is not None:
element.getparent().remove(element)
except Exception as e:
self._log(
"error", f"Error with excluded CSS selector: {str(e)}", "SCRAPE"
)
# Extract metadata before any content filtering
try:
meta = extract_metadata_using_lxml(
"", doc
) # Using same function as BeautifulSoup version
except Exception as e:
self._log("error", f"Error extracting metadata: {str(e)}", "SCRAPE")
meta = {}
content_element = None
if target_elements:
try:
for_content_targeted_element = []
for target_element in target_elements:
for_content_targeted_element.extend(body.cssselect(target_element))
content_element = lhtml.Element("div")
content_element.extend(copy.deepcopy(for_content_targeted_element))
except Exception as e:
self._log("error", f"Error with target element detection: {str(e)}", "SCRAPE")
return None
else:
content_element = body
# Remove script and style tags
for tag in ["script", "style", "link", "meta", "noscript"]:
for element in body.xpath(f".//{tag}"):
if element.getparent() is not None:
element.getparent().remove(element)
# Handle social media and domain exclusions
kwargs["exclude_domains"] = set(kwargs.get("exclude_domains", []))
if kwargs.get("exclude_social_media_links", False):
kwargs["exclude_social_media_domains"] = set(
kwargs.get("exclude_social_media_domains", [])
+ SOCIAL_MEDIA_DOMAINS
)
kwargs["exclude_domains"].update(kwargs["exclude_social_media_domains"])
# Process forms if needed
if kwargs.get("remove_forms", False):
for form in body.xpath(".//form"):
if form.getparent() is not None:
form.getparent().remove(form)
# Process content
media = {"images": [], "videos": [], "audios": [], "tables": []}
internal_links_dict = {}
external_links_dict = {}
self._process_element(
url,
body,
media,
internal_links_dict,
external_links_dict,
page_context=page_context,
base_domain=base_domain,
**kwargs,
)
# Extract tables using the table extraction strategy if provided
if 'table' not in excluded_tags:
table_extraction = kwargs.get('table_extraction')
if table_extraction:
# Pass logger to the strategy if it doesn't have one
if not table_extraction.logger:
table_extraction.logger = self.logger
# Extract tables using the strategy
extracted_tables = table_extraction.extract_tables(body, **kwargs)
media["tables"].extend(extracted_tables)
# Handle only_text option
if kwargs.get("only_text", False):
for tag in ONLY_TEXT_ELIGIBLE_TAGS:
for element in body.xpath(f".//{tag}"):
if element.text:
new_text = lhtml.Element("span")
new_text.text = element.text_content()
if element.getparent() is not None:
element.getparent().replace(element, new_text)
# Clean base64 images
for img in body.xpath(".//img[@src]"):
src = img.get("src", "")
if self.BASE64_PATTERN.match(src):
img.set("src", self.BASE64_PATTERN.sub("", src))
# Remove empty elements
self.remove_empty_elements_fast(body, 1)
# Remove unneeded attributes
self.remove_unwanted_attributes_fast(
body, keep_data_attributes=kwargs.get("keep_data_attributes", False)
)
# Generate output HTML
cleaned_html = lhtml.tostring(
# body,
content_element,
encoding="unicode",
pretty_print=True,
method="html",
with_tail=False,
).strip()
# Create links dictionary in the format expected by LinkPreview
links = {
"internal": list(internal_links_dict.values()),
"external": list(external_links_dict.values()),
}
# Extract head content for links if configured
link_preview_config = kwargs.get("link_preview_config")
if link_preview_config is not None:
try:
import asyncio
from .link_preview import LinkPreview
from .models import Links, Link
verbose = link_preview_config.verbose
if verbose:
self._log("info", "Starting link head extraction for {internal} internal and {external} external links",
params={"internal": len(links["internal"]), "external": len(links["external"])}, tag="LINK_EXTRACT")
# Convert dict links to Link objects
internal_links = [Link(**link_data) for link_data in links["internal"]]
external_links = [Link(**link_data) for link_data in links["external"]]
links_obj = Links(internal=internal_links, external=external_links)
# Create a config object for LinkPreview
class TempCrawlerRunConfig:
def __init__(self, link_config, score_links):
self.link_preview_config = link_config
self.score_links = score_links
config = TempCrawlerRunConfig(link_preview_config, kwargs.get("score_links", False))
# Extract head content (run async operation in sync context)
async def extract_links():
async with LinkPreview(self.logger) as extractor:
return await extractor.extract_link_heads(links_obj, config)
# Run the async operation
try:
# Check if we're already in an async context
loop = asyncio.get_running_loop()
# If we're in an async context, we need to run in a thread
import concurrent.futures
with concurrent.futures.ThreadPoolExecutor() as executor:
future = executor.submit(asyncio.run, extract_links())
updated_links = future.result()
except RuntimeError:
# No running loop, we can use asyncio.run directly
updated_links = asyncio.run(extract_links())
# Convert back to dict format
links["internal"] = [link.dict() for link in updated_links.internal]
links["external"] = [link.dict() for link in updated_links.external]
if verbose:
successful_internal = len([l for l in updated_links.internal if l.head_extraction_status == "valid"])
successful_external = len([l for l in updated_links.external if l.head_extraction_status == "valid"])
self._log("info", "Link head extraction completed: {internal_success}/{internal_total} internal, {external_success}/{external_total} external",
params={
"internal_success": successful_internal,
"internal_total": len(updated_links.internal),
"external_success": successful_external,
"external_total": len(updated_links.external)
}, tag="LINK_EXTRACT")
else:
self._log("info", "Link head extraction completed successfully", tag="LINK_EXTRACT")
except Exception as e:
self._log("error", f"Error during link head extraction: {str(e)}", tag="LINK_EXTRACT")
# Continue with original links if head extraction fails
return {
"cleaned_html": cleaned_html,
"success": success,
"media": media,
"links": links,
"metadata": meta,
}
except Exception as e:
self._log("error", f"Error processing HTML: {str(e)}", "SCRAPE")
# Create error message in case of failure
error_body = lhtml.Element("div")
# Use etree.SubElement rather than lhtml.SubElement
error_div = etree.SubElement(error_body, "div", id="crawl4ai_error_message")
error_div.text = f"""
Crawl4AI Error: This page is not fully supported.
Error Message: {str(e)}
Possible reasons:
1. The page may have restrictions that prevent crawling.
2. The page might not be fully loaded.
Suggestions:
- Try calling the crawl function with these parameters:
magic=True,
- Set headless=False to visualize what's happening on the page.
If the issue persists, please check the page's structure and any potential anti-crawling measures.
"""
cleaned_html = lhtml.tostring(
error_body, encoding="unicode", pretty_print=True
)
return {
"cleaned_html": cleaned_html,
"success": False,
"media": {
"images": [],
"videos": [],
"audios": [],
"tables": []
},
"links": {"internal": [], "external": []},
"metadata": {},
}
# Backward compatibility alias
WebScrapingStrategy = LXMLWebScrapingStrategy
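# --- Illustrative usage sketch, not part of this file ---
# Calling the private _scrap() defined above directly, only to show the shape
# of its return value; in normal use the public scrap()/ascrap() wrappers are
# the entry points, and the bare constructor below is an assumption.
demo_strategy = LXMLWebScrapingStrategy()
demo_out = demo_strategy._scrap(
    "https://example.com",
    "<html><body><p>Hello world, these are enough words.</p></body></html>",
    word_count_threshold=3,
)
if demo_out:
    print(demo_out["success"])
    print(demo_out["links"]["internal"], demo_out["media"]["images"])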

View File

@@ -0,0 +1,549 @@
import re # Point 1: Pre-Compile Regular Expressions
from abc import ABC, abstractmethod
from typing import Dict, Any
from bs4 import BeautifulSoup
from concurrent.futures import ThreadPoolExecutor
import asyncio, requests, re, os
from .config import *
from bs4 import element, NavigableString, Comment
from urllib.parse import urljoin
from requests.exceptions import InvalidSchema
from .content_cleaning_strategy import ContentCleaningStrategy
from .utils import (
sanitize_input_encode,
sanitize_html,
extract_metadata,
InvalidCSSSelectorError,
# CustomHTML2Text,
normalize_url,
is_external_url
)
from .html2text import HTML2Text
class CustomHTML2Text(HTML2Text):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.inside_pre = False
self.inside_code = False
self.preserve_tags = set() # Set of tags to preserve
self.current_preserved_tag = None
self.preserved_content = []
self.preserve_depth = 0
# Configuration options
self.skip_internal_links = False
self.single_line_break = False
self.mark_code = False
self.include_sup_sub = False
self.body_width = 0
self.ignore_mailto_links = True
self.ignore_links = False
self.escape_backslash = False
self.escape_dot = False
self.escape_plus = False
self.escape_dash = False
self.escape_snob = False
def update_params(self, **kwargs):
"""Update parameters and set preserved tags."""
for key, value in kwargs.items():
if key == 'preserve_tags':
self.preserve_tags = set(value)
else:
setattr(self, key, value)
def handle_tag(self, tag, attrs, start):
# Handle preserved tags
if tag in self.preserve_tags:
if start:
if self.preserve_depth == 0:
self.current_preserved_tag = tag
self.preserved_content = []
# Format opening tag with attributes
attr_str = ''.join(f' {k}="{v}"' for k, v in attrs.items() if v is not None)
self.preserved_content.append(f'<{tag}{attr_str}>')
self.preserve_depth += 1
return
else:
self.preserve_depth -= 1
if self.preserve_depth == 0:
self.preserved_content.append(f'</{tag}>')
# Output the preserved HTML block with proper spacing
preserved_html = ''.join(self.preserved_content)
self.o('\n' + preserved_html + '\n')
self.current_preserved_tag = None
return
# If we're inside a preserved tag, collect all content
if self.preserve_depth > 0:
if start:
# Format nested tags with attributes
attr_str = ''.join(f' {k}="{v}"' for k, v in attrs.items() if v is not None)
self.preserved_content.append(f'<{tag}{attr_str}>')
else:
self.preserved_content.append(f'</{tag}>')
return
# Handle pre tags
if tag == 'pre':
if start:
self.o('```\n')
self.inside_pre = True
else:
self.o('\n```')
self.inside_pre = False
# elif tag in ["h1", "h2", "h3", "h4", "h5", "h6"]:
# pass
else:
super().handle_tag(tag, attrs, start)
def handle_data(self, data, entity_char=False):
"""Override handle_data to capture content within preserved tags."""
if self.preserve_depth > 0:
self.preserved_content.append(data)
return
super().handle_data(data, entity_char)
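# --- Illustrative usage sketch, not part of this file ---
# Exercising the preserve_tags hook above; the input HTML is arbitrary.
demo_converter = CustomHTML2Text()
demo_converter.update_params(preserve_tags={"table"})
demo_md = demo_converter.handle("<h1>Title</h1><table><tr><td>cell</td></tr></table>")
print(demo_md)  # the <table> markup is emitted verbatim instead of being converted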
# Pre-compile regular expressions for Open Graph and Twitter metadata
OG_REGEX = re.compile(r'^og:')
TWITTER_REGEX = re.compile(r'^twitter:')
DIMENSION_REGEX = re.compile(r"(\d+)(\D*)")
# Function to parse image height/width value and units
def parse_dimension(dimension):
if dimension:
# match = re.match(r"(\d+)(\D*)", dimension)
match = DIMENSION_REGEX.match(dimension)
if match:
number = int(match.group(1))
unit = match.group(2) or 'px' # Default unit is 'px' if not specified
return number, unit
return None, None
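# Hedged examples of parse_dimension's behaviour, following the regex above:
#   parse_dimension("300px") -> (300, "px")
#   parse_dimension("50%")   -> (50, "%")
#   parse_dimension("120")   -> (120, "px")   # unit defaults to 'px'
#   parse_dimension(None)    -> (None, None)
assert parse_dimension("300px") == (300, "px")
assert parse_dimension(None) == (None, None)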
# Fetch image file metadata to extract size and extension
def fetch_image_file_size(img, base_url):
# If src is a relative path, construct the full URL; otherwise it may already be an absolute (e.g. CDN) URL
img_url = urljoin(base_url,img.get('src'))
try:
response = requests.head(img_url)
if response.status_code == 200:
return response.headers.get('Content-Length',None)
else:
print(f"Failed to retrieve file size for {img_url}")
return None
except InvalidSchema:
return None
class ContentScrapingStrategy(ABC):
@abstractmethod
def scrap(self, url: str, html: str, **kwargs) -> Dict[str, Any]:
pass
@abstractmethod
async def ascrap(self, url: str, html: str, **kwargs) -> Dict[str, Any]:
pass
class WebScrapingStrategy(ContentScrapingStrategy):
def scrap(self, url: str, html: str, **kwargs) -> Dict[str, Any]:
return self._get_content_of_website_optimized(url, html, is_async=False, **kwargs)
async def ascrap(self, url: str, html: str, **kwargs) -> Dict[str, Any]:
return await asyncio.to_thread(self._get_content_of_website_optimized, url, html, **kwargs)
def _get_content_of_website_optimized(self, url: str, html: str, word_count_threshold: int = MIN_WORD_THRESHOLD, css_selector: str = None, **kwargs) -> Dict[str, Any]:
success = True
if not html:
return None
# soup = BeautifulSoup(html, 'html.parser')
soup = BeautifulSoup(html, 'lxml')
body = soup.body
try:
meta = extract_metadata("", soup)
except Exception as e:
print('Error extracting metadata:', str(e))
meta = {}
image_description_min_word_threshold = kwargs.get('image_description_min_word_threshold', IMAGE_DESCRIPTION_MIN_WORD_THRESHOLD)
for tag in kwargs.get('excluded_tags', []) or []:
for el in body.select(tag):
el.decompose()
if css_selector:
selected_elements = body.select(css_selector)
if not selected_elements:
return {
'markdown': '',
'cleaned_html': '',
'success': True,
'media': {'images': [], 'videos': [], 'audios': []},
'links': {'internal': [], 'external': []},
'metadata': {},
'message': f"No elements found for CSS selector: {css_selector}"
}
# raise InvalidCSSSelectorError(f"Invalid CSS selector, No elements found for CSS selector: {css_selector}")
body = soup.new_tag('div')
for el in selected_elements:
body.append(el)
links = {'internal': [], 'external': []}
media = {'images': [], 'videos': [], 'audios': []}
internal_links_dict = {}
external_links_dict = {}
# Extract meaningful text for media files from closest parent
def find_closest_parent_with_useful_text(tag):
current_tag = tag
while current_tag:
current_tag = current_tag.parent
# Get the text content of the parent tag
if current_tag:
text_content = current_tag.get_text(separator=' ',strip=True)
# Check if the text content has at least image_description_min_word_threshold words
if len(text_content.split()) >= image_description_min_word_threshold:
return text_content
return None
def process_image(img, url, index, total_images):
# Check that an image is visibly displayed and not nested inside undesired HTML elements
def is_valid_image(img, parent, parent_classes):
style = img.get('style', '')
src = img.get('src', '')
classes_to_check = ['button', 'icon', 'logo']
tags_to_check = ['button', 'input']
return all([
'display:none' not in style,
src,
not any(s in var for var in [src, img.get('alt', ''), *parent_classes] for s in classes_to_check),
parent.name not in tags_to_check
])
# Score an image for its usefulness
def score_image_for_usefulness(img, base_url, index, images_count):
image_height = img.get('height')
height_value, height_unit = parse_dimension(image_height)
image_width = img.get('width')
width_value, width_unit = parse_dimension(image_width)
image_size = 0 #int(fetch_image_file_size(img,base_url) or 0)
image_src = img.get('src','')
if "data:image/" in image_src:
image_format = image_src.split(',')[0].split(';')[0].split('/')[1]
else:
image_format = os.path.splitext(img.get('src',''))[1].lower()
# Remove . from format
image_format = image_format.strip('.').split('?')[0]
score = 0
if height_value:
if height_unit == 'px' and height_value > 150:
score += 1
if height_unit in ['%','vh','vmin','vmax'] and height_value >30:
score += 1
if width_value:
if width_unit == 'px' and width_value > 150:
score += 1
if width_unit in ['%','vh','vmin','vmax'] and width_value >30:
score += 1
if image_size > 10000:
score += 1
if img.get('alt') != '':
score+=1
if any(image_format==format for format in ['jpg','png','webp']):
score+=1
if index/images_count<0.5:
score+=1
return score
if not is_valid_image(img, img.parent, img.parent.get('class', [])):
return None
score = score_image_for_usefulness(img, url, index, total_images)
if score <= IMAGE_SCORE_THRESHOLD:
return None
return {
'src': img.get('src', ''),
'data-src': img.get('data-src', ''),
'alt': img.get('alt', ''),
'desc': find_closest_parent_with_useful_text(img),
'score': score,
'type': 'image'
}
def remove_unwanted_attributes(element, important_attrs, keep_data_attributes=False):
attrs_to_remove = []
for attr in element.attrs:
if attr not in important_attrs:
if keep_data_attributes:
if not attr.startswith('data-'):
attrs_to_remove.append(attr)
else:
attrs_to_remove.append(attr)
for attr in attrs_to_remove:
del element[attr]
def process_element(element: element.PageElement) -> bool:
try:
if isinstance(element, NavigableString):
if isinstance(element, Comment):
element.extract()
return False
# if element.name == 'img':
# process_image(element, url, 0, 1)
# return True
if element.name in ['script', 'style', 'link', 'meta', 'noscript']:
element.decompose()
return False
keep_element = False
exclude_social_media_domains = SOCIAL_MEDIA_DOMAINS + kwargs.get('exclude_social_media_domains', [])
exclude_social_media_domains = list(set(exclude_social_media_domains))
try:
if element.name == 'a' and element.get('href'):
href = element.get('href', '').strip()
if not href: # Skip empty hrefs
return False
url_base = url.split('/')[2]
# Normalize the URL
try:
normalized_href = normalize_url(href, url)
except ValueError as e:
# logging.warning(f"Invalid URL format: {href}, Error: {str(e)}")
return False
link_data = {
'href': normalized_href,
'text': element.get_text().strip(),
'title': element.get('title', '').strip()
}
# Check for duplicates and add to appropriate dictionary
is_external = is_external_url(normalized_href, url_base)
if is_external:
if normalized_href not in external_links_dict:
external_links_dict[normalized_href] = link_data
else:
if normalized_href not in internal_links_dict:
internal_links_dict[normalized_href] = link_data
keep_element = True
# Handle external link exclusions
if is_external:
if kwargs.get('exclude_external_links', False):
element.decompose()
return False
elif kwargs.get('exclude_social_media_links', False):
if any(domain in normalized_href.lower() for domain in exclude_social_media_domains):
element.decompose()
return False
elif kwargs.get('exclude_domains', []):
if any(domain in normalized_href.lower() for domain in kwargs.get('exclude_domains', [])):
element.decompose()
return False
except Exception as e:
raise Exception(f"Error processing links: {str(e)}")
try:
if element.name == 'img':
potential_sources = ['src', 'data-src', 'srcset', 'data-lazy-src', 'data-original']
src = element.get('src', '')
while not src and potential_sources:
src = element.get(potential_sources.pop(0), '')
if not src:
element.decompose()
return False
# If it is srcset pick up the first image
if 'srcset' in element.attrs:
src = element.attrs['srcset'].split(',')[0].split(' ')[0]
# Check flag if we should remove external images
if kwargs.get('exclude_external_images', False):
src_url_base = src.split('/')[2]
url_base = url.split('/')[2]
if url_base not in src_url_base:
element.decompose()
return False
if not kwargs.get('exclude_external_images', False) and kwargs.get('exclude_social_media_links', False):
src_url_base = src.split('/')[2]
url_base = url.split('/')[2]
if any(domain in src for domain in exclude_social_media_domains):
element.decompose()
return False
# Handle exclude domains
if kwargs.get('exclude_domains', []):
if any(domain in src for domain in kwargs.get('exclude_domains', [])):
element.decompose()
return False
return True # Always keep image elements
except Exception as e:
raise "Error processing images"
# Check if flag to remove all forms is set
if kwargs.get('remove_forms', False) and element.name == 'form':
element.decompose()
return False
if element.name in ['video', 'audio']:
media[f"{element.name}s"].append({
'src': element.get('src'),
'alt': element.get('alt'),
'type': element.name,
'description': find_closest_parent_with_useful_text(element)
})
source_tags = element.find_all('source')
for source_tag in source_tags:
media[f"{element.name}s"].append({
'src': source_tag.get('src'),
'alt': element.get('alt'),
'type': element.name,
'description': find_closest_parent_with_useful_text(element)
})
return True # Always keep video and audio elements
if element.name in ONLY_TEXT_ELIGIBLE_TAGS:
if kwargs.get('only_text', False):
element.replace_with(element.get_text())
try:
remove_unwanted_attributes(element, IMPORTANT_ATTRS, kwargs.get('keep_data_attributes', False))
except Exception as e:
print('Error removing unwanted attributes:', str(e))
# Process children
for child in list(element.children):
if isinstance(child, NavigableString) and not isinstance(child, Comment):
if len(child.strip()) > 0:
keep_element = True
else:
if process_element(child):
keep_element = True
# Check word count
if not keep_element:
word_count = len(element.get_text(strip=True).split())
keep_element = word_count >= word_count_threshold
if not keep_element:
element.decompose()
return keep_element
except Exception as e:
print('Error processing element:', str(e))
return False
process_element(body)
# Update the links dictionary with unique links
links['internal'] = list(internal_links_dict.values())
links['external'] = list(external_links_dict.values())
# Process images using ThreadPoolExecutor
imgs = body.find_all('img')
with ThreadPoolExecutor() as executor:
image_results = list(executor.map(process_image, imgs, [url]*len(imgs), range(len(imgs)), [len(imgs)]*len(imgs)))
media['images'] = [result for result in image_results if result is not None]
def flatten_nested_elements(node):
if isinstance(node, NavigableString):
return node
if len(node.contents) == 1 and isinstance(node.contents[0], element.Tag) and node.contents[0].name == node.name:
return flatten_nested_elements(node.contents[0])
node.contents = [flatten_nested_elements(child) for child in node.contents]
return node
body = flatten_nested_elements(body)
base64_pattern = re.compile(r'data:image/[^;]+;base64,([^"]+)')
for img in imgs:
src = img.get('src', '')
if base64_pattern.match(src):
# Replace base64 data with empty string
img['src'] = base64_pattern.sub('', src)
str_body = ""
try:
str_body = body.encode_contents().decode('utf-8')
except Exception as e:
# Reset body to the original HTML
success = False
body = BeautifulSoup(html, 'html.parser')
# Create a new div with a special ID
error_div = body.new_tag('div', id='crawl4ai_error_message')
error_div.string = '''
Crawl4AI Error: This page is not fully supported.
Possible reasons:
1. The page may have restrictions that prevent crawling.
2. The page might not be fully loaded.
Suggestions:
- Try calling the crawl function with these parameters:
magic=True,
- Set headless=False to visualize what's happening on the page.
If the issue persists, please check the page's structure and any potential anti-crawling measures.
'''
# Append the error div to the body
body.body.append(error_div)
str_body = body.encode_contents().decode('utf-8')
print(f"[LOG] 😧 Error: After processing the crawled HTML and removing irrelevant tags, nothing was left in the page. Check the markdown for further details.")
cleaned_html = str_body.replace('\n\n', '\n').replace(' ', ' ')
try:
h = CustomHTML2Text()
h.update_params(**kwargs.get('html2text', {}))
markdown = h.handle(cleaned_html)
except Exception as e:
markdown = h.handle(sanitize_html(cleaned_html))
markdown = markdown.replace(' ```', '```')
fit_markdown = "Set flag 'fit_markdown' to True to get cleaned HTML content."
fit_html = "Set flag 'fit_markdown' to True to get cleaned HTML content."
if kwargs.get('fit_markdown', False):
cleaner = ContentCleaningStrategy()
fit_html = cleaner.clean(cleaned_html)
fit_markdown = h.handle(fit_html)
cleaned_html = sanitize_html(cleaned_html)
return {
'markdown': markdown,
'fit_markdown': fit_markdown,
'fit_html': fit_html,
'cleaned_html': cleaned_html,
'success': success,
'media': media,
'links': links,
'metadata': meta
}
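# --- Illustrative usage sketch, not part of this file ---
# Running the legacy BeautifulSoup-based strategy end to end; the html2text
# kwargs are forwarded to CustomHTML2Text.update_params and are assumptions.
demo_strategy = WebScrapingStrategy()
demo_out = demo_strategy.scrap(
    "https://example.com",
    "<html><body><article><p>A meaningful paragraph with enough words to keep.</p></article></body></html>",
    word_count_threshold=3,
    html2text={"ignore_links": True},
)
print(demo_out["markdown"])
print(demo_out["links"]["internal"], demo_out["metadata"])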

View File

@@ -15,53 +15,54 @@ import logging, time
import base64
from PIL import Image, ImageDraw, ImageFont
from io import BytesIO
from typing import Callable
from typing import List, Callable
import requests
import os
from pathlib import Path
from .utils import *
logger = logging.getLogger("selenium.webdriver.remote.remote_connection")
logger = logging.getLogger('selenium.webdriver.remote.remote_connection')
logger.setLevel(logging.WARNING)
logger_driver = logging.getLogger("selenium.webdriver.common.service")
logger_driver = logging.getLogger('selenium.webdriver.common.service')
logger_driver.setLevel(logging.WARNING)
urllib3_logger = logging.getLogger("urllib3.connectionpool")
urllib3_logger = logging.getLogger('urllib3.connectionpool')
urllib3_logger.setLevel(logging.WARNING)
# Disable http.client logging
http_client_logger = logging.getLogger("http.client")
http_client_logger = logging.getLogger('http.client')
http_client_logger.setLevel(logging.WARNING)
# Disable driver_finder and service logging
driver_finder_logger = logging.getLogger("selenium.webdriver.common.driver_finder")
driver_finder_logger = logging.getLogger('selenium.webdriver.common.driver_finder')
driver_finder_logger.setLevel(logging.WARNING)
class CrawlerStrategy(ABC):
@abstractmethod
def crawl(self, url: str, **kwargs) -> str:
pass
@abstractmethod
def take_screenshot(self, save_path: str):
pass
@abstractmethod
def update_user_agent(self, user_agent: str):
pass
@abstractmethod
def set_hook(self, hook_type: str, hook: Callable):
pass
class CloudCrawlerStrategy(CrawlerStrategy):
def __init__(self, use_cached_html=False):
def __init__(self, use_cached_html = False):
super().__init__()
self.use_cached_html = use_cached_html
def crawl(self, url: str) -> str:
data = {
"urls": [url],
@@ -75,7 +76,6 @@ class CloudCrawlerStrategy(CrawlerStrategy):
html = response["results"][0]["html"]
return sanitize_input_encode(html)
class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
def __init__(self, use_cached_html=False, js_code=None, **kwargs):
super().__init__()
@@ -87,25 +87,20 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
if kwargs.get("user_agent"):
self.options.add_argument("--user-agent=" + kwargs.get("user_agent"))
else:
user_agent = kwargs.get(
"user_agent",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
)
user_agent = kwargs.get("user_agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36")
self.options.add_argument(f"--user-agent={user_agent}")
self.options.add_argument(
"user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
)
self.options.add_argument("user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36")
self.options.headless = kwargs.get("headless", True)
if self.options.headless:
self.options.add_argument("--headless")
self.options.add_argument("--disable-gpu")
self.options.add_argument("--disable-gpu")
self.options.add_argument("--window-size=1920,1080")
self.options.add_argument("--no-sandbox")
self.options.add_argument("--disable-dev-shm-usage")
self.options.add_argument("--disable-blink-features=AutomationControlled")
self.options.add_argument("--disable-blink-features=AutomationControlled")
# self.options.add_argument("--disable-dev-shm-usage")
self.options.add_argument("--disable-gpu")
# self.options.add_argument("--disable-extensions")
@@ -125,45 +120,48 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
self.use_cached_html = use_cached_html
self.js_code = js_code
self.verbose = kwargs.get("verbose", False)
# Hooks
self.hooks = {
"on_driver_created": None,
"on_user_agent_updated": None,
"before_get_url": None,
"after_get_url": None,
"before_return_html": None,
'on_driver_created': None,
'on_user_agent_updated': None,
'before_get_url': None,
'after_get_url': None,
'before_return_html': None
}
# chromedriver_autoinstaller.install()
# import chromedriver_autoinstaller
# crawl4ai_folder = os.path.join(os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".crawl4ai")
# crawl4ai_folder = os.path.join(Path.home(), ".crawl4ai")
# driver = webdriver.Chrome(service=ChromeService(ChromeDriverManager().install()), options=self.options)
# chromedriver_path = chromedriver_autoinstaller.install()
# chromedriver_path = chromedriver_autoinstaller.utils.download_chromedriver()
# self.service = Service(chromedriver_autoinstaller.install())
# chromedriver_path = ChromeDriverManager().install()
# self.service = Service(chromedriver_path)
# self.service.log_path = "NUL"
# self.driver = webdriver.Chrome(service=self.service, options=self.options)
# Use selenium-manager (built into Selenium 4.10.0+)
self.service = Service()
self.driver = webdriver.Chrome(options=self.options)
self.driver = self.execute_hook("on_driver_created", self.driver)
self.driver = self.execute_hook('on_driver_created', self.driver)
if kwargs.get("cookies"):
for cookie in kwargs.get("cookies"):
self.driver.add_cookie(cookie)
def set_hook(self, hook_type: str, hook: Callable):
if hook_type in self.hooks:
self.hooks[hook_type] = hook
else:
raise ValueError(f"Invalid hook type: {hook_type}")
def execute_hook(self, hook_type: str, *args):
hook = self.hooks.get(hook_type)
if hook:
@@ -172,9 +170,7 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
if isinstance(result, webdriver.Chrome):
return result
else:
raise TypeError(
f"Hook {hook_type} must return an instance of webdriver.Chrome or None."
)
raise TypeError(f"Hook {hook_type} must return an instance of webdriver.Chrome or None.")
# If the hook returns None or there is no hook, return self.driver
return self.driver
@@ -182,77 +178,60 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
self.options.add_argument(f"user-agent={user_agent}")
self.driver.quit()
self.driver = webdriver.Chrome(service=self.service, options=self.options)
self.driver = self.execute_hook("on_user_agent_updated", self.driver)
self.driver = self.execute_hook('on_user_agent_updated', self.driver)
def set_custom_headers(self, headers: dict):
# Enable Network domain for sending headers
self.driver.execute_cdp_cmd("Network.enable", {})
self.driver.execute_cdp_cmd('Network.enable', {})
# Set extra HTTP headers
self.driver.execute_cdp_cmd("Network.setExtraHTTPHeaders", {"headers": headers})
self.driver.execute_cdp_cmd('Network.setExtraHTTPHeaders', {'headers': headers})
def _ensure_page_load(self, max_checks=6, check_interval=0.01):
def _ensure_page_load(self, max_checks=6, check_interval=0.01):
initial_length = len(self.driver.page_source)
for ix in range(max_checks):
# print(f"Checking page load: {ix}")
time.sleep(check_interval)
current_length = len(self.driver.page_source)
if current_length != initial_length:
break
return self.driver.page_source
def crawl(self, url: str, **kwargs) -> str:
# Create md5 hash of the URL
import hashlib
url_hash = hashlib.md5(url.encode()).hexdigest()
if self.use_cached_html:
cache_file_path = os.path.join(
os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()),
".crawl4ai",
"cache",
url_hash,
)
cache_file_path = os.path.join(Path.home(), ".crawl4ai", "cache", url_hash)
if os.path.exists(cache_file_path):
with open(cache_file_path, "r") as f:
return sanitize_input_encode(f.read())
try:
self.driver = self.execute_hook("before_get_url", self.driver)
self.driver = self.execute_hook('before_get_url', self.driver)
if self.verbose:
print(f"[LOG] 🕸️ Crawling {url} using LocalSeleniumCrawlerStrategy...")
self.driver.get(url) # <html><head></head><body></body></html>
self.driver.get(url) #<html><head></head><body></body></html>
WebDriverWait(self.driver, 20).until(
lambda d: d.execute_script("return document.readyState") == "complete"
lambda d: d.execute_script('return document.readyState') == 'complete'
)
WebDriverWait(self.driver, 10).until(
EC.presence_of_all_elements_located((By.TAG_NAME, "body"))
)
self.driver.execute_script(
"window.scrollTo(0, document.body.scrollHeight);"
)
self.driver = self.execute_hook("after_get_url", self.driver)
html = sanitize_input_encode(
self._ensure_page_load()
) # self.driver.page_source
can_not_be_done_headless = (
False # Look at my creativity for naming variables
)
self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
self.driver = self.execute_hook('after_get_url', self.driver)
html = sanitize_input_encode(self._ensure_page_load()) # self.driver.page_source
can_not_be_done_headless = False # Look at my creativity for naming variables
# TODO: Very ugly approach, but promise to change it!
if (
kwargs.get("bypass_headless", False)
or html == "<html><head></head><body></body></html>"
):
print(
"[LOG] 🙌 Page could not be loaded in headless mode. Trying non-headless mode..."
)
if kwargs.get('bypass_headless', False) or html == "<html><head></head><body></body></html>":
print("[LOG] 🙌 Page could not be loaded in headless mode. Trying non-headless mode...")
can_not_be_done_headless = True
options = Options()
options.headless = False
@@ -260,31 +239,27 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
options.add_argument("--window-size=5,5")
driver = webdriver.Chrome(service=self.service, options=options)
driver.get(url)
self.driver = self.execute_hook("after_get_url", driver)
self.driver = self.execute_hook('after_get_url', driver)
html = sanitize_input_encode(driver.page_source)
driver.quit()
# Execute JS code if provided
self.js_code = kwargs.get("js_code", self.js_code)
if self.js_code and type(self.js_code) == str:
self.driver.execute_script(self.js_code)
# Optionally, wait for some condition after executing the JS code
WebDriverWait(self.driver, 10).until(
lambda driver: driver.execute_script("return document.readyState")
== "complete"
lambda driver: driver.execute_script("return document.readyState") == "complete"
)
elif self.js_code and type(self.js_code) == list:
for js in self.js_code:
self.driver.execute_script(js)
WebDriverWait(self.driver, 10).until(
lambda driver: driver.execute_script(
"return document.readyState"
)
== "complete"
lambda driver: driver.execute_script("return document.readyState") == "complete"
)
# Optionally, wait for some condition after executing the JS code : Contributed by (https://github.com/jonymusky)
wait_for = kwargs.get("wait_for", False)
wait_for = kwargs.get('wait_for', False)
if wait_for:
if callable(wait_for):
print("[LOG] 🔄 Waiting for condition...")
@@ -293,37 +268,32 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
print("[LOG] 🔄 Waiting for condition...")
WebDriverWait(self.driver, 20).until(
EC.presence_of_element_located((By.CSS_SELECTOR, wait_for))
)
)
if not can_not_be_done_headless:
html = sanitize_input_encode(self.driver.page_source)
self.driver = self.execute_hook("before_return_html", self.driver, html)
self.driver = self.execute_hook('before_return_html', self.driver, html)
# Store in cache
cache_file_path = os.path.join(
os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()),
".crawl4ai",
"cache",
url_hash,
)
cache_file_path = os.path.join(Path.home(), ".crawl4ai", "cache", url_hash)
with open(cache_file_path, "w", encoding="utf-8") as f:
f.write(html)
if self.verbose:
print(f"[LOG] ✅ Crawled {url} successfully!")
return html
except InvalidArgumentException as e:
if not hasattr(e, "msg"):
except InvalidArgumentException:
if not hasattr(e, 'msg'):
e.msg = sanitize_input_encode(str(e))
raise InvalidArgumentException(f"Failed to crawl {url}: {e.msg}")
except WebDriverException as e:
# If e does not have a msg attribute, create it and set it to str(e)
if not hasattr(e, "msg"):
if not hasattr(e, 'msg'):
e.msg = sanitize_input_encode(str(e))
raise WebDriverException(f"Failed to crawl {url}: {e.msg}")
raise WebDriverException(f"Failed to crawl {url}: {e.msg}")
except Exception as e:
if not hasattr(e, "msg"):
if not hasattr(e, 'msg'):
e.msg = sanitize_input_encode(str(e))
raise Exception(f"Failed to crawl {url}: {e.msg}")
@@ -331,9 +301,7 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
try:
# Get the dimensions of the page
total_width = self.driver.execute_script("return document.body.scrollWidth")
total_height = self.driver.execute_script(
"return document.body.scrollHeight"
)
total_height = self.driver.execute_script("return document.body.scrollHeight")
# Set the window size to the dimensions of the page
self.driver.set_window_size(total_width, total_height)
@@ -345,27 +313,25 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
image = Image.open(BytesIO(screenshot))
# Convert image to RGB mode (this will handle both RGB and RGBA images)
rgb_image = image.convert("RGB")
rgb_image = image.convert('RGB')
# Convert to JPEG and compress
buffered = BytesIO()
rgb_image.save(buffered, format="JPEG", quality=85)
img_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
img_base64 = base64.b64encode(buffered.getvalue()).decode('utf-8')
if self.verbose:
print("[LOG] 📸 Screenshot taken and converted to base64")
print(f"[LOG] 📸 Screenshot taken and converted to base64")
return img_base64
except Exception as e:
error_message = sanitize_input_encode(
f"Failed to take screenshot: {str(e)}"
)
error_message = sanitize_input_encode(f"Failed to take screenshot: {str(e)}")
print(error_message)
# Generate an image with black background
img = Image.new("RGB", (800, 600), color="black")
img = Image.new('RGB', (800, 600), color='black')
draw = ImageDraw.Draw(img)
# Load a font
try:
font = ImageFont.truetype("arial.ttf", 40)
@@ -379,16 +345,16 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
# Calculate text position
text_position = (10, 10)
# Draw the text on the image
draw.text(text_position, wrapped_text, fill=text_color, font=font)
# Convert to base64
buffered = BytesIO()
img.save(buffered, format="JPEG")
img_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
img_base64 = base64.b64encode(buffered.getvalue()).decode('utf-8')
return img_base64
def quit(self):
self.driver.quit()

View File

@@ -1,20 +0,0 @@
import json
from crawl4ai.hub import BaseCrawler
__meta__ = {
"version": "1.2.0",
"tested_on": ["amazon.com"],
"rate_limit": "50 RPM",
"schema": {"product": ["name", "price"]}
}
class AmazonProductCrawler(BaseCrawler):
async def run(self, url: str, **kwargs) -> str:
try:
self.logger.info(f"Crawling {url}")
return '{"product": {"name": "Test Amazon Product"}}'
except Exception as e:
self.logger.error(f"Crawl failed: {str(e)}")
return json.dumps({
"error": str(e),
"metadata": self.meta # Include meta in error response
})
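# --- Illustrative sketch, not part of this file ---
# Invoking the crawler directly; whether BaseCrawler subclasses take
# constructor arguments is not shown in this diff, so the bare constructor
# and the example URL are assumptions.
import asyncio

async def _demo():
    crawler = AmazonProductCrawler()
    print(await crawler.run("https://www.amazon.com/dp/EXAMPLE"))

asyncio.run(_demo())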

View File

@@ -1,131 +0,0 @@
from crawl4ai import BrowserConfig, AsyncWebCrawler, CrawlerRunConfig, CacheMode
from crawl4ai.hub import BaseCrawler
from crawl4ai.utils import optimize_html, get_home_folder, preprocess_html_for_schema
from crawl4ai import JsonCssExtractionStrategy
from pathlib import Path
import json
import os
from typing import Dict
class GoogleSearchCrawler(BaseCrawler):
__meta__ = {
"version": "1.0.0",
"tested_on": ["google.com/search*"],
"rate_limit": "10 RPM",
"description": "Crawls Google Search results (text + images)",
}
def __init__(self):
super().__init__()
self.js_script = (Path(__file__).parent /
"script.js").read_text()
async def run(self, url="", query: str = "", search_type: str = "text", schema_cache_path = None, **kwargs) -> str:
"""Crawl Google Search results for a query"""
url = f"https://www.google.com/search?q={query}&gl=sg&hl=en" if search_type == "text" else f"https://www.google.com/search?q={query}&gl=sg&hl=en&tbs=qdr:d&udm=2"
if kwargs.get("page_start", 1) > 1:
url = f"{url}&start={kwargs['page_start'] * 10}"
if kwargs.get("page_length", 1) > 1:
url = f"{url}&num={kwargs['page_length']}"
browser_config = BrowserConfig(headless=True, verbose=True)
async with AsyncWebCrawler(config=browser_config) as crawler:
config = CrawlerRunConfig(
cache_mode=kwargs.get("cache_mode", CacheMode.BYPASS),
keep_attrs=["id", "class"],
keep_data_attributes=True,
delay_before_return_html=kwargs.get(
"delay", 2 if search_type == "image" else 1),
js_code=self.js_script if search_type == "image" else None,
)
result = await crawler.arun(url=url, config=config)
if not result.success:
return json.dumps({"error": result.error})
if search_type == "image":
if result.js_execution_result.get("success", False) is False:
return json.dumps({"error": result.js_execution_result.get("error", "Unknown error")})
if "results" in result.js_execution_result:
image_result = result.js_execution_result['results'][0]
if image_result.get("success", False) is False:
return json.dumps({"error": image_result.get("error", "Unknown error")})
return json.dumps(image_result["result"], indent=4)
# For text search, extract structured data
schemas = await self._build_schemas(result.cleaned_html, schema_cache_path)
extracted = {
key: JsonCssExtractionStrategy(schema=schemas[key]).run(
url=url, sections=[result.html]
)
for key in schemas
}
return json.dumps(extracted, indent=4)
async def _build_schemas(self, html: str, schema_cache_path: str = None) -> Dict[str, Dict]:
"""Build extraction schemas (organic, top stories, etc.)"""
home_dir = get_home_folder() if not schema_cache_path else schema_cache_path
os.makedirs(f"{home_dir}/schema", exist_ok=True)
# cleaned_html = optimize_html(html, threshold=100)
cleaned_html = preprocess_html_for_schema(html)
organic_schema = None
if os.path.exists(f"{home_dir}/schema/organic_schema.json"):
with open(f"{home_dir}/schema/organic_schema.json", "r") as f:
organic_schema = json.load(f)
else:
organic_schema = JsonCssExtractionStrategy.generate_schema(
html=cleaned_html,
target_json_example="""{
"title": "...",
"link": "...",
"snippet": "...",
"date": "1 hour ago",
}""",
query="""The given html is the crawled html from Google search result. Please find the schema for organic search item in the given html, I am interested in title, link, snippet text. date."""
)
with open(f"{home_dir}/schema/organic_schema.json", "w") as f:
f.write(json.dumps(organic_schema))
top_stories_schema = None
if os.path.exists(f"{home_dir}/schema/top_stories_schema.json"):
with open(f"{home_dir}/schema/top_stories_schema.json", "r") as f:
top_stories_schema = json.load(f)
else:
top_stories_schema = JsonCssExtractionStrategy.generate_schema(
html=cleaned_html,
target_json_example="""{
"title": "...",
"link": "...",
"source": "Insider Monkey",
"date": "1 hour ago",
}""",
query="""The given html is the crawled html from Google search result. Please find the schema for Top Story item int he given html, I am interested in title, link, source. date and imageUrl."""
)
with open(f"{home_dir}/schema/top_stories_schema.json", "w") as f:
f.write(json.dumps(top_stories_schema))
suggested_query_schema = None
if os.path.exists(f"{home_dir}/schema/suggested_query_schema.json"):
with open(f"{home_dir}/schema/suggested_query_schema.json", "r") as f:
suggested_query_schema = json.load(f)
else:
suggested_query_schema = JsonCssExtractionStrategy.generate_schema(
html=cleaned_html,
target_json_example="""{
"query": "A for Apple",
}""",
query="""The given HTML contains the crawled HTML from Google search results. Please find the schema for each suggested query in the section "People also search for" within the given HTML. I am interested in the queries only."""
)
with open(f"{home_dir}/schema/suggested_query_schema.json", "w") as f:
f.write(json.dumps(suggested_query_schema))
return {
"organic_schema": organic_schema,
"top_stories_schema": top_stories_schema,
"suggested_query_schema": suggested_query_schema,
}
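# --- Illustrative sketch, not part of this file ---
# Driving the crawler above for a plain text search; the query value is an
# example.
import asyncio

async def _demo():
    crawler = GoogleSearchCrawler()
    raw = await crawler.run(query="crawl4ai", search_type="text")
    print(raw[:500])

asyncio.run(_demo())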

View File

@@ -1,115 +0,0 @@
(() => {
// Function to extract image data from Google Images page
function extractImageData() {
const keys = Object.keys(window.W_jd);
let allImageData = [];
let currentPosition = 0;
// Get the symbol we'll use (from first valid entry)
let targetSymbol;
for (let key of keys) {
try {
const symbols = Object.getOwnPropertySymbols(window.W_jd[key]);
if (symbols.length > 0) {
targetSymbol = symbols[0];
break;
}
} catch (e) {
continue;
}
}
if (!targetSymbol) return [];
// Iterate through ALL keys
for (let key of keys) {
try {
const o1 = window.W_jd[key][targetSymbol]
if (!o1) continue;
const data = Object.values(o1)[0]
// const data = window.W_jd[key][targetSymbol]?.Ws;
// Check if this is a valid image data entry
if (data && Array.isArray(data[1])) {
const processedData = processImageEntry(data, currentPosition);
if (processedData) {
allImageData.push(processedData);
currentPosition++;
}
}
} catch (e) {
continue;
}
}
return allImageData;
}
function processImageEntry(entry, position) {
const imageData = entry[1];
if (!Array.isArray(imageData)) return null;
// Extract the image ID
const imageId = imageData[1];
if (!imageId) return null;
// Find the corresponding DOM element
const domElement = document.querySelector(`[data-docid="${imageId}"]`);
if (!domElement) return null;
// Extract data from the array structure
const [
_,
id,
thumbnailInfo,
imageInfo,
__,
___,
rgb,
____,
_____,
metadata
] = imageData;
// Ensure we have the required data
if (!thumbnailInfo || !imageInfo) return null;
// Extract metadata from DOM
const title = domElement?.querySelector('.toI8Rb')?.textContent?.trim();
const source = domElement?.querySelector('.guK3rf')?.textContent?.trim();
const link = domElement?.querySelector('a.EZAeBe')?.href;
if (!link) return null;
// Build Google Image URL
const googleUrl = buildGoogleImageUrl(imageInfo[0], link, imageId, imageInfo[1], imageInfo[2]);
return {
title,
imageUrl: imageInfo[0],
imageWidth: imageInfo[2],
imageHeight: imageInfo[1],
thumbnailUrl: thumbnailInfo[0],
thumbnailWidth: thumbnailInfo[2],
thumbnailHeight: thumbnailInfo[1],
source,
domain: metadata['2000']?.[1] || new URL(link).hostname,
link,
googleUrl,
position: position + 1
};
}
function buildGoogleImageUrl(imgUrl, refUrl, tbnid, height, width) {
const params = new URLSearchParams({
imgurl: imgUrl,
tbnid: tbnid,
imgrefurl: refUrl,
docid: tbnid,
w: width.toString(),
h: height.toString(),
});
return `https://www.google.com/imgres?${params.toString()}`;
}
return extractImageData();
})();

View File

@@ -3,17 +3,15 @@ from pathlib import Path
import sqlite3
from typing import Optional, Tuple
DB_PATH = os.path.join(os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".crawl4ai")
DB_PATH = os.path.join(Path.home(), ".crawl4ai")
os.makedirs(DB_PATH, exist_ok=True)
DB_PATH = os.path.join(DB_PATH, "crawl4ai.db")
def init_db():
global DB_PATH
conn = sqlite3.connect(DB_PATH)
cursor = conn.cursor()
cursor.execute(
"""
cursor.execute('''
CREATE TABLE IF NOT EXISTS crawled_data (
url TEXT PRIMARY KEY,
html TEXT,
@@ -26,42 +24,31 @@ def init_db():
metadata TEXT DEFAULT "{}",
screenshot TEXT DEFAULT ""
)
"""
)
''')
conn.commit()
conn.close()
def alter_db_add_screenshot(new_column: str = "media"):
check_db_path()
try:
conn = sqlite3.connect(DB_PATH)
cursor = conn.cursor()
cursor.execute(
f'ALTER TABLE crawled_data ADD COLUMN {new_column} TEXT DEFAULT ""'
)
cursor.execute(f'ALTER TABLE crawled_data ADD COLUMN {new_column} TEXT DEFAULT ""')
conn.commit()
conn.close()
except Exception as e:
print(f"Error altering database to add screenshot column: {e}")
def check_db_path():
if not DB_PATH:
raise ValueError("Database path is not set or is empty.")
def get_cached_url(
url: str,
) -> Optional[Tuple[str, str, str, str, str, str, str, bool, str]]:
def get_cached_url(url: str) -> Optional[Tuple[str, str, str, str, str, str, str, bool, str]]:
check_db_path()
try:
conn = sqlite3.connect(DB_PATH)
cursor = conn.cursor()
cursor.execute(
"SELECT url, html, cleaned_html, markdown, extracted_content, success, media, links, metadata, screenshot FROM crawled_data WHERE url = ?",
(url,),
)
cursor.execute('SELECT url, html, cleaned_html, markdown, extracted_content, success, media, links, metadata, screenshot FROM crawled_data WHERE url = ?', (url,))
result = cursor.fetchone()
conn.close()
return result
@@ -69,25 +56,12 @@ def get_cached_url(
print(f"Error retrieving cached URL: {e}")
return None
def cache_url(
url: str,
html: str,
cleaned_html: str,
markdown: str,
extracted_content: str,
success: bool,
media: str = "{}",
links: str = "{}",
metadata: str = "{}",
screenshot: str = "",
):
def cache_url(url: str, html: str, cleaned_html: str, markdown: str, extracted_content: str, success: bool, media : str = "{}", links : str = "{}", metadata : str = "{}", screenshot: str = ""):
check_db_path()
try:
conn = sqlite3.connect(DB_PATH)
cursor = conn.cursor()
cursor.execute(
"""
cursor.execute('''
INSERT INTO crawled_data (url, html, cleaned_html, markdown, extracted_content, success, media, links, metadata, screenshot)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
ON CONFLICT(url) DO UPDATE SET
@@ -100,32 +74,18 @@ def cache_url(
links = excluded.links,
metadata = excluded.metadata,
screenshot = excluded.screenshot
""",
(
url,
html,
cleaned_html,
markdown,
extracted_content,
success,
media,
links,
metadata,
screenshot,
),
)
''', (url, html, cleaned_html, markdown, extracted_content, success, media, links, metadata, screenshot))
conn.commit()
conn.close()
except Exception as e:
print(f"Error caching URL: {e}")
def get_total_count() -> int:
check_db_path()
try:
conn = sqlite3.connect(DB_PATH)
cursor = conn.cursor()
cursor.execute("SELECT COUNT(*) FROM crawled_data")
cursor.execute('SELECT COUNT(*) FROM crawled_data')
result = cursor.fetchone()
conn.close()
return result[0]
@@ -133,48 +93,43 @@ def get_total_count() -> int:
print(f"Error getting total count: {e}")
return 0
def clear_db():
check_db_path()
try:
conn = sqlite3.connect(DB_PATH)
cursor = conn.cursor()
cursor.execute("DELETE FROM crawled_data")
cursor.execute('DELETE FROM crawled_data')
conn.commit()
conn.close()
except Exception as e:
print(f"Error clearing database: {e}")
def flush_db():
check_db_path()
try:
conn = sqlite3.connect(DB_PATH)
cursor = conn.cursor()
cursor.execute("DROP TABLE crawled_data")
cursor.execute('DROP TABLE crawled_data')
conn.commit()
conn.close()
except Exception as e:
print(f"Error flushing database: {e}")
def update_existing_records(new_column: str = "media", default_value: str = "{}"):
check_db_path()
try:
conn = sqlite3.connect(DB_PATH)
cursor = conn.cursor()
cursor.execute(
f'UPDATE crawled_data SET {new_column} = "{default_value}" WHERE screenshot IS NULL'
)
cursor.execute(f'UPDATE crawled_data SET {new_column} = "{default_value}" WHERE screenshot IS NULL')
conn.commit()
conn.close()
except Exception as e:
print(f"Error updating existing records: {e}")
if __name__ == "__main__":
# Delete the existing database file
if os.path.exists(DB_PATH):
os.remove(DB_PATH)
init_db()
init_db()
# alter_db_add_screenshot("COL_NAME")

View File

@@ -1,47 +0,0 @@
# deep_crawling/__init__.py
from .base_strategy import DeepCrawlDecorator, DeepCrawlStrategy
from .bfs_strategy import BFSDeepCrawlStrategy
from .bff_strategy import BestFirstCrawlingStrategy
from .dfs_strategy import DFSDeepCrawlStrategy
from .filters import (
FilterChain,
ContentTypeFilter,
DomainFilter,
URLFilter,
URLPatternFilter,
FilterStats,
ContentRelevanceFilter,
SEOFilter
)
from .scorers import (
KeywordRelevanceScorer,
URLScorer,
CompositeScorer,
DomainAuthorityScorer,
FreshnessScorer,
PathDepthScorer,
ContentTypeScorer
)
__all__ = [
"DeepCrawlDecorator",
"DeepCrawlStrategy",
"BFSDeepCrawlStrategy",
"BestFirstCrawlingStrategy",
"DFSDeepCrawlStrategy",
"FilterChain",
"ContentTypeFilter",
"DomainFilter",
"URLFilter",
"URLPatternFilter",
"FilterStats",
"ContentRelevanceFilter",
"SEOFilter",
"KeywordRelevanceScorer",
"URLScorer",
"CompositeScorer",
"DomainAuthorityScorer",
"FreshnessScorer",
"PathDepthScorer",
"ContentTypeScorer",
]

View File

@@ -1,159 +0,0 @@
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import AsyncGenerator, Optional, Set, List, Dict
from functools import wraps
from contextvars import ContextVar
from ..types import AsyncWebCrawler, CrawlerRunConfig, CrawlResult, RunManyReturn
class DeepCrawlDecorator:
"""Decorator that adds deep crawling capability to arun method."""
deep_crawl_active = ContextVar("deep_crawl_active", default=False)
def __init__(self, crawler: AsyncWebCrawler):
self.crawler = crawler
def __call__(self, original_arun):
@wraps(original_arun)
async def wrapped_arun(url: str, config: CrawlerRunConfig = None, **kwargs):
# If deep crawling is already active, call the original method to avoid recursion.
if config and config.deep_crawl_strategy and not self.deep_crawl_active.get():
token = self.deep_crawl_active.set(True)
# Await the arun call to get the actual result object.
result_obj = await config.deep_crawl_strategy.arun(
crawler=self.crawler,
start_url=url,
config=config
)
if config.stream:
async def result_wrapper():
try:
async for result in result_obj:
yield result
finally:
self.deep_crawl_active.reset(token)
return result_wrapper()
else:
try:
return result_obj
finally:
self.deep_crawl_active.reset(token)
return await original_arun(url, config=config, **kwargs)
return wrapped_arun
class DeepCrawlStrategy(ABC):
"""
Abstract base class for deep crawling strategies.
Core functions:
- arun: Main entry point that returns an async generator of CrawlResults.
- shutdown: Clean up resources.
- can_process_url: Validate a URL and decide whether to process it.
- _process_links: Extract and process links from a CrawlResult.
"""
@abstractmethod
async def _arun_batch(
self,
start_url: str,
crawler: AsyncWebCrawler,
config: CrawlerRunConfig,
) -> List[CrawlResult]:
"""
Batch (non-streaming) mode:
Processes one BFS level at a time, then yields all the results.
"""
pass
@abstractmethod
async def _arun_stream(
self,
start_url: str,
crawler: AsyncWebCrawler,
config: CrawlerRunConfig,
) -> AsyncGenerator[CrawlResult, None]:
"""
Streaming mode:
Processes one BFS level at a time and yields results immediately as they arrive.
"""
pass
async def arun(
self,
start_url: str,
crawler: AsyncWebCrawler,
config: Optional[CrawlerRunConfig] = None,
) -> RunManyReturn:
"""
Traverse the given URL using the specified crawler.
Args:
start_url (str): The URL from which to start crawling.
crawler (AsyncWebCrawler): The crawler instance to use.
config (Optional[CrawlerRunConfig]): Crawler configuration.
Returns:
Union[CrawlResultT, List[CrawlResultT], AsyncGenerator[CrawlResultT, None]]
"""
if config is None:
raise ValueError("CrawlerRunConfig must be provided")
if config.stream:
return self._arun_stream(start_url, crawler, config)
else:
return await self._arun_batch(start_url, crawler, config)
def __call__(self, start_url: str, crawler: AsyncWebCrawler, config: CrawlerRunConfig):
return self.arun(start_url, crawler, config)
@abstractmethod
async def shutdown(self) -> None:
"""
Clean up resources used by the deep crawl strategy.
"""
pass
@abstractmethod
async def can_process_url(self, url: str, depth: int) -> bool:
"""
Validate the URL format and apply custom filtering logic.
Args:
url (str): The URL to validate.
depth (int): The current depth in the crawl.
Returns:
bool: True if the URL should be processed, False otherwise.
"""
pass
@abstractmethod
async def link_discovery(
self,
result: CrawlResult,
source_url: str,
current_depth: int,
visited: Set[str],
next_level: List[tuple],
depths: Dict[str, int],
) -> None:
"""
Extract and process links from the given crawl result.
This method should:
- Validate each extracted URL using can_process_url.
- Optionally score URLs.
- Append valid URLs (and their parent references) to the next_level list.
- Update the depths dictionary with the new depth for each URL.
Args:
result (CrawlResult): The result from a crawl operation.
source_url (str): The URL from which this result was obtained.
current_depth (int): The depth at which the source URL was processed.
visited (Set[str]): Set of already visited URLs.
next_level (List[tuple]): List of tuples (url, parent_url) for the next BFS level.
depths (Dict[str, int]): Mapping of URLs to their current depth.
"""
pass
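# --- Illustrative usage sketch, not part of this file ---
# Wiring a concrete strategy into CrawlerRunConfig so DeepCrawlDecorator
# activates it on arun(); the CrawlerRunConfig parameter names come from
# crawl4ai's public API and should be treated as assumptions relative to
# this diff.
import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
from crawl4ai.deep_crawling import BFSDeepCrawlStrategy

async def _demo():
    config = CrawlerRunConfig(
        deep_crawl_strategy=BFSDeepCrawlStrategy(max_depth=1),
        stream=False,
    )
    async with AsyncWebCrawler() as crawler:
        results = await crawler.arun("https://example.com", config=config)
        for r in results:
            print(r.url, r.success)

asyncio.run(_demo())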

View File

@@ -1,271 +0,0 @@
# best_first_crawling_strategy.py
import asyncio
import logging
from datetime import datetime
from typing import AsyncGenerator, Optional, Set, Dict, List, Tuple
from urllib.parse import urlparse
from ..models import TraversalStats
from .filters import FilterChain
from .scorers import URLScorer
from . import DeepCrawlStrategy
from ..types import AsyncWebCrawler, CrawlerRunConfig, CrawlResult, RunManyReturn
from ..utils import normalize_url_for_deep_crawl
from math import inf as infinity
# Configurable batch size for processing items from the priority queue
BATCH_SIZE = 10
class BestFirstCrawlingStrategy(DeepCrawlStrategy):
"""
Best-First Crawling Strategy using a priority queue.
This strategy prioritizes URLs based on their score, ensuring that higher-value
pages are crawled first. It reimplements the core traversal loop to use a priority
queue while keeping URL validation and link discovery consistent with our design.
Core methods:
- arun: Returns either a list (batch mode) or an async generator (stream mode).
- _arun_best_first: Core generator that uses a priority queue to yield CrawlResults.
- can_process_url: Validates URLs and applies filtering (inherited behavior).
- link_discovery: Extracts and validates links from a CrawlResult.
"""
def __init__(
self,
max_depth: int,
filter_chain: FilterChain = FilterChain(),
url_scorer: Optional[URLScorer] = None,
include_external: bool = False,
max_pages: int = infinity,
logger: Optional[logging.Logger] = None,
):
self.max_depth = max_depth
self.filter_chain = filter_chain
self.url_scorer = url_scorer
self.include_external = include_external
self.max_pages = max_pages
# self.logger = logger or logging.getLogger(__name__)
# Ensure logger is always a Logger instance, not a dict from serialization
if isinstance(logger, logging.Logger):
self.logger = logger
else:
# Create a new logger if logger is None, dict, or any other non-Logger type
self.logger = logging.getLogger(__name__)
self.stats = TraversalStats(start_time=datetime.now())
self._cancel_event = asyncio.Event()
self._pages_crawled = 0
async def can_process_url(self, url: str, depth: int) -> bool:
"""
Validate the URL format and apply filtering.
For the starting URL (depth 0), filtering is bypassed.
"""
try:
parsed = urlparse(url)
if not parsed.scheme or not parsed.netloc:
raise ValueError("Missing scheme or netloc")
if parsed.scheme not in ("http", "https"):
raise ValueError("Invalid scheme")
if "." not in parsed.netloc:
raise ValueError("Invalid domain")
except Exception as e:
self.logger.warning(f"Invalid URL: {url}, error: {e}")
return False
if depth != 0 and not await self.filter_chain.apply(url):
return False
return True
async def link_discovery(
self,
result: CrawlResult,
source_url: str,
current_depth: int,
visited: Set[str],
next_links: List[Tuple[str, Optional[str]]],
depths: Dict[str, int],
) -> None:
"""
Extract links from the crawl result, validate them, and append new URLs
(with their parent references) to next_links.
Also updates the depths dictionary.
"""
new_depth = current_depth + 1
if new_depth > self.max_depth:
return
# If we've reached the max pages limit, don't discover new links
remaining_capacity = self.max_pages - self._pages_crawled
if remaining_capacity <= 0:
self.logger.info(f"Max pages limit ({self.max_pages}) reached, stopping link discovery")
return
# Retrieve internal links; include external links if enabled.
links = result.links.get("internal", [])
if self.include_external:
links += result.links.get("external", [])
# If we have more links than remaining capacity, limit how many we'll process
valid_links = []
for link in links:
url = link.get("href")
base_url = normalize_url_for_deep_crawl(url, source_url)
if base_url in visited:
continue
if not await self.can_process_url(url, new_depth):
self.stats.urls_skipped += 1
continue
valid_links.append(base_url)
# Record the new depths and add to next_links
for url in valid_links:
depths[url] = new_depth
next_links.append((url, source_url))
async def _arun_best_first(
self,
start_url: str,
crawler: AsyncWebCrawler,
config: CrawlerRunConfig,
) -> AsyncGenerator[CrawlResult, None]:
"""
Core best-first crawl method using a priority queue.
The queue items are tuples of (negated score, depth, url, parent_url). Scores are
negated before insertion, so higher-scoring URLs are popped first. URLs are
processed in batches for efficiency.
"""
queue: asyncio.PriorityQueue = asyncio.PriorityQueue()
# Push the initial URL with score 0 and depth 0.
initial_score = self.url_scorer.score(start_url) if self.url_scorer else 0
await queue.put((-initial_score, 0, start_url, None))
visited: Set[str] = set()
depths: Dict[str, int] = {start_url: 0}
while not queue.empty() and not self._cancel_event.is_set():
# Stop if we've reached the max pages limit
if self._pages_crawled >= self.max_pages:
self.logger.info(f"Max pages limit ({self.max_pages}) reached, stopping crawl")
break
# Calculate how many more URLs we can process in this batch
remaining = self.max_pages - self._pages_crawled
batch_size = min(BATCH_SIZE, remaining)
if batch_size <= 0:
# No more pages to crawl
self.logger.info(f"Max pages limit ({self.max_pages}) reached, stopping crawl")
break
batch: List[Tuple[float, int, str, Optional[str]]] = []
# Retrieve up to batch_size items from the priority queue (capped by the max_pages budget).
for _ in range(batch_size):
if queue.empty():
break
item = await queue.get()
score, depth, url, parent_url = item
if url in visited:
continue
visited.add(url)
batch.append(item)
if not batch:
continue
# Process the current batch of URLs.
urls = [item[2] for item in batch]
batch_config = config.clone(deep_crawl_strategy=None, stream=True)
stream_gen = await crawler.arun_many(urls=urls, config=batch_config)
async for result in stream_gen:
result_url = result.url
# Find the corresponding tuple from the batch.
corresponding = next((item for item in batch if item[2] == result_url), None)
if not corresponding:
continue
score, depth, url, parent_url = corresponding
result.metadata = result.metadata or {}
result.metadata["depth"] = depth
result.metadata["parent_url"] = parent_url
result.metadata["score"] = -score
# Count only successful crawls toward max_pages limit
if result.success:
self._pages_crawled += 1
# Check if we've reached the limit during batch processing
if self._pages_crawled >= self.max_pages:
self.logger.info(f"Max pages limit ({self.max_pages}) reached during batch, stopping crawl")
break # Exit the generator
yield result
# Only discover links from successful crawls
if result.success:
# Discover new links from this result
new_links: List[Tuple[str, Optional[str]]] = []
await self.link_discovery(result, result_url, depth, visited, new_links, depths)
for new_url, new_parent in new_links:
new_depth = depths.get(new_url, depth + 1)
new_score = self.url_scorer.score(new_url) if self.url_scorer else 0
await queue.put((-new_score, new_depth, new_url, new_parent))
# End of crawl.
async def _arun_batch(
self,
start_url: str,
crawler: AsyncWebCrawler,
config: CrawlerRunConfig,
) -> List[CrawlResult]:
"""
Best-first crawl in batch mode.
Aggregates all CrawlResults into a list.
"""
results: List[CrawlResult] = []
async for result in self._arun_best_first(start_url, crawler, config):
results.append(result)
return results
async def _arun_stream(
self,
start_url: str,
crawler: AsyncWebCrawler,
config: CrawlerRunConfig,
) -> AsyncGenerator[CrawlResult, None]:
"""
Best-first crawl in streaming mode.
Yields CrawlResults as they become available.
"""
async for result in self._arun_best_first(start_url, crawler, config):
yield result
async def arun(
self,
start_url: str,
crawler: AsyncWebCrawler,
config: Optional[CrawlerRunConfig] = None,
) -> "RunManyReturn":
"""
Main entry point for best-first crawling.
Returns either a list (batch mode) or an async generator (stream mode)
of CrawlResults.
"""
if config is None:
raise ValueError("CrawlerRunConfig must be provided")
if config.stream:
return self._arun_stream(start_url, crawler, config)
else:
return await self._arun_batch(start_url, crawler, config)
async def shutdown(self) -> None:
"""
Signal cancellation and clean up resources.
"""
self._cancel_event.set()
self.stats.end_time = datetime.now()

View File

@@ -1,260 +0,0 @@
# bfs_deep_crawl_strategy.py
import asyncio
import logging
from datetime import datetime
from typing import AsyncGenerator, Optional, Set, Dict, List, Tuple
from urllib.parse import urlparse
from ..models import TraversalStats
from .filters import FilterChain
from .scorers import URLScorer
from . import DeepCrawlStrategy
from ..types import AsyncWebCrawler, CrawlerRunConfig, CrawlResult
from ..utils import normalize_url_for_deep_crawl, efficient_normalize_url_for_deep_crawl
from math import inf as infinity
class BFSDeepCrawlStrategy(DeepCrawlStrategy):
"""
Breadth-First Search deep crawling strategy.
Core functions:
- arun: Main entry point; splits execution into batch or stream modes.
- link_discovery: Extracts, filters, and (if needed) scores the outgoing URLs.
- can_process_url: Validates URL format and applies the filter chain.
"""
def __init__(
self,
max_depth: int,
filter_chain: FilterChain = FilterChain(),
url_scorer: Optional[URLScorer] = None,
include_external: bool = False,
score_threshold: float = -infinity,
max_pages: int = infinity,
logger: Optional[logging.Logger] = None,
):
self.max_depth = max_depth
self.filter_chain = filter_chain
self.url_scorer = url_scorer
self.include_external = include_external
self.score_threshold = score_threshold
self.max_pages = max_pages
# self.logger = logger or logging.getLogger(__name__)
# Ensure logger is always a Logger instance, not a dict from serialization
if isinstance(logger, logging.Logger):
self.logger = logger
else:
# Create a new logger if logger is None, dict, or any other non-Logger type
self.logger = logging.getLogger(__name__)
self.stats = TraversalStats(start_time=datetime.now())
self._cancel_event = asyncio.Event()
self._pages_crawled = 0
async def can_process_url(self, url: str, depth: int) -> bool:
"""
Validates the URL and applies the filter chain.
For the start URL (depth 0) filtering is bypassed.
"""
try:
parsed = urlparse(url)
if not parsed.scheme or not parsed.netloc:
raise ValueError("Missing scheme or netloc")
if parsed.scheme not in ("http", "https"):
raise ValueError("Invalid scheme")
if "." not in parsed.netloc:
raise ValueError("Invalid domain")
except Exception as e:
self.logger.warning(f"Invalid URL: {url}, error: {e}")
return False
if depth != 0 and not await self.filter_chain.apply(url):
return False
return True
async def link_discovery(
self,
result: CrawlResult,
source_url: str,
current_depth: int,
visited: Set[str],
next_level: List[Tuple[str, Optional[str]]],
depths: Dict[str, int],
) -> None:
"""
Extracts links from the crawl result, validates and scores them, and
prepares the next level of URLs.
Each valid URL is appended to next_level as a tuple (url, parent_url)
and its depth is tracked.
"""
next_depth = current_depth + 1
if next_depth > self.max_depth:
return
# If we've reached the max pages limit, don't discover new links
remaining_capacity = self.max_pages - self._pages_crawled
if remaining_capacity <= 0:
self.logger.info(f"Max pages limit ({self.max_pages}) reached, stopping link discovery")
return
# Get internal links and, if enabled, external links.
links = result.links.get("internal", [])
if self.include_external:
links += result.links.get("external", [])
valid_links = []
# First collect all valid links
for link in links:
url = link.get("href")
# Strip URL fragments to avoid duplicate crawling
# base_url = url.split('#')[0] if url else url
base_url = normalize_url_for_deep_crawl(url, source_url)
if base_url in visited:
continue
if not await self.can_process_url(url, next_depth):
self.stats.urls_skipped += 1
continue
# Score the URL if a scorer is provided
score = self.url_scorer.score(base_url) if self.url_scorer else 0
# Skip URLs with scores below the threshold
if score < self.score_threshold:
self.logger.debug(f"URL {url} skipped: score {score} below threshold {self.score_threshold}")
self.stats.urls_skipped += 1
continue
visited.add(base_url)
valid_links.append((base_url, score))
# If we have more valid links than capacity, sort by score and take the top ones
if len(valid_links) > remaining_capacity:
if self.url_scorer:
# Sort by score in descending order
valid_links.sort(key=lambda x: x[1], reverse=True)
# Take only as many as we have capacity for
valid_links = valid_links[:remaining_capacity]
self.logger.info(f"Limiting to {remaining_capacity} URLs due to max_pages limit")
# Process the final selected links
for url, score in valid_links:
# attach the score to metadata if needed
if score:
result.metadata = result.metadata or {}
result.metadata["score"] = score
next_level.append((url, source_url))
depths[url] = next_depth
async def _arun_batch(
self,
start_url: str,
crawler: AsyncWebCrawler,
config: CrawlerRunConfig,
) -> List[CrawlResult]:
"""
Batch (non-streaming) mode:
Processes one BFS level at a time, then returns all the results as a list.
"""
visited: Set[str] = set()
# current_level holds tuples: (url, parent_url)
current_level: List[Tuple[str, Optional[str]]] = [(start_url, None)]
depths: Dict[str, int] = {start_url: 0}
results: List[CrawlResult] = []
while current_level and not self._cancel_event.is_set():
# Check if we've already reached max_pages before starting a new level
if self._pages_crawled >= self.max_pages:
self.logger.info(f"Max pages limit ({self.max_pages}) reached, stopping crawl")
break
next_level: List[Tuple[str, Optional[str]]] = []
urls = [url for url, _ in current_level]
# Clone the config to disable deep crawling recursion and enforce batch mode.
batch_config = config.clone(deep_crawl_strategy=None, stream=False)
batch_results = await crawler.arun_many(urls=urls, config=batch_config)
# Update pages crawled counter - count only successful crawls
successful_results = [r for r in batch_results if r.success]
self._pages_crawled += len(successful_results)
for result in batch_results:
url = result.url
depth = depths.get(url, 0)
result.metadata = result.metadata or {}
result.metadata["depth"] = depth
parent_url = next((parent for (u, parent) in current_level if u == url), None)
result.metadata["parent_url"] = parent_url
results.append(result)
# Only discover links from successful crawls
if result.success:
# Link discovery will handle the max pages limit internally
await self.link_discovery(result, url, depth, visited, next_level, depths)
current_level = next_level
return results
async def _arun_stream(
self,
start_url: str,
crawler: AsyncWebCrawler,
config: CrawlerRunConfig,
) -> AsyncGenerator[CrawlResult, None]:
"""
Streaming mode:
Processes one BFS level at a time and yields results immediately as they arrive.
"""
visited: Set[str] = set()
current_level: List[Tuple[str, Optional[str]]] = [(start_url, None)]
depths: Dict[str, int] = {start_url: 0}
while current_level and not self._cancel_event.is_set():
next_level: List[Tuple[str, Optional[str]]] = []
urls = [url for url, _ in current_level]
visited.update(urls)
stream_config = config.clone(deep_crawl_strategy=None, stream=True)
stream_gen = await crawler.arun_many(urls=urls, config=stream_config)
# Keep track of processed results for this batch
results_count = 0
async for result in stream_gen:
url = result.url
depth = depths.get(url, 0)
result.metadata = result.metadata or {}
result.metadata["depth"] = depth
parent_url = next((parent for (u, parent) in current_level if u == url), None)
result.metadata["parent_url"] = parent_url
# Count only successful crawls
if result.success:
self._pages_crawled += 1
# Check if we've reached the limit during batch processing
if self._pages_crawled >= self.max_pages:
self.logger.info(f"Max pages limit ({self.max_pages}) reached during batch, stopping crawl")
break # Exit the generator
results_count += 1
yield result
# Only discover links from successful crawls
if result.success:
# Link discovery will handle the max pages limit internally
await self.link_discovery(result, url, depth, visited, next_level, depths)
# If we didn't get results back (e.g. due to errors), avoid getting stuck in an infinite loop
# by considering these URLs as visited but not counting them toward the max_pages limit
if results_count == 0 and urls:
self.logger.warning(f"No results returned for {len(urls)} URLs, marking as visited")
current_level = next_level
async def shutdown(self) -> None:
"""
Clean up resources and signal cancellation of the crawl.
"""
self._cancel_event.set()
self.stats.end_time = datetime.now()
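A batch-mode sketch for this strategy; the domain filter and limits are illustrative:

    import asyncio
    from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
    from crawl4ai.deep_crawling import BFSDeepCrawlStrategy
    from crawl4ai.deep_crawling.filters import FilterChain, DomainFilter

    async def main():
        strategy = BFSDeepCrawlStrategy(
            max_depth=2,
            filter_chain=FilterChain([DomainFilter(allowed_domains=["example.com"])]),
            max_pages=50,  # counts successful crawls only
        )
        config = CrawlerRunConfig(deep_crawl_strategy=strategy, stream=False)
        async with AsyncWebCrawler() as crawler:
            results = await crawler.arun("https://example.com", config=config)
            for r in results:
                print(r.metadata.get("depth"), r.metadata.get("parent_url"), r.url)

    asyncio.run(main())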

View File

@@ -1,432 +0,0 @@
from __future__ import annotations
# I just got crazy, trying to write K&R C but in Python. Right now I feel like I'm in a quantum state.
# I probably won't use this; I just want to leave it here. A century later, the future human race will be like, "WTF?"
# ------ Imports That Will Make You Question Reality ------ #
from functools import wraps
from contextvars import ContextVar
import inspect
from crawl4ai import CacheMode
from crawl4ai.async_configs import CrawlerRunConfig
from crawl4ai.models import CrawlResult, TraversalStats
from crawl4ai.deep_crawling.filters import FilterChain
from crawl4ai.async_webcrawler import AsyncWebCrawler
import time
import logging
from urllib.parse import urlparse
from abc import ABC, abstractmethod
from collections import deque
import asyncio
from typing import (
AsyncGenerator,
Dict,
List,
TypeVar,
Generic,
Tuple,
Callable,
Awaitable,
Union,
)
from functools import lru_cache
import mmh3
from bitarray import bitarray
import numpy as np
from heapq import heappush, heappop
# ------ Type Algebra Mastery ------ #
CrawlResultT = TypeVar("CrawlResultT", bound="CrawlResult")
PriorityT = TypeVar("PriorityT")
P = TypeVar("P")
# ------ Hyperscalar Context Management ------ #
deep_crawl_ctx = ContextVar("deep_crawl_stack", default=deque())
# ------ Algebraic Crawler Monoid ------ #
class TraversalContext:
__slots__ = ('visited', 'frontier', 'depths', 'priority_fn', 'current_depth')
def __init__(self,
priority_fn: Callable[[str], Awaitable[float]] = lambda _: 1.0):
self.visited: BloomFilter = BloomFilter(10**6, 0.01) # 1M items, 1% FP
self.frontier: PriorityQueue = PriorityQueue()
self.depths: Dict[str, int] = {}
self.priority_fn = priority_fn
self.current_depth = 0
def clone_for_level(self) -> TraversalContext:
"""Monadic context propagation"""
new_ctx = TraversalContext(self.priority_fn)
new_ctx.visited = self.visited.copy()
new_ctx.depths = self.depths.copy()
new_ctx.current_depth = self.current_depth
return new_ctx
class PriorityQueue(Generic[PriorityT]):
"""Fibonacci heap-inspired priority queue with O(1) amortized operations"""
__slots__ = ('_heap', '_index')
def __init__(self):
self._heap: List[Tuple[PriorityT, float, P]] = []
self._index: Dict[P, int] = {}
def insert(self, priority: PriorityT, item: P) -> None:
tiebreaker = time.time() # Ensure FIFO for equal priorities
heappush(self._heap, (priority, tiebreaker, item))
self._index[item] = len(self._heap) - 1
def extract(self, top_n=1) -> List[P]:
"""Pop up to top_n highest-priority items; raises IndexError if the queue is empty."""
items = []
for _ in range(top_n):
if not self._heap:
break
priority, _, item = heappop(self._heap)
del self._index[item]
items.append(item)
if not items:
raise IndexError("Priority queue empty")
return items
def is_empty(self) -> bool:
return not bool(self._heap)
class BloomFilter:
"""Optimal Bloom filter using murmur3 hash avalanche"""
__slots__ = ('size', 'hashes', 'bits')
def __init__(self, capacity: int, error_rate: float):
self.size = self._optimal_size(capacity, error_rate)
self.hashes = self._optimal_hashes(capacity, self.size)
self.bits = bitarray(self.size)
self.bits.setall(False)
@staticmethod
def _optimal_size(n: int, p: float) -> int:
m = - (n * np.log(p)) / (np.log(2) ** 2)
return int(np.ceil(m))
@staticmethod
def _optimal_hashes(n: int, m: int) -> int:
k = (m / n) * np.log(2)
return int(np.ceil(k))
def add(self, item: str) -> None:
for seed in range(self.hashes):
digest = mmh3.hash(item, seed) % self.size
self.bits[digest] = True
def __contains__(self, item: str) -> bool:
return all(
self.bits[mmh3.hash(item, seed) % self.size]
for seed in range(self.hashes)
)
def copy(self) -> BloomFilter:
new = object.__new__(BloomFilter)
new.size = self.size
new.hashes = self.hashes
new.bits = self.bits.copy()
return new
def __len__(self) -> int:
"""
Estimates the number of items in the filter using the
count of set bits and the formula:
n = -m/k * ln(1 - X/m)
where:
m = size of bit array
k = number of hash functions
X = count of set bits
"""
set_bits = self.bits.count(True)
if set_bits == 0:
return 0
# Use the inverse bloom filter formula to estimate cardinality
return int(
-(self.size / self.hashes) *
np.log(1 - set_bits / self.size)
)
def bit_count(self) -> int:
"""Returns the raw count of set bits in the filter"""
return self.bits.count(True)
def __repr__(self) -> str:
return f"BloomFilter(est_items={len(self)}, bits={self.bit_count()}/{self.size})"
# ------ Hyper-Optimal Deep Crawl Core ------ #
class DeepCrawlDecorator:
"""Metaprogramming marvel: Zero-cost deep crawl abstraction"""
def __init__(self, crawler: AsyncWebCrawler):
self.crawler = crawler
def __call__(self, original_arun: Callable) -> Callable:
@wraps(original_arun)
async def quantum_arun(url: str, config: CrawlerRunConfig = None, **kwargs):
stack = deep_crawl_ctx.get()
if config and config.deep_crawl_strategy and not stack:
stack.append(self.crawler)
try:
deep_crawl_ctx.set(stack)
async for result in config.deep_crawl_strategy.traverse(
start_url=url,
crawler=self.crawler,
config=config
):
yield result
finally:
stack.pop()
deep_crawl_ctx.set(stack)
else:
result = await original_arun(url, config=config, **kwargs)
yield result
return quantum_arun
async def collect_results(url, crawler, config):
if id(getattr(crawler, "arun")) != id(getattr(crawler, "original_arun")):
setattr(crawler, "arun", getattr(crawler, "original_arun"))
ret = crawler.arun(url, config=config)
# If arun is an async generator, iterate over it
if inspect.isasyncgen(ret):
return [r async for r in ret]
# Otherwise, await the coroutine and normalize to a list
result = await ret
return result if isinstance(result, list) else [result]
async def collect_many_results(url, crawler, config):
# Replace back arun to its original implementation
if id(getattr(crawler, "arun")) != id(getattr(crawler, "original_arun")):
setattr(crawler, "arun", getattr(crawler, "original_arun"))
ret = crawler.arun_many(url, config=config)
# If arun_many returns an async generator, iterate over it
if inspect.isasyncgen(ret):
return [r async for r in ret]
# Otherwise, await the coroutine and normalize to a list
result = await ret
return result if isinstance(result, list) else [result]
# ------ Deep Crawl Strategy Interface ------ #
CrawlResultT = TypeVar("CrawlResultT", bound=CrawlResult)
# In batch mode we return List[CrawlResult] and in stream mode an AsyncGenerator.
RunManyReturn = Union[CrawlResultT, List[CrawlResultT], AsyncGenerator[CrawlResultT, None]]
class DeepCrawlStrategy(ABC):
"""Abstract base class that will make Dijkstra smile"""
@abstractmethod
async def traverse(self,
start_url: str,
crawler: AsyncWebCrawler,
config: CrawlerRunConfig) -> RunManyReturn:
"""Traverse with O(1) memory complexity via generator fusion"""
...
@abstractmethod
def precompute_priority(self, url: str) -> Awaitable[float]:
"""Quantum-inspired priority precomputation"""
pass
@abstractmethod
async def link_hypercube(self, result: CrawlResult) -> AsyncGenerator[str, None]:
"""Hilbert-curve optimized link generation"""
pass
# ------ BFS That Would Make Knuth Proud ------ #
def calculate_quantum_batch_size(
depth: int,
max_depth: int,
frontier_size: int,
visited_size: int
) -> int:
"""
Calculates optimal batch size for URL processing using quantum-inspired mathematical principles.
This function implements a sophisticated batch size calculation using:
1. Golden Ratio (φ) based scaling for optimal irrationality
2. Depth-aware amplitude modulation
3. Harmonic series dampening
4. Logarithmic growth control
5. Dynamic frontier adaptation
The formula follows the quantum harmonic oscillator principle:
N = ⌈φ^(2d) * log₂(|V|) * H(d)⁻¹ * min(20, |F|/10)⌉
where:
φ = Golden Ratio ((1 + √5) / 2)
d = depth factor (normalized remaining depth)
|V| = size of visited set
H(d) = d-th harmonic number
|F| = frontier size
Args:
depth (int): Current traversal depth
max_depth (int): Maximum allowed depth
frontier_size (int): Current size of frontier queue
visited_size (int): Number of URLs visited so far
Returns:
int: Optimal batch size bounded between 1 and 100
Mathematical Properties:
- Maintains O(log n) growth with respect to visited size
- Provides φ-optimal distribution of resources
- Ensures quantum-like state transitions between depths
- Harmonically dampened to prevent exponential explosion
"""
# Golden ratio φ = (1 + √5) / 2
φ = (1 + 5 ** 0.5) / 2
# Calculate normalized depth factor [0, 1]
depth_factor = (max_depth - depth) / max_depth if depth < max_depth else 0
# Compute harmonic number for current depth
harmonic = sum(1/k for k in range(1, depth + 2))
# Calculate quantum batch size
batch_size = int(np.ceil(
(φ ** (depth_factor * 2)) * # Golden ratio scaling
np.log2(visited_size + 2) * # Logarithmic growth factor
(1 / harmonic) * # Harmonic dampening
max(1, min(20, frontier_size / 10)) # Frontier-aware scaling
))
# Enforce practical bounds
return max(1, min(100, batch_size))
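A worked example of the formula above; the inputs are illustrative:

    # depth=1, max_depth=2, frontier_size=50, visited_size=30
    # depth_factor = (2 - 1) / 2 = 0.5      -> φ ** 1 ≈ 1.618
    # log2(30 + 2) = 5.0
    # harmonic(1)  = 1 + 1/2 = 1.5          -> dampening factor 1/1.5
    # frontier     = max(1, min(20, 50/10)) = 5
    # batch_size   = ceil(1.618 * 5.0 * (1/1.5) * 5) = 27
    print(calculate_quantum_batch_size(depth=1, max_depth=2, frontier_size=50, visited_size=30))  # 27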
class BFSDeepCrawlStrategy(DeepCrawlStrategy):
"""Breadth-First Search with Einstein-Rosen bridge optimization"""
__slots__ = ('max_depth', 'filter_chain', 'priority_fn', 'stats', '_cancel')
def __init__(self,
max_depth: int,
filter_chain: FilterChain = FilterChain(),
priority_fn: Callable[[str], Awaitable[float]] = lambda url: 1.0,
logger: logging.Logger = None):
self.max_depth = max_depth
self.filter_chain = filter_chain
self.priority_fn = priority_fn
self.stats = TraversalStats()
self._cancel = asyncio.Event()
self.semaphore = asyncio.Semaphore(1000)
async def traverse(self,
start_url: str,
crawler: AsyncWebCrawler,
config: CrawlerRunConfig) -> RunManyReturn:
"""Non-blocking BFS with O(b^d) time complexity awareness"""
ctx = TraversalContext(self.priority_fn)
ctx.frontier.insert(self.priority_fn(start_url), (start_url, None, 0))
ctx.visited.add(start_url)
ctx.depths[start_url] = 0
while not ctx.frontier.is_empty() and not self._cancel.is_set():
# Use the batch-size heuristic to find the top_n value
top_n = calculate_quantum_batch_size(
depth=ctx.current_depth,
max_depth=self.max_depth,
frontier_size=len(ctx.frontier._heap),
visited_size=len(ctx.visited)
)
urls = ctx.frontier.extract(top_n=top_n)
# url, parent, depth = ctx.frontier.extract(top_n=top_n)
if urls:
ctx.current_depth = urls[0][2]
async with self.semaphore:
results = await collect_many_results([url for (url, parent, depth) in urls], crawler, config)
# results = await asyncio.gather(*[
# collect_results(url, crawler, config) for (url, parent, depth) in urls
# ])
# result = _result[0]
for ix, result in enumerate(results):
url, parent, depth = result.url, urls[ix][1], urls[ix][2]
result.metadata['depth'] = depth
result.metadata['parent'] = parent
yield result
if depth < self.max_depth:
async for link in self.link_hypercube(result):
if link not in ctx.visited:
priority = self.priority_fn(link)
ctx.frontier.insert(priority, (link, url, depth + 1))
ctx.visited.add(link)
ctx.depths[link] = depth + 1
# Note: lru_cache on an async method would cache the coroutine object itself,
# which cannot be awaited more than once, so results are not memoized here.
async def validate_url(self, url: str) -> bool:
"""URL validation with λ-calculus purity"""
try:
parsed = urlparse(url)
return (parsed.scheme in {'http', 'https'}
and '.' in parsed.netloc
and await self.filter_chain.apply(url))
except Exception:
return False
async def link_hypercube(self, result: CrawlResult) -> AsyncGenerator[str, None]:
"""Hilbert-ordered link generation with O(1) yield latency"""
links = (link['href'] for link in result.links.get('internal', []))
# filter() cannot await coroutines, so validate each link explicitly
validated = [link for link in links if await self.validate_url(link)]
for link in sorted(validated, key=lambda x: -self.priority_fn(x)):
yield link
def __aiter__(self) -> AsyncGenerator[CrawlResult, None]:
"""Native async iterator interface"""
return self.traverse()
async def __anext__(self) -> CrawlResult:
"""True async iterator protocol implementation"""
result = await self.traverse().__anext__()
if result:
return result
raise StopAsyncIteration
async def precompute_priority(self, url):
return super().precompute_priority(url)
async def shutdown(self):
self._cancel.set()
# ------ Usage That Will Drop Jaws ------ #
async def main():
"""Quantum crawl example"""
strategy = BFSDeepCrawlStrategy(
max_depth=2,
priority_fn=lambda url: 1.0 / (len(url) + 1e-9), # Inverse length priority
# filter_chain=FilterChain(...)
)
config: CrawlerRunConfig = CrawlerRunConfig(
deep_crawl_strategy=strategy,
stream=False,
verbose=True,
cache_mode=CacheMode.BYPASS
)
async with AsyncWebCrawler() as crawler:
run_decorator = DeepCrawlDecorator(crawler)
setattr(crawler, "original_arun", crawler.arun)
crawler.arun = run_decorator(crawler.arun)
start_time = time.perf_counter()
async for result in crawler.arun("https://docs.crawl4ai.com", config=config):
print(f"🌀 {result.url} (Depth: {result.metadata['depth']})")
print(f"Deep crawl completed in {time.perf_counter() - start_time:.2f}s")
if __name__ == "__main__":
asyncio.run(main())

View File

@@ -1,110 +0,0 @@
# dfs_deep_crawl_strategy.py
from typing import AsyncGenerator, Optional, Set, Dict, List, Tuple
from ..models import CrawlResult
from .bfs_strategy import BFSDeepCrawlStrategy # noqa
from ..types import AsyncWebCrawler, CrawlerRunConfig
class DFSDeepCrawlStrategy(BFSDeepCrawlStrategy):
"""
Depth-First Search (DFS) deep crawling strategy.
Inherits URL validation and link discovery from BFSDeepCrawlStrategy.
Overrides _arun_batch and _arun_stream to use a stack (LIFO) for DFS traversal.
"""
async def _arun_batch(
self,
start_url: str,
crawler: AsyncWebCrawler,
config: CrawlerRunConfig,
) -> List[CrawlResult]:
"""
Batch (non-streaming) DFS mode.
Uses a stack to traverse URLs in DFS order, aggregating CrawlResults into a list.
"""
visited: Set[str] = set()
# Stack items: (url, parent_url, depth)
stack: List[Tuple[str, Optional[str], int]] = [(start_url, None, 0)]
depths: Dict[str, int] = {start_url: 0}
results: List[CrawlResult] = []
while stack and not self._cancel_event.is_set():
url, parent, depth = stack.pop()
if url in visited or depth > self.max_depth:
continue
visited.add(url)
# Clone config to disable recursive deep crawling.
batch_config = config.clone(deep_crawl_strategy=None, stream=False)
url_results = await crawler.arun_many(urls=[url], config=batch_config)
for result in url_results:
result.metadata = result.metadata or {}
result.metadata["depth"] = depth
result.metadata["parent_url"] = parent
if self.url_scorer:
result.metadata["score"] = self.url_scorer.score(url)
results.append(result)
# Count only successful crawls toward max_pages limit
if result.success:
self._pages_crawled += 1
# Check if we've reached the limit during batch processing
if self._pages_crawled >= self.max_pages:
self.logger.info(f"Max pages limit ({self.max_pages}) reached during batch, stopping crawl")
break # Exit the generator
new_links: List[Tuple[str, Optional[str]]] = []
# Only discover links from successful crawls
if result.success:
await self.link_discovery(result, url, depth, visited, new_links, depths)
# Push new links in reverse order so the first discovered is processed next.
for new_url, new_parent in reversed(new_links):
new_depth = depths.get(new_url, depth + 1)
stack.append((new_url, new_parent, new_depth))
return results
async def _arun_stream(
self,
start_url: str,
crawler: AsyncWebCrawler,
config: CrawlerRunConfig,
) -> AsyncGenerator[CrawlResult, None]:
"""
Streaming DFS mode.
Uses a stack to traverse URLs in DFS order and yields CrawlResults as they become available.
"""
visited: Set[str] = set()
stack: List[Tuple[str, Optional[str], int]] = [(start_url, None, 0)]
depths: Dict[str, int] = {start_url: 0}
while stack and not self._cancel_event.is_set():
url, parent, depth = stack.pop()
if url in visited or depth > self.max_depth:
continue
visited.add(url)
stream_config = config.clone(deep_crawl_strategy=None, stream=True)
stream_gen = await crawler.arun_many(urls=[url], config=stream_config)
async for result in stream_gen:
result.metadata = result.metadata or {}
result.metadata["depth"] = depth
result.metadata["parent_url"] = parent
if self.url_scorer:
result.metadata["score"] = self.url_scorer.score(url)
yield result
# Only count successful crawls toward max_pages limit
# and only discover links from successful crawls
if result.success:
self._pages_crawled += 1
# Check if we've reached the limit during batch processing
if self._pages_crawled >= self.max_pages:
self.logger.info(f"Max pages limit ({self.max_pages}) reached during batch, stopping crawl")
break # Exit the generator
new_links: List[Tuple[str, Optional[str]]] = []
await self.link_discovery(result, url, depth, visited, new_links, depths)
for new_url, new_parent in reversed(new_links):
new_depth = depths.get(new_url, depth + 1)
stack.append((new_url, new_parent, new_depth))
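Because this class only swaps the traversal order, it is a drop-in replacement for BFSDeepCrawlStrategy in the sketches above; the constructor arguments are assumed to be the ones inherited from the BFS strategy:

    from crawl4ai.deep_crawling import DFSDeepCrawlStrategy

    # Same constructor signature as BFSDeepCrawlStrategy; only the traversal order
    # changes (LIFO stack instead of level-by-level BFS).
    strategy = DFSDeepCrawlStrategy(max_depth=3, max_pages=50)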

View File

@@ -1,694 +0,0 @@
from abc import ABC, abstractmethod
from typing import List, Pattern, Set, Union
from urllib.parse import urlparse
from array import array
import re
import logging
from functools import lru_cache
import fnmatch
from dataclasses import dataclass
import weakref
import math
from collections import defaultdict
from typing import Dict
from ..utils import HeadPeekr
import asyncio
import inspect
@dataclass
class FilterStats:
__slots__ = ("_counters",)
def __init__(self):
# Use array of unsigned ints for atomic operations
self._counters = array("I", [0, 0, 0]) # total, passed, rejected
@property
def total_urls(self):
return self._counters[0]
@property
def passed_urls(self):
return self._counters[1]
@property
def rejected_urls(self):
return self._counters[2]
class URLFilter(ABC):
"""Optimized base filter class"""
__slots__ = ("name", "stats", "_logger_ref")
def __init__(self, name: str = None):
self.name = name or self.__class__.__name__
self.stats = FilterStats()
# Lazy logger initialization using weakref
self._logger_ref = None
@property
def logger(self):
if self._logger_ref is None or self._logger_ref() is None:
logger = logging.getLogger(f"urlfilter.{self.name}")
self._logger_ref = weakref.ref(logger)
return self._logger_ref()
@abstractmethod
def apply(self, url: str) -> bool:
pass
def _update_stats(self, passed: bool):
# Use direct array index for speed
self.stats._counters[0] += 1 # total
self.stats._counters[1] += passed # passed
self.stats._counters[2] += not passed # rejected
class FilterChain:
"""Optimized filter chain"""
__slots__ = ("filters", "stats", "_logger_ref")
def __init__(self, filters: List[URLFilter] = None):
self.filters = tuple(filters or []) # Immutable tuple for speed
self.stats = FilterStats()
self._logger_ref = None
@property
def logger(self):
if self._logger_ref is None or self._logger_ref() is None:
logger = logging.getLogger("urlfilter.chain")
self._logger_ref = weakref.ref(logger)
return self._logger_ref()
def add_filter(self, filter_: URLFilter) -> "FilterChain":
"""Add a filter to the chain"""
# self.filters is an immutable tuple, so rebuild it instead of calling append()
self.filters = self.filters + (filter_,)
return self  # Enable method chaining
async def apply(self, url: str) -> bool:
"""Apply all filters concurrently when possible"""
self.stats._counters[0] += 1 # Total processed URLs
tasks = []
for f in self.filters:
result = f.apply(url)
if inspect.isawaitable(result):
tasks.append(result) # Collect async tasks
elif not result: # Sync rejection
self.stats._counters[2] += 1 # Sync rejected
return False
if tasks:
results = await asyncio.gather(*tasks)
# Count how many filters rejected
rejections = results.count(False)
self.stats._counters[2] += rejections
if not all(results):
return False # Stop early if any filter rejected
self.stats._counters[1] += 1 # Passed
return True
class URLPatternFilter(URLFilter):
"""Pattern filter balancing speed and completeness"""
__slots__ = (
"patterns", # Store original patterns for serialization
"use_glob", # Store original use_glob for serialization
"reverse", # Store original reverse for serialization
"_simple_suffixes",
"_simple_prefixes",
"_domain_patterns",
"_path_patterns",
"_reverse",
)
PATTERN_TYPES = {
"SUFFIX": 1, # *.html
"PREFIX": 2, # /foo/*
"DOMAIN": 3, # *.example.com
"PATH": 4, # Everything else
"REGEX": 5,
}
def __init__(
self,
patterns: Union[str, Pattern, List[Union[str, Pattern]]],
use_glob: bool = True,
reverse: bool = False,
):
super().__init__()
# Store original constructor params for serialization
self.patterns = patterns
self.use_glob = use_glob
self.reverse = reverse
self._reverse = reverse
patterns = [patterns] if isinstance(patterns, (str, Pattern)) else patterns
self._simple_suffixes = set()
self._simple_prefixes = set()
self._domain_patterns = []
self._path_patterns = []
for pattern in patterns:
pattern_type = self._categorize_pattern(pattern)
self._add_pattern(pattern, pattern_type)
def _categorize_pattern(self, pattern: str) -> int:
"""Categorize pattern for specialized handling"""
if not isinstance(pattern, str):
return self.PATTERN_TYPES["PATH"]
# Check if it's a regex pattern
if pattern.startswith("^") or pattern.endswith("$") or "\\d" in pattern:
return self.PATTERN_TYPES["REGEX"]
if pattern.count("*") == 1:
if pattern.startswith("*."):
return self.PATTERN_TYPES["SUFFIX"]
if pattern.endswith("/*"):
return self.PATTERN_TYPES["PREFIX"]
if "://" in pattern and pattern.startswith("*."):
return self.PATTERN_TYPES["DOMAIN"]
return self.PATTERN_TYPES["PATH"]
def _add_pattern(self, pattern: str, pattern_type: int):
"""Add pattern to appropriate matcher"""
if pattern_type == self.PATTERN_TYPES["REGEX"]:
# For regex patterns, compile directly without glob translation
if isinstance(pattern, str) and (
pattern.startswith("^") or pattern.endswith("$") or "\\d" in pattern
):
self._path_patterns.append(re.compile(pattern))
return
elif pattern_type == self.PATTERN_TYPES["SUFFIX"]:
self._simple_suffixes.add(pattern[2:])
elif pattern_type == self.PATTERN_TYPES["PREFIX"]:
self._simple_prefixes.add(pattern[:-2])
elif pattern_type == self.PATTERN_TYPES["DOMAIN"]:
self._domain_patterns.append(re.compile(pattern.replace("*.", r"[^/]+\.")))
else:
if isinstance(pattern, str):
# Handle complex glob patterns
if "**" in pattern:
pattern = pattern.replace("**", ".*")
if "{" in pattern:
# Convert {a,b} to (a|b)
pattern = re.sub(
r"\{([^}]+)\}",
lambda m: f'({"|".join(m.group(1).split(","))})',
pattern,
)
pattern = fnmatch.translate(pattern)
self._path_patterns.append(
pattern if isinstance(pattern, Pattern) else re.compile(pattern)
)
@lru_cache(maxsize=10000)
def apply(self, url: str) -> bool:
# Quick suffix check (*.html)
if self._simple_suffixes:
path = url.split("?")[0]
if path.split("/")[-1].split(".")[-1] in self._simple_suffixes:
result = True
self._update_stats(result)
return not result if self._reverse else result
# Domain check
if self._domain_patterns:
for pattern in self._domain_patterns:
if pattern.match(url):
result = True
self._update_stats(result)
return not result if self._reverse else result
# Prefix check (/foo/*)
if self._simple_prefixes:
path = url.split("?")[0]
# if any(path.startswith(p) for p in self._simple_prefixes):
# result = True
# self._update_stats(result)
# return not result if self._reverse else result
####
# Modified the prefix matching logic to ensure path boundary checking:
# - Check if the matched prefix is followed by a path separator (`/`), query parameter (`?`), fragment (`#`), or is at the end of the path
# - This ensures `/api/` only matches complete path segments, not substrings like `/apiv2/`
####
for prefix in self._simple_prefixes:
if path.startswith(prefix):
if len(path) == len(prefix) or path[len(prefix)] in ['/', '?', '#']:
result = True
self._update_stats(result)
return not result if self._reverse else result
# Complex patterns
if self._path_patterns:
if any(p.search(url) for p in self._path_patterns):
result = True
self._update_stats(result)
return not result if self._reverse else result
result = False
self._update_stats(result)
return not result if self._reverse else result
class ContentTypeFilter(URLFilter):
"""Optimized content type filter using fast lookups"""
__slots__ = ("allowed_types", "_ext_map", "_check_extension")
# Fast extension to mime type mapping
_MIME_MAP = {
# Text Formats
"txt": "text/plain",
"html": "text/html",
"htm": "text/html",
"xhtml": "application/xhtml+xml",
"css": "text/css",
"csv": "text/csv",
"ics": "text/calendar",
"js": "application/javascript",
# Images
"bmp": "image/bmp",
"gif": "image/gif",
"jpeg": "image/jpeg",
"jpg": "image/jpeg",
"png": "image/png",
"svg": "image/svg+xml",
"tiff": "image/tiff",
"ico": "image/x-icon",
"webp": "image/webp",
# Audio
"mp3": "audio/mpeg",
"wav": "audio/wav",
"ogg": "audio/ogg",
"m4a": "audio/mp4",
"aac": "audio/aac",
# Video
"mp4": "video/mp4",
"mpeg": "video/mpeg",
"webm": "video/webm",
"avi": "video/x-msvideo",
"mov": "video/quicktime",
"flv": "video/x-flv",
"wmv": "video/x-ms-wmv",
"mkv": "video/x-matroska",
# Applications
"json": "application/json",
"xml": "application/xml",
"pdf": "application/pdf",
"zip": "application/zip",
"gz": "application/gzip",
"tar": "application/x-tar",
"rar": "application/vnd.rar",
"7z": "application/x-7z-compressed",
"exe": "application/vnd.microsoft.portable-executable",
"msi": "application/x-msdownload",
# Fonts
"woff": "font/woff",
"woff2": "font/woff2",
"ttf": "font/ttf",
"otf": "font/otf",
# Microsoft Office
"doc": "application/msword",
"dot": "application/msword",
"docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
"xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
"xls": "application/vnd.ms-excel",
"ppt": "application/vnd.ms-powerpoint",
"pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
# OpenDocument Formats
"odt": "application/vnd.oasis.opendocument.text",
"ods": "application/vnd.oasis.opendocument.spreadsheet",
"odp": "application/vnd.oasis.opendocument.presentation",
# Archives
"tar.gz": "application/gzip",
"tgz": "application/gzip",
"bz2": "application/x-bzip2",
# Others
"rtf": "application/rtf",
"apk": "application/vnd.android.package-archive",
"epub": "application/epub+zip",
"jar": "application/java-archive",
"swf": "application/x-shockwave-flash",
"midi": "audio/midi",
"mid": "audio/midi",
"ps": "application/postscript",
"ai": "application/postscript",
"eps": "application/postscript",
# Custom or less common
"bin": "application/octet-stream",
"dmg": "application/x-apple-diskimage",
"iso": "application/x-iso9660-image",
"deb": "application/x-debian-package",
"rpm": "application/x-rpm",
"sqlite": "application/vnd.sqlite3",
# Placeholder
"unknown": "application/octet-stream", # Fallback for unknown file types
# php
"php": "application/x-httpd-php",
"php3": "application/x-httpd-php",
"php4": "application/x-httpd-php",
"php5": "application/x-httpd-php",
"php7": "application/x-httpd-php",
"phtml": "application/x-httpd-php",
"phps": "application/x-httpd-php-source",
}
@staticmethod
@lru_cache(maxsize=1000)
def _extract_extension(url: str) -> str:
"""Extracts file extension from a URL."""
# Remove scheme (http://, https://) if present
if "://" in url:
url = url.split("://", 1)[-1] # Get everything after '://'
# Remove domain (everything up to the first '/')
path_start = url.find("/")
path = url[path_start:] if path_start != -1 else ""
# Extract last filename in path
filename = path.rsplit("/", 1)[-1] if "/" in path else ""
# Extract and validate extension
if "." not in filename:
return ""
return filename.rpartition(".")[-1].lower()
def __init__(
self,
allowed_types: Union[str, List[str]],
check_extension: bool = True,
ext_map: Dict[str, str] = _MIME_MAP,
):
super().__init__()
# Normalize and store as frozenset for fast lookup
self.allowed_types = frozenset(
t.lower()
for t in (
allowed_types if isinstance(allowed_types, list) else [allowed_types]
)
)
self._check_extension = check_extension
# Pre-compute extension map for allowed types
self._ext_map = frozenset(
ext
for ext, mime in self._MIME_MAP.items()
if any(allowed in mime for allowed in self.allowed_types)
)
@lru_cache(maxsize=1000)
def _check_url_cached(self, url: str) -> bool:
"""Cached URL checking"""
if not self._check_extension:
return True
ext = self._extract_extension(url)
if not ext:
return True
return ext in self._ext_map
def apply(self, url: str) -> bool:
"""Fast extension check with caching"""
result = self._check_url_cached(url)
self._update_stats(result)
return result
class DomainFilter(URLFilter):
"""Optimized domain filter with fast lookups and caching"""
__slots__ = ("_allowed_domains", "_blocked_domains", "_domain_cache")
# Regex for fast domain extraction
_DOMAIN_REGEX = re.compile(r"://([^/]+)")
def __init__(
self,
allowed_domains: Union[str, List[str]] = None,
blocked_domains: Union[str, List[str]] = None,
):
super().__init__()
# Convert inputs to frozensets for immutable, fast lookups
self._allowed_domains = (
frozenset(self._normalize_domains(allowed_domains))
if allowed_domains
else None
)
self._blocked_domains = (
frozenset(self._normalize_domains(blocked_domains))
if blocked_domains
else frozenset()
)
@staticmethod
def _normalize_domains(domains: Union[str, List[str]]) -> Set[str]:
"""Fast domain normalization"""
if isinstance(domains, str):
return {domains.lower()}
return {d.lower() for d in domains}
@staticmethod
def _is_subdomain(domain: str, parent_domain: str) -> bool:
"""Check if domain is a subdomain of parent_domain"""
return domain == parent_domain or domain.endswith(f".{parent_domain}")
@staticmethod
@lru_cache(maxsize=10000)
def _extract_domain(url: str) -> str:
"""Ultra-fast domain extraction with regex and caching"""
match = DomainFilter._DOMAIN_REGEX.search(url)
return match.group(1).lower() if match else ""
def apply(self, url: str) -> bool:
"""Optimized domain checking with early returns"""
# Skip processing if no filters
if not self._blocked_domains and self._allowed_domains is None:
self._update_stats(True)
return True
domain = self._extract_domain(url)
# Check for blocked domains, including subdomains
for blocked in self._blocked_domains:
if self._is_subdomain(domain, blocked):
self._update_stats(False)
return False
# If no allowed domains specified, accept all non-blocked
if self._allowed_domains is None:
self._update_stats(True)
return True
# Check if domain matches any allowed domain (including subdomains)
for allowed in self._allowed_domains:
if self._is_subdomain(domain, allowed):
self._update_stats(True)
return True
# No matches found
self._update_stats(False)
return False
class ContentRelevanceFilter(URLFilter):
"""BM25-based relevance filter using head section content"""
__slots__ = ("query_terms", "threshold", "k1", "b", "avgdl")
def __init__(
self,
query: str,
threshold: float,
k1: float = 1.2,
b: float = 0.75,
avgdl: int = 1000,
):
super().__init__(name="BM25RelevanceFilter")
self.query_terms = self._tokenize(query)
self.threshold = threshold
self.k1 = k1 # TF saturation parameter
self.b = b # Length normalization parameter
self.avgdl = avgdl # Average document length (empirical value)
async def apply(self, url: str) -> bool:
head_content = await HeadPeekr.peek_html(url)
if not head_content:
self._update_stats(False)
return False
# Field extraction with weighting
fields = {
"title": HeadPeekr.get_title(head_content) or "",
"meta": HeadPeekr.extract_meta_tags(head_content),
}
doc_text = self._build_document(fields)
score = self._bm25(doc_text)
decision = score >= self.threshold
self._update_stats(decision)
return decision
def _build_document(self, fields: Dict) -> str:
"""Weighted document construction"""
return " ".join(
[
fields["title"] * 3, # Title weight
fields["meta"].get("description", "") * 2,
fields["meta"].get("keywords", ""),
" ".join(fields["meta"].values()),
]
)
def _tokenize(self, text: str) -> List[str]:
"""Fast case-insensitive tokenization"""
return text.lower().split()
def _bm25(self, document: str) -> float:
"""Optimized BM25 implementation for head sections"""
doc_terms = self._tokenize(document)
doc_len = len(doc_terms)
tf = defaultdict(int)
for term in doc_terms:
tf[term] += 1
score = 0.0
for term in set(self.query_terms):
term_freq = tf[term]
idf = math.log((1 + 1) / (term_freq + 0.5) + 1) # Simplified IDF
numerator = term_freq * (self.k1 + 1)
denominator = term_freq + self.k1 * (
1 - self.b + self.b * (doc_len / self.avgdl)
)
score += idf * (numerator / denominator)
return score
class SEOFilter(URLFilter):
"""Quantitative SEO quality assessment filter using head section analysis"""
__slots__ = ("threshold", "_weights", "_kw_patterns")
# Based on SEMrush/Google ranking factors research
DEFAULT_WEIGHTS = {
"title_length": 0.15,
"title_kw": 0.18,
"meta_description": 0.12,
"canonical": 0.10,
"robot_ok": 0.20, # Most critical factor
"schema_org": 0.10,
"url_quality": 0.15,
}
def __init__(
self,
threshold: float = 0.65,
keywords: List[str] = None,
weights: Dict[str, float] = None,
):
super().__init__(name="SEOFilter")
self.threshold = threshold
self._weights = weights or self.DEFAULT_WEIGHTS
self._kw_patterns = (
re.compile(
r"\b({})\b".format("|".join(map(re.escape, keywords or []))), re.I
)
if keywords
else None
)
async def apply(self, url: str) -> bool:
head_content = await HeadPeekr.peek_html(url)
if not head_content:
self._update_stats(False)
return False
meta = HeadPeekr.extract_meta_tags(head_content)
title = HeadPeekr.get_title(head_content) or ""
parsed_url = urlparse(url)
scores = {
"title_length": self._score_title_length(title),
"title_kw": self._score_keyword_presence(title),
"meta_description": self._score_meta_description(
meta.get("description", "")
),
"canonical": self._score_canonical(meta.get("canonical"), url),
"robot_ok": 1.0 if "noindex" not in meta.get("robots", "") else 0.0,
"schema_org": self._score_schema_org(head_content),
"url_quality": self._score_url_quality(parsed_url),
}
total_score = sum(
weight * scores[factor] for factor, weight in self._weights.items()
)
decision = total_score >= self.threshold
self._update_stats(decision)
return decision
def _score_title_length(self, title: str) -> float:
length = len(title)
if 50 <= length <= 60:
return 1.0
if 40 <= length < 50 or 60 < length <= 70:
return 0.7
return 0.3 # Poor length
def _score_keyword_presence(self, text: str) -> float:
if not self._kw_patterns:
return 0.0
matches = len(self._kw_patterns.findall(text))
return min(matches * 0.3, 1.0) # Max 3 matches
def _score_meta_description(self, desc: str) -> float:
length = len(desc)
if 140 <= length <= 160:
return 1.0
return 0.5 if 120 <= length <= 200 else 0.2
def _score_canonical(self, canonical: str, original: str) -> float:
if not canonical:
return 0.5 # Neutral score
return 1.0 if canonical == original else 0.2
def _score_schema_org(self, html: str) -> float:
# Detect any schema.org markup in head
return (
1.0
if re.search(r'<script[^>]+type=["\']application/ld\+json', html)
else 0.0
)
def _score_url_quality(self, parsed_url) -> float:
score = 1.0
path = parsed_url.path.lower()
# Penalty factors
if len(path) > 80:
score *= 0.7
if re.search(r"\d{4}", path):
score *= 0.8 # Numbers in path
if parsed_url.query:
score *= 0.6 # URL parameters
if "_" in path:
score *= 0.9 # Underscores vs hyphens
return score
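A composition sketch showing how these filters are meant to be chained; the patterns, domains, and URL are illustrative:

    import asyncio
    from crawl4ai.deep_crawling.filters import (
        FilterChain, URLPatternFilter, DomainFilter, ContentTypeFilter,
    )

    chain = FilterChain([
        DomainFilter(allowed_domains=["example.com"]),
        URLPatternFilter(patterns=["*/blog/*"]),
        ContentTypeFilter(allowed_types=["text/html"]),
    ])

    async def main():
        # FilterChain.apply awaits async filters (e.g. ContentRelevanceFilter, SEOFilter)
        # and short-circuits on the first synchronous rejection.
        ok = await chain.apply("https://example.com/blog/post-1")
        print(ok)  # True

    asyncio.run(main())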

View File

@@ -1,519 +0,0 @@
from abc import ABC, abstractmethod
from typing import List, Dict, Optional
from dataclasses import dataclass
from urllib.parse import urlparse, unquote
import re
import logging
from functools import lru_cache
from array import array
import ctypes
import platform
PLATFORM = platform.system()
# Pre-computed scores for small distances from the optimal path depth (used by PathDepthScorer)
_SCORE_LOOKUP = [1.0, 0.5, 0.3333333333333333, 0.25]
# Pre-computed scores for common year differences
_FRESHNESS_SCORES = [
1.0, # Current year
0.9, # Last year
0.8, # 2 years ago
0.7, # 3 years ago
0.6, # 4 years ago
0.5, # 5 years ago
]
class ScoringStats:
__slots__ = ('_urls_scored', '_total_score', '_min_score', '_max_score')
def __init__(self):
self._urls_scored = 0
self._total_score = 0.0
self._min_score = None # Lazy initialization
self._max_score = None
def update(self, score: float) -> None:
"""Optimized update with minimal operations"""
self._urls_scored += 1
self._total_score += score
# Lazy min/max tracking - only if actually accessed
if self._min_score is not None:
if score < self._min_score:
self._min_score = score
if self._max_score is not None:
if score > self._max_score:
self._max_score = score
def get_average(self) -> float:
"""Direct calculation instead of property"""
return self._total_score / self._urls_scored if self._urls_scored else 0.0
def get_min(self) -> float:
"""Lazy min calculation"""
if self._min_score is None:
self._min_score = self._total_score / self._urls_scored if self._urls_scored else 0.0
return self._min_score
def get_max(self) -> float:
"""Lazy max calculation"""
if self._max_score is None:
self._max_score = self._total_score / self._urls_scored if self._urls_scored else 0.0
return self._max_score
class URLScorer(ABC):
__slots__ = ('_weight', '_stats')
def __init__(self, weight: float = 1.0):
# Store weight directly as float32 for memory efficiency
self._weight = ctypes.c_float(weight).value
self._stats = ScoringStats()
@abstractmethod
def _calculate_score(self, url: str) -> float:
"""Calculate raw score for URL."""
pass
def score(self, url: str) -> float:
"""Calculate weighted score with minimal overhead."""
score = self._calculate_score(url) * self._weight
self._stats.update(score)
return score
@property
def stats(self):
"""Access to scoring statistics."""
return self._stats
@property
def weight(self):
return self._weight
class CompositeScorer(URLScorer):
__slots__ = ('_scorers', '_normalize', '_weights_array', '_score_array')
def __init__(self, scorers: List[URLScorer], normalize: bool = True):
"""Initialize composite scorer combining multiple scoring strategies.
Optimized for:
- Fast parallel scoring
- Memory efficient score aggregation
- Quick short-circuit conditions
- Pre-allocated arrays
Args:
scorers: List of scoring strategies to combine
normalize: Whether to normalize final score by scorer count
"""
super().__init__(weight=1.0)
self._scorers = scorers
self._normalize = normalize
# Pre-allocate arrays for scores and weights
self._weights_array = array('f', [s.weight for s in scorers])
self._score_array = array('f', [0.0] * len(scorers))
@lru_cache(maxsize=10000)
def _calculate_score(self, url: str) -> float:
"""Calculate combined score from all scoring strategies.
Uses:
1. Pre-allocated arrays for scores
2. Short-circuit on zero scores
3. Optimized normalization
4. Vectorized operations where possible
Args:
url: URL to score
Returns:
Combined and optionally normalized score
"""
total_score = 0.0
scores = self._score_array
# Get scores from all scorers
for i, scorer in enumerate(self._scorers):
# Use public score() method which applies weight
scores[i] = scorer.score(url)
total_score += scores[i]
# Normalize if requested
if self._normalize and self._scorers:
count = len(self._scorers)
return total_score / count
return total_score
def score(self, url: str) -> float:
"""Public scoring interface with stats tracking.
Args:
url: URL to score
Returns:
Final combined score
"""
score = self._calculate_score(url)
self.stats.update(score)
return score
class KeywordRelevanceScorer(URLScorer):
__slots__ = ('_weight', '_stats', '_keywords', '_case_sensitive')
def __init__(self, keywords: List[str], weight: float = 1.0, case_sensitive: bool = False):
super().__init__(weight=weight)
self._case_sensitive = case_sensitive
# Pre-process keywords once
self._keywords = [k if case_sensitive else k.lower() for k in keywords]
@lru_cache(maxsize=10000)
def _url_bytes(self, url: str) -> bytes:
"""Cache decoded URL bytes"""
return url.encode('utf-8') if self._case_sensitive else url.lower().encode('utf-8')
def _calculate_score(self, url: str) -> float:
"""Fast string matching without regex or byte conversion"""
if not self._case_sensitive:
url = url.lower()
matches = sum(1 for k in self._keywords if k in url)
# Fast return paths
if not matches:
return 0.0
if matches == len(self._keywords):
return 1.0
return matches / len(self._keywords)
class PathDepthScorer(URLScorer):
__slots__ = ('_weight', '_stats', '_optimal_depth') # Remove _url_cache
def __init__(self, optimal_depth: int = 3, weight: float = 1.0):
super().__init__(weight=weight)
self._optimal_depth = optimal_depth
@staticmethod
@lru_cache(maxsize=10000)
def _quick_depth(path: str) -> int:
"""Ultra fast path depth calculation.
Examples:
- "http://example.com" -> 0 # No path segments
- "http://example.com/" -> 0 # Empty path
- "http://example.com/a" -> 1
- "http://example.com/a/b" -> 2
"""
if not path or path == '/':
return 0
if '/' not in path:
return 0
depth = 0
last_was_slash = True
for c in path:
if c == '/':
if not last_was_slash:
depth += 1
last_was_slash = True
else:
last_was_slash = False
if not last_was_slash:
depth += 1
return depth
@lru_cache(maxsize=10000) # Cache the whole calculation
def _calculate_score(self, url: str) -> float:
pos = url.find('/', url.find('://') + 3)
if pos == -1:
depth = 0
else:
depth = self._quick_depth(url[pos:])
# Use lookup table for common distances
distance = depth - self._optimal_depth
distance = distance if distance >= 0 else -distance # Faster than abs()
if distance < 4:
return _SCORE_LOOKUP[distance]
return 1.0 / (1.0 + distance)
class ContentTypeScorer(URLScorer):
__slots__ = ('_weight', '_exact_types', '_regex_types')
def __init__(self, type_weights: Dict[str, float], weight: float = 1.0):
"""Initialize scorer with type weights map.
Args:
type_weights: Dict mapping file extensions/patterns to scores (e.g. {'.html$': 1.0})
weight: Overall weight multiplier for this scorer
"""
super().__init__(weight=weight)
self._exact_types = {} # Fast lookup for simple extensions
self._regex_types = [] # Fallback for complex patterns
# Split into exact vs regex matchers for performance
for pattern, score in type_weights.items():
if pattern.startswith('.') and pattern.endswith('$'):
ext = pattern[1:-1]
self._exact_types[ext] = score
else:
self._regex_types.append((re.compile(pattern), score))
# Sort complex patterns by score for early exit
self._regex_types.sort(key=lambda x: -x[1])
@staticmethod
@lru_cache(maxsize=10000)
def _quick_extension(url: str) -> str:
"""Extract file extension ultra-fast without regex/splits.
Handles:
- Basic extensions: "example.html" -> "html"
- Query strings: "page.php?id=1" -> "php"
- Fragments: "doc.pdf#page=1" -> "pdf"
- Path params: "file.jpg;width=100" -> "jpg"
Args:
url: URL to extract extension from
Returns:
Extension without dot, or empty string if none found
"""
pos = url.rfind('.')
if pos == -1:
return ''
# Find first non-alphanumeric char after extension
end = len(url)
for i in range(pos + 1, len(url)):
c = url[i]
# Stop at query string, fragment, path param or any non-alphanumeric
if c in '?#;' or not c.isalnum():
end = i
break
return url[pos + 1:end].lower()
@lru_cache(maxsize=10000)
def _calculate_score(self, url: str) -> float:
"""Calculate content type score for URL.
Uses staged approach:
1. Try exact extension match (fast path)
2. Fall back to regex patterns if needed
Args:
url: URL to score
Returns:
Score between 0.0 and 1.0 * weight
"""
# Fast path: direct extension lookup
ext = self._quick_extension(url)
if ext:
score = self._exact_types.get(ext, None)
if score is not None:
return score
# Slow path: regex patterns
for pattern, score in self._regex_types:
if pattern.search(url):
return score
return 0.0
class FreshnessScorer(URLScorer):
__slots__ = ('_weight', '_date_pattern', '_current_year')
def __init__(self, weight: float = 1.0, current_year: int = 2024):
"""Initialize freshness scorer.
Extracts and scores dates from URLs using format:
- YYYY/MM/DD
- YYYY-MM-DD
- YYYY_MM_DD
- YYYY (year only)
Args:
weight: Score multiplier
current_year: Year to calculate freshness against (default 2024)
"""
super().__init__(weight=weight)
self._current_year = current_year
# Combined pattern for all date formats
# Uses non-capturing groups (?:) and alternation
self._date_pattern = re.compile(
r'(?:/' # Path separator
r'|[-_])' # or date separators
r'((?:19|20)\d{2})' # Year group (1900-2099)
r'(?:' # Optional month/day group
r'(?:/|[-_])' # Date separator
r'(?:\d{2})' # Month
r'(?:' # Optional day
r'(?:/|[-_])' # Date separator
r'(?:\d{2})' # Day
r')?' # Day is optional
r')?' # Month/day group is optional
)
@lru_cache(maxsize=10000)
def _extract_year(self, url: str) -> Optional[int]:
"""Extract the most recent year from URL.
Args:
url: URL to extract year from
Returns:
Year as int or None if no valid year found
"""
matches = self._date_pattern.finditer(url)
latest_year = None
# Find most recent year
for match in matches:
year = int(match.group(1))
if (year <= self._current_year and # Sanity check
(latest_year is None or year > latest_year)):
latest_year = year
return latest_year
@lru_cache(maxsize=10000)
def _calculate_score(self, url: str) -> float:
"""Calculate freshness score based on URL date.
More recent years score higher. Uses pre-computed scoring
table for common year differences.
Args:
url: URL to score
Returns:
Score between 0.0 and 1.0 * weight
"""
year = self._extract_year(url)
if year is None:
return 0.5 # Default score
# Use lookup table for common year differences
year_diff = self._current_year - year
if year_diff < len(_FRESHNESS_SCORES):
return _FRESHNESS_SCORES[year_diff]
# Fallback calculation for older content
return max(0.1, 1.0 - year_diff * 0.1)
class DomainAuthorityScorer(URLScorer):
__slots__ = ('_weight', '_domain_weights', '_default_weight', '_top_domains')
def __init__(
self,
domain_weights: Dict[str, float],
default_weight: float = 0.5,
weight: float = 1.0,
):
"""Initialize domain authority scorer.
Args:
domain_weights: Dict mapping domains to authority scores
default_weight: Score for unknown domains
weight: Overall scorer weight multiplier
Example:
{
'python.org': 1.0,
'github.com': 0.9,
'medium.com': 0.7
}
"""
super().__init__(weight=weight)
# Pre-process domains for faster lookup
self._domain_weights = {
domain.lower(): score
for domain, score in domain_weights.items()
}
self._default_weight = default_weight
# Cache top domains for fast path
self._top_domains = {
domain: score
for domain, score in sorted(
domain_weights.items(),
key=lambda x: -x[1]
)[:5] # Keep top 5 highest scoring domains
}
@staticmethod
@lru_cache(maxsize=10000)
def _extract_domain(url: str) -> str:
"""Extract domain from URL ultra-fast.
Handles:
- Basic domains: "example.com"
- Subdomains: "sub.example.com"
- Ports: "example.com:8080"
- IPv4: "192.168.1.1"
Args:
url: Full URL to extract domain from
Returns:
Lowercase domain without port
"""
# Find domain start
start = url.find('://')
if start == -1:
start = 0
else:
start += 3
# Find domain end
end = url.find('/', start)
if end == -1:
end = url.find('?', start)
if end == -1:
end = url.find('#', start)
if end == -1:
end = len(url)
# Extract domain and remove port
domain = url[start:end]
port_idx = domain.rfind(':')
if port_idx != -1:
domain = domain[:port_idx]
return domain.lower()
@lru_cache(maxsize=10000)
def _calculate_score(self, url: str) -> float:
"""Calculate domain authority score.
Uses staged approach:
1. Check top domains (fastest)
2. Check full domain weights
3. Return default weight
Args:
url: URL to score
Returns:
Authority score between 0.0 and 1.0 * weight
"""
domain = self._extract_domain(url)
# Fast path: check top domains first
score = self._top_domains.get(domain)
if score is not None:
return score
# Regular path: check all domains
return self._domain_weights.get(domain, self._default_weight)
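# ---------------------------------------------------------------------------
# Example usage (illustrative sketch, not part of the original module): wire
# the scorers defined above into a CompositeScorer and score one sample URL.
# The URL, keywords, and weights below are made up for demonstration only.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    composite = CompositeScorer(
        scorers=[
            KeywordRelevanceScorer(keywords=["python", "tutorial"], weight=1.0),
            PathDepthScorer(optimal_depth=2, weight=0.5),
            ContentTypeScorer(type_weights={".html$": 1.0, ".pdf$": 0.6}, weight=0.8),
            FreshnessScorer(weight=0.7, current_year=2024),
            DomainAuthorityScorer(domain_weights={"python.org": 1.0}, weight=1.0),
        ],
        normalize=True,
    )
    # Each sub-scorer applies its own weight; the composite averages the results.
    print(composite.score("https://python.org/2024/01/python-tutorial.html"))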

View File

@@ -1,168 +0,0 @@
from typing import List, Optional, Union, AsyncGenerator, Dict, Any
import httpx
import json
from urllib.parse import urljoin
import asyncio
from .async_configs import BrowserConfig, CrawlerRunConfig
from .models import CrawlResult
from .async_logger import AsyncLogger, LogLevel
class Crawl4aiClientError(Exception):
"""Base exception for Crawl4ai Docker client errors."""
pass
class ConnectionError(Crawl4aiClientError):
"""Raised when connection to the Docker server fails."""
pass
class RequestError(Crawl4aiClientError):
"""Raised when the server returns an error response."""
pass
class Crawl4aiDockerClient:
"""Client for interacting with Crawl4AI Docker server with token authentication."""
def __init__(
self,
base_url: str = "http://localhost:8000",
timeout: float = 30.0,
verify_ssl: bool = True,
verbose: bool = True,
log_file: Optional[str] = None
):
self.base_url = base_url.rstrip('/')
self.timeout = timeout
self.logger = AsyncLogger(log_file=log_file, log_level=LogLevel.DEBUG, verbose=verbose)
self._http_client = httpx.AsyncClient(
timeout=timeout,
verify=verify_ssl,
headers={"Content-Type": "application/json"}
)
self._token: Optional[str] = None
async def authenticate(self, email: str) -> None:
"""Authenticate with the server and store the token."""
url = urljoin(self.base_url, "/token")
try:
self.logger.info(f"Authenticating with email: {email}", tag="AUTH")
response = await self._http_client.post(url, json={"email": email})
response.raise_for_status()
data = response.json()
self._token = data["access_token"]
self._http_client.headers["Authorization"] = f"Bearer {self._token}"
self.logger.success("Authentication successful", tag="AUTH")
except (httpx.RequestError, httpx.HTTPStatusError) as e:
error_msg = f"Authentication failed: {str(e)}"
self.logger.error(error_msg, tag="ERROR")
raise ConnectionError(error_msg)
async def _check_server(self) -> None:
"""Check if server is reachable, raising an error if not."""
try:
await self._http_client.get(urljoin(self.base_url, "/health"))
self.logger.success(f"Connected to {self.base_url}", tag="READY")
except httpx.RequestError as e:
self.logger.error(f"Server unreachable: {str(e)}", tag="ERROR")
raise ConnectionError(f"Cannot connect to server: {str(e)}")
def _prepare_request(self, urls: List[str], browser_config: Optional[BrowserConfig] = None,
crawler_config: Optional[CrawlerRunConfig] = None) -> Dict[str, Any]:
"""Prepare request data from configs."""
if self._token:
self._http_client.headers["Authorization"] = f"Bearer {self._token}"
return {
"urls": urls,
"browser_config": browser_config.dump() if browser_config else {},
"crawler_config": crawler_config.dump() if crawler_config else {}
}
async def _request(self, method: str, endpoint: str, **kwargs) -> httpx.Response:
"""Make an HTTP request with error handling."""
url = urljoin(self.base_url, endpoint)
try:
response = await self._http_client.request(method, url, **kwargs)
response.raise_for_status()
return response
except httpx.TimeoutException as e:
raise ConnectionError(f"Request timed out: {str(e)}")
except httpx.RequestError as e:
raise ConnectionError(f"Failed to connect: {str(e)}")
except httpx.HTTPStatusError as e:
error_msg = (e.response.json().get("detail", str(e))
if "application/json" in e.response.headers.get("content-type", "")
else str(e))
raise RequestError(f"Server error {e.response.status_code}: {error_msg}")
async def crawl(
self,
urls: List[str],
browser_config: Optional[BrowserConfig] = None,
crawler_config: Optional[CrawlerRunConfig] = None
) -> Union[CrawlResult, List[CrawlResult], AsyncGenerator[CrawlResult, None]]:
"""Execute a crawl operation."""
await self._check_server()
data = self._prepare_request(urls, browser_config, crawler_config)
is_streaming = crawler_config and crawler_config.stream
self.logger.info(f"Crawling {len(urls)} URLs {'(streaming)' if is_streaming else ''}", tag="CRAWL")
if is_streaming:
async def stream_results() -> AsyncGenerator[CrawlResult, None]:
async with self._http_client.stream("POST", f"{self.base_url}/crawl/stream", json=data) as response:
response.raise_for_status()
async for line in response.aiter_lines():
if line.strip():
result = json.loads(line)
if "error" in result:
self.logger.error_status(url=result.get("url", "unknown"), error=result["error"])
continue
self.logger.url_status(url=result.get("url", "unknown"), success=True, timing=result.get("timing", 0.0))
if result.get("status") == "completed":
continue
else:
yield CrawlResult(**result)
return stream_results()
response = await self._request("POST", "/crawl", json=data)
result_data = response.json()
if not result_data.get("success", False):
raise RequestError(f"Crawl failed: {result_data.get('msg', 'Unknown error')}")
results = [CrawlResult(**r) for r in result_data.get("results", [])]
self.logger.success(f"Crawl completed with {len(results)} results", tag="CRAWL")
return results[0] if len(results) == 1 else results
async def get_schema(self) -> Dict[str, Any]:
"""Retrieve configuration schemas."""
response = await self._request("GET", "/schema")
return response.json()
async def close(self) -> None:
"""Close the HTTP client session."""
self.logger.info("Closing client", tag="CLOSE")
await self._http_client.aclose()
async def __aenter__(self) -> "Crawl4aiDockerClient":
return self
async def __aexit__(self, exc_type: Optional[type], exc_val: Optional[Exception], exc_tb: Optional[Any]) -> None:
await self.close()
# Example usage
async def main():
async with Crawl4aiDockerClient(verbose=True) as client:
await client.authenticate("user@example.com")
result = await client.crawl(["https://example.com"])
print(result)
schema = await client.get_schema()
print(schema)
if __name__ == "__main__":
asyncio.run(main())
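# Streaming sketch (illustrative, not part of the original example): assumes
# CrawlerRunConfig accepts stream=True, which is the flag crawl() checks above.
# The email address and URL are placeholders.
async def streaming_example():
    async with Crawl4aiDockerClient(verbose=True) as client:
        await client.authenticate("user@example.com")
        config = CrawlerRunConfig(stream=True)
        # crawl() returns an async generator when streaming is enabled.
        async for result in await client.crawl(["https://example.com"], crawler_config=config):
            print(result.url)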

File diff suppressed because it is too large

View File

@@ -54,13 +54,13 @@ class HTML2Text(html.parser.HTMLParser):
self.td_count = 0
self.table_start = False
self.unicode_snob = config.UNICODE_SNOB # covered in cli
self.escape_snob = config.ESCAPE_SNOB # covered in cli
self.escape_backslash = config.ESCAPE_BACKSLASH # covered in cli
self.escape_dot = config.ESCAPE_DOT # covered in cli
self.escape_plus = config.ESCAPE_PLUS # covered in cli
self.escape_dash = config.ESCAPE_DASH # covered in cli
self.links_each_paragraph = config.LINKS_EACH_PARAGRAPH
self.body_width = bodywidth # covered in cli
self.skip_internal_links = config.SKIP_INTERNAL_LINKS # covered in cli
@@ -144,8 +144,8 @@ class HTML2Text(html.parser.HTMLParser):
def update_params(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
setattr(self, key, value)
def feed(self, data: str) -> None:
data = data.replace("</' + 'script>", "</ignore>")
super().feed(data)
@@ -510,7 +510,6 @@ class HTML2Text(html.parser.HTMLParser):
if tag == "a" and not self.ignore_links:
if start:
self.inside_link = True
if (
"href" in attrs
and attrs["href"] is not None
@@ -527,7 +526,6 @@ class HTML2Text(html.parser.HTMLParser):
else:
self.astack.append(None)
else:
self.inside_link = False
if self.astack:
a = self.astack.pop()
if self.maybe_automatic_link and not self.empty_link:
@@ -612,22 +610,13 @@ class HTML2Text(html.parser.HTMLParser):
self.o("[" + str(a_props.count) + "]")
if tag == "dl" and start:
self.p() # Add paragraph break before list starts
self.p_p = 0 # Reset paragraph state
elif tag == "dt" and start:
if self.p_p == 0: # If not first term
self.o("\n\n") # Add spacing before new term-definition pair
self.p_p = 0 # Reset paragraph state
elif tag == "dt" and not start:
self.o("\n") # Single newline between term and definition
elif tag == "dd" and start:
self.o(" ") # Indent definition
elif tag == "dd" and not start:
self.p_p = 0
self.p()
if tag == "dt" and not start:
self.pbr()
if tag == "dd" and start:
self.o(" ")
if tag == "dd" and not start:
self.pbr()
if tag in ["ol", "ul"]:
# Google Docs create sub lists as top level lists
@@ -914,13 +903,7 @@ class HTML2Text(html.parser.HTMLParser):
self.empty_link = False
if not self.code and not self.pre and not entity_char:
data = escape_md_section(
data,
snob=self.escape_snob,
escape_dot=self.escape_dot,
escape_plus=self.escape_plus,
escape_dash=self.escape_dash,
)
data = escape_md_section(data, snob=self.escape_snob, escape_dot=self.escape_dot, escape_plus=self.escape_plus, escape_dash=self.escape_dash)
self.preceding_data = data
self.o(data, puredata=True)
@@ -1030,141 +1013,3 @@ def html2text(html: str, baseurl: str = "", bodywidth: Optional[int] = None) ->
h = HTML2Text(baseurl=baseurl, bodywidth=bodywidth)
return h.handle(html)
class CustomHTML2Text(HTML2Text):
def __init__(self, *args, handle_code_in_pre=False, **kwargs):
super().__init__(*args, **kwargs)
self.inside_pre = False
self.inside_code = False
self.inside_link = False
self.preserve_tags = set() # Set of tags to preserve
self.current_preserved_tag = None
self.preserved_content = []
self.preserve_depth = 0
self.handle_code_in_pre = handle_code_in_pre
# Configuration options
self.skip_internal_links = False
self.single_line_break = False
self.mark_code = False
self.include_sup_sub = False
self.body_width = 0
self.ignore_mailto_links = True
self.ignore_links = False
self.escape_backslash = False
self.escape_dot = False
self.escape_plus = False
self.escape_dash = False
self.escape_snob = False
def update_params(self, **kwargs):
"""Update parameters and set preserved tags."""
for key, value in kwargs.items():
if key == "preserve_tags":
self.preserve_tags = set(value)
elif key == "handle_code_in_pre":
self.handle_code_in_pre = value
else:
setattr(self, key, value)
def handle_tag(self, tag, attrs, start):
# Handle preserved tags
if tag in self.preserve_tags:
if start:
if self.preserve_depth == 0:
self.current_preserved_tag = tag
self.preserved_content = []
# Format opening tag with attributes
attr_str = "".join(
f' {k}="{v}"' for k, v in attrs.items() if v is not None
)
self.preserved_content.append(f"<{tag}{attr_str}>")
self.preserve_depth += 1
return
else:
self.preserve_depth -= 1
if self.preserve_depth == 0:
self.preserved_content.append(f"</{tag}>")
# Output the preserved HTML block with proper spacing
preserved_html = "".join(self.preserved_content)
self.o("\n" + preserved_html + "\n")
self.current_preserved_tag = None
return
# If we're inside a preserved tag, collect all content
if self.preserve_depth > 0:
if start:
# Format nested tags with attributes
attr_str = "".join(
f' {k}="{v}"' for k, v in attrs.items() if v is not None
)
self.preserved_content.append(f"<{tag}{attr_str}>")
else:
self.preserved_content.append(f"</{tag}>")
return
# Handle pre tags
if tag == "pre":
if start:
self.o("```\n") # Markdown code block start
self.inside_pre = True
else:
self.o("\n```\n") # Markdown code block end
self.inside_pre = False
elif tag == "code":
if self.inside_pre and not self.handle_code_in_pre:
# Ignore code tags inside pre blocks if handle_code_in_pre is False
return
if start:
if not self.inside_link:
self.o("`") # Only output backtick if not inside a link
self.inside_code = True
else:
if not self.inside_link:
self.o("`") # Only output backtick if not inside a link
self.inside_code = False
# If inside a link, let the parent class handle the content
if self.inside_link:
super().handle_tag(tag, attrs, start)
else:
super().handle_tag(tag, attrs, start)
def handle_data(self, data, entity_char=False):
"""Override handle_data to capture content within preserved tags."""
if self.preserve_depth > 0:
self.preserved_content.append(data)
return
if self.inside_pre:
# Output the raw content for pre blocks, including content inside code tags
self.o(data) # Directly output the data as-is (preserve newlines)
return
if self.inside_code:
# Inline code: no newlines allowed
self.o(data.replace("\n", " "))
return
# Default behavior for other tags
super().handle_data(data, entity_char)
# # Handle pre tags
# if tag == 'pre':
# if start:
# self.o('```\n')
# self.inside_pre = True
# else:
# self.o('\n```')
# self.inside_pre = False
# # elif tag in ["h1", "h2", "h3", "h4", "h5", "h6"]:
# # pass
# else:
# super().handle_tag(tag, attrs, start)
# def handle_data(self, data, entity_char=False):
# """Override handle_data to capture content within preserved tags."""
# if self.preserve_depth > 0:
# self.preserved_content.append(data)
# return
# super().handle_data(data, entity_char)
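# Example usage (illustrative sketch; the HTML fragment below is made up):
# convert a snippet to Markdown while preserving <table> markup verbatim.
if __name__ == "__main__":
    converter = CustomHTML2Text(handle_code_in_pre=True)
    converter.update_params(preserve_tags={"table"})
    sample_html = (
        "<h1>Title</h1>"
        "<pre><code>print('hello')</code></pre>"
        "<table><tr><td>cell</td></tr></table>"
    )
    print(converter.handle(sample_html))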

View File

@@ -1,3 +1,2 @@
class OutCallback:
def __call__(self, s: str) -> None:
...
def __call__(self, s: str) -> None: ...

View File

@@ -210,7 +210,7 @@ def escape_md_section(
snob: bool = False,
escape_dot: bool = True,
escape_plus: bool = True,
escape_dash: bool = True,
escape_dash: bool = True
) -> str:
"""
Escapes markdown-sensitive characters across whole document sections.
@@ -233,7 +233,6 @@ def escape_md_section(
return text
def reformat_table(lines: List[str], right_margin: int) -> List[str]:
"""
Given the lines of a table

View File

@@ -1,69 +0,0 @@
# crawl4ai/hub.py
from abc import ABC, abstractmethod
from typing import Dict, Type, Union
import logging
import importlib
from pathlib import Path
import inspect
logger = logging.getLogger(__name__)
class BaseCrawler(ABC):
def __init__(self):
self.logger = logging.getLogger(self.__class__.__name__)
@abstractmethod
async def run(self, url: str = "", **kwargs) -> str:
"""
Implement this method to return JSON string.
Must accept URL + arbitrary kwargs for flexibility.
"""
pass
def __init_subclass__(cls, **kwargs):
"""Enforce interface validation on subclassing"""
super().__init_subclass__(**kwargs)
# Verify run method signature
run_method = cls.run
if not run_method.__code__.co_argcount >= 2: # self + url
raise TypeError(f"{cls.__name__} must implement 'run(self, url: str, **kwargs)'")
# Verify async nature
if not inspect.iscoroutinefunction(run_method):
raise TypeError(f"{cls.__name__}.run must be async")
class CrawlerHub:
_crawlers: Dict[str, Type[BaseCrawler]] = {}
@classmethod
def _discover_crawlers(cls):
"""Dynamically load crawlers from /crawlers in 3 lines"""
base_path = Path(__file__).parent / "crawlers"
for crawler_dir in base_path.iterdir():
if crawler_dir.is_dir():
try:
module = importlib.import_module(
f"crawl4ai.crawlers.{crawler_dir.name}.crawler"
)
for attr in dir(module):
cls._maybe_register_crawler(
getattr(module, attr), crawler_dir.name
)
except Exception as e:
logger.warning(f"Failed {crawler_dir.name}: {str(e)}")
@classmethod
def _maybe_register_crawler(cls, obj, name: str):
"""Brilliant one-liner registration"""
if isinstance(obj, type) and issubclass(obj, BaseCrawler) and obj != BaseCrawler:
module = importlib.import_module(obj.__module__)
obj.meta = getattr(module, "__meta__", {})
cls._crawlers[name] = obj
@classmethod
def get(cls, name: str) -> Union[Type[BaseCrawler], None]:
if not cls._crawlers:
cls._discover_crawlers()
return cls._crawlers.get(name)
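# Example usage (illustrative sketch): a minimal BaseCrawler subclass that
# satisfies the __init_subclass__ checks above. The "example_site" name is
# hypothetical; CrawlerHub.get() only discovers crawlers that actually live
# under crawl4ai/crawlers/<name>/crawler.py.
class ExampleCrawler(BaseCrawler):
    async def run(self, url: str = "", **kwargs) -> str:
        # The contract requires returning a JSON string.
        return '{"url": "%s", "status": "ok"}' % url

# crawler_cls = CrawlerHub.get("example_site")
# if crawler_cls:
#     result_json = asyncio.run(crawler_cls().run("https://example.com"))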

View File

@@ -1,212 +0,0 @@
import subprocess
import sys
import asyncio
from .async_logger import AsyncLogger, LogLevel
from pathlib import Path
import os
import shutil
# Initialize logger
logger = AsyncLogger(log_level=LogLevel.DEBUG, verbose=True)
def setup_home_directory():
"""Set up the .crawl4ai folder structure in the user's home directory."""
base_dir = os.getenv("CRAWL4_AI_BASE_DIRECTORY")
crawl4ai_folder = Path(base_dir) if base_dir else Path.home()
crawl4ai_config = crawl4ai_folder / "global.yml"
crawl4ai_folder = crawl4ai_folder / ".crawl4ai"
cache_folder = crawl4ai_folder / "cache"
content_folders = [
"html_content",
"cleaned_html",
"markdown_content",
"extracted_content",
"screenshots",
]
# Clean up the old cache if it exists
if cache_folder.exists():
shutil.rmtree(cache_folder)
# Create new folder structure
crawl4ai_folder.mkdir(exist_ok=True)
cache_folder.mkdir(exist_ok=True)
for folder in content_folders:
(crawl4ai_folder / folder).mkdir(exist_ok=True)
# If config file does not exist, create it
if not crawl4ai_config.exists():
with open(crawl4ai_config, "w") as f:
f.write("")
def post_install():
"""
Run all post-installation tasks.
Checks CRAWL4AI_MODE environment variable. If set to 'api',
skips Playwright browser installation.
"""
logger.info("Running post-installation setup...", tag="INIT")
setup_home_directory()
# Check environment variable to conditionally skip Playwright install
run_mode = os.getenv('CRAWL4AI_MODE')
if run_mode == 'api':
logger.warning(
"CRAWL4AI_MODE=api detected. Skipping Playwright browser installation.",
tag="SETUP"
)
else:
# Proceed with installation only if mode is not 'api'
install_playwright()
run_migration()
# TODO: Will be added in the future
# setup_builtin_browser()
logger.success("Post-installation setup completed!", tag="COMPLETE")
def setup_builtin_browser():
"""Set up a builtin browser for use with Crawl4AI"""
try:
logger.info("Setting up builtin browser...", tag="INIT")
asyncio.run(_setup_builtin_browser())
logger.success("Builtin browser setup completed!", tag="COMPLETE")
except Exception as e:
logger.warning(f"Failed to set up builtin browser: {e}")
logger.warning("You can manually set up a builtin browser using 'crawl4ai-doctor builtin-browser-start'")
async def _setup_builtin_browser():
try:
# Import BrowserProfiler here to avoid circular imports
from .browser_profiler import BrowserProfiler
profiler = BrowserProfiler(logger=logger)
# Launch the builtin browser
cdp_url = await profiler.launch_builtin_browser(headless=True)
if cdp_url:
logger.success(f"Builtin browser launched at {cdp_url}", tag="BROWSER")
else:
logger.warning("Failed to launch builtin browser", tag="BROWSER")
except Exception as e:
logger.warning(f"Error setting up builtin browser: {e}", tag="BROWSER")
raise
def install_playwright():
logger.info("Installing Playwright browsers...", tag="INIT")
try:
# subprocess.check_call([sys.executable, "-m", "playwright", "install", "--with-deps", "--force", "chrome"])
subprocess.check_call(
[
sys.executable,
"-m",
"playwright",
"install",
"--with-deps",
"--force",
"chromium",
]
)
logger.success(
"Playwright installation completed successfully.", tag="COMPLETE"
)
except subprocess.CalledProcessError:
# logger.error(f"Error during Playwright installation: {e}", tag="ERROR")
logger.warning(
f"Please run '{sys.executable} -m playwright install --with-deps' manually after the installation."
)
except Exception:
# logger.error(f"Unexpected error during Playwright installation: {e}", tag="ERROR")
logger.warning(
f"Please run '{sys.executable} -m playwright install --with-deps' manually after the installation."
)
# Install Patchright browsers for undetected browser support
logger.info("Installing Patchright browsers for undetected mode...", tag="INIT")
try:
subprocess.check_call(
[
sys.executable,
"-m",
"patchright",
"install",
"--with-deps",
"--force",
"chromium",
]
)
logger.success(
"Patchright installation completed successfully.", tag="COMPLETE"
)
except subprocess.CalledProcessError:
logger.warning(
f"Please run '{sys.executable} -m patchright install --with-deps' manually after the installation."
)
except Exception:
logger.warning(
f"Please run '{sys.executable} -m patchright install --with-deps' manually after the installation."
)
def run_migration():
"""Initialize database during installation"""
try:
logger.info("Starting database initialization...", tag="INIT")
from crawl4ai.async_database import async_db_manager
asyncio.run(async_db_manager.initialize())
logger.success(
"Database initialization completed successfully.", tag="COMPLETE"
)
except ImportError:
logger.warning("Database module not found. Will initialize on first use.")
except Exception as e:
logger.warning(f"Database initialization failed: {e}")
logger.warning("Database will be initialized on first use")
async def run_doctor():
"""Test if Crawl4AI is working properly"""
logger.info("Running Crawl4AI health check...", tag="INIT")
try:
from .async_webcrawler import (
AsyncWebCrawler,
BrowserConfig,
CrawlerRunConfig,
CacheMode,
)
browser_config = BrowserConfig(
headless=True,
browser_type="chromium",
ignore_https_errors=True,
light_mode=True,
viewport_width=1280,
viewport_height=720,
)
run_config = CrawlerRunConfig(
cache_mode=CacheMode.BYPASS,
screenshot=True,
)
async with AsyncWebCrawler(config=browser_config) as crawler:
logger.info("Testing crawling capabilities...", tag="TEST")
result = await crawler.arun(url="https://crawl4ai.com", config=run_config)
if result and result.markdown:
logger.success("✅ Crawling test passed!", tag="COMPLETE")
return True
else:
raise Exception("Failed to get content")
except Exception as e:
logger.error(f"❌ Test failed: {e}", tag="ERROR")
return False
def doctor():
"""Entry point for the doctor command"""
import asyncio
asyncio.run(run_doctor())
sys.exit(0)
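# Illustrative note (assumption): post_install() is normally invoked by the
# package's post-install console script right after installation, and doctor()
# backs the health-check command referenced above. Both can also be called
# directly from Python:
# post_install()
# doctor()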

View File

@@ -1,18 +0,0 @@
import os
# Load a JS snippet by name from the folder containing this module and return its content as a string.
def load_js_script(script_name):
# Get the path of the current script
current_script_path = os.path.dirname(os.path.realpath(__file__))
# Get the path of the script to load
script_path = os.path.join(current_script_path, script_name + ".js")
# Check if the script exists
if not os.path.exists(script_path):
raise ValueError(
f"Script {script_name} not found in the folder {current_script_path}"
)
# Load the content of the script
with open(script_path, "r") as f:
script_content = f.read()
return script_content

View File

@@ -1,25 +0,0 @@
// Pass the Permissions Test.
const originalQuery = window.navigator.permissions.query;
window.navigator.permissions.query = (parameters) =>
parameters.name === "notifications"
? Promise.resolve({ state: Notification.permission })
: originalQuery(parameters);
Object.defineProperty(navigator, "webdriver", {
get: () => undefined,
});
window.navigator.chrome = {
runtime: {},
// Add other properties if necessary
};
Object.defineProperty(navigator, "plugins", {
get: () => [1, 2, 3, 4, 5],
});
Object.defineProperty(navigator, "languages", {
get: () => ["en-US", "en"],
});
Object.defineProperty(document, "hidden", {
get: () => false,
});
Object.defineProperty(document, "visibilityState", {
get: () => "visible",
});

View File

@@ -1,120 +0,0 @@
async () => {
// Function to check if element is visible
const isVisible = (elem) => {
const style = window.getComputedStyle(elem);
return style.display !== "none" && style.visibility !== "hidden" && style.opacity !== "0";
};
// Common selectors for popups and overlays
const commonSelectors = [
// Close buttons first
'button[class*="close" i]',
'button[class*="dismiss" i]',
'button[aria-label*="close" i]',
'button[title*="close" i]',
'a[class*="close" i]',
'span[class*="close" i]',
// Cookie notices
'[class*="cookie-banner" i]',
'[id*="cookie-banner" i]',
'[class*="cookie-consent" i]',
'[id*="cookie-consent" i]',
// Newsletter/subscription dialogs
'[class*="newsletter" i]',
'[class*="subscribe" i]',
// Generic popups/modals
'[class*="popup" i]',
'[class*="modal" i]',
'[class*="overlay" i]',
'[class*="dialog" i]',
'[role="dialog"]',
'[role="alertdialog"]',
];
// Try to click close buttons first
for (const selector of commonSelectors.slice(0, 6)) {
const closeButtons = document.querySelectorAll(selector);
for (const button of closeButtons) {
if (isVisible(button)) {
try {
button.click();
await new Promise((resolve) => setTimeout(resolve, 100));
} catch (e) {
console.log("Error clicking button:", e);
}
}
}
}
// Remove remaining overlay elements
const removeOverlays = () => {
// Find elements with high z-index
const allElements = document.querySelectorAll("*");
for (const elem of allElements) {
const style = window.getComputedStyle(elem);
const zIndex = parseInt(style.zIndex);
const position = style.position;
if (
isVisible(elem) &&
(zIndex > 999 || position === "fixed" || position === "absolute") &&
(elem.offsetWidth > window.innerWidth * 0.5 ||
elem.offsetHeight > window.innerHeight * 0.5 ||
style.backgroundColor.includes("rgba") ||
parseFloat(style.opacity) < 1)
) {
elem.remove();
}
}
// Remove elements matching common selectors
for (const selector of commonSelectors) {
const elements = document.querySelectorAll(selector);
elements.forEach((elem) => {
if (isVisible(elem)) {
elem.remove();
}
});
}
};
// Remove overlay elements
removeOverlays();
// Remove any fixed/sticky position elements at the top/bottom
const removeFixedElements = () => {
const elements = document.querySelectorAll("*");
elements.forEach((elem) => {
const style = window.getComputedStyle(elem);
if ((style.position === "fixed" || style.position === "sticky") && isVisible(elem)) {
elem.remove();
}
});
};
removeFixedElements();
// Remove empty block elements as: div, p, span, etc.
const removeEmptyBlockElements = () => {
const blockElements = document.querySelectorAll(
"div, p, span, section, article, header, footer, aside, nav, main, ul, ol, li, dl, dt, dd, h1, h2, h3, h4, h5, h6"
);
blockElements.forEach((elem) => {
if (elem.innerText.trim() === "") {
elem.remove();
}
});
};
// Remove margin-right and padding-right from body (often added by modal scripts)
document.body.style.marginRight = "0px";
document.body.style.paddingRight = "0px";
document.body.style.overflow = "auto";
// Wait a bit for any animations to complete
document.body.scrollIntoView(false);
await new Promise((resolve) => setTimeout(resolve, 50));
};

View File

@@ -1,54 +0,0 @@
() => {
return new Promise((resolve) => {
const filterImage = (img) => {
// Filter out images that are too small
if (img.width < 100 && img.height < 100) return false;
// Filter out images that are not visible
const rect = img.getBoundingClientRect();
if (rect.width === 0 || rect.height === 0) return false;
// Filter out images with certain class names (e.g., icons, thumbnails)
if (img.classList.contains("icon") || img.classList.contains("thumbnail")) return false;
// Filter out images with certain patterns in their src (e.g., placeholder images)
if (img.src.includes("placeholder") || img.src.includes("icon")) return false;
return true;
};
const images = Array.from(document.querySelectorAll("img")).filter(filterImage);
let imagesLeft = images.length;
if (imagesLeft === 0) {
resolve();
return;
}
const checkImage = (img) => {
if (img.complete && img.naturalWidth !== 0) {
img.setAttribute("width", img.naturalWidth);
img.setAttribute("height", img.naturalHeight);
imagesLeft--;
if (imagesLeft === 0) resolve();
}
};
images.forEach((img) => {
checkImage(img);
if (!img.complete) {
img.onload = () => {
checkImage(img);
};
img.onerror = () => {
imagesLeft--;
if (imagesLeft === 0) resolve();
};
}
});
// Fallback timeout of 5 seconds
// setTimeout(() => resolve(), 5000);
resolve();
});
};

View File

@@ -1,123 +0,0 @@
import click
import sys
import asyncio
from typing import List
from .docs_manager import DocsManager
from .async_logger import AsyncLogger
logger = AsyncLogger(verbose=True)
docs_manager = DocsManager(logger)
def print_table(headers: List[str], rows: List[List[str]], padding: int = 2):
"""Print formatted table with headers and rows"""
widths = [max(len(str(cell)) for cell in col) for col in zip(headers, *rows)]
border = "+" + "+".join("-" * (w + 2 * padding) for w in widths) + "+"
def format_row(row):
return (
"|"
+ "|".join(
f"{' ' * padding}{str(cell):<{w}}{' ' * padding}"
for cell, w in zip(row, widths)
)
+ "|"
)
click.echo(border)
click.echo(format_row(headers))
click.echo(border)
for row in rows:
click.echo(format_row(row))
click.echo(border)
@click.group()
def cli():
"""Crawl4AI Command Line Interface"""
pass
@cli.group()
def docs():
"""Documentation operations"""
pass
@docs.command()
@click.argument("sections", nargs=-1)
@click.option(
"--mode", type=click.Choice(["extended", "condensed"]), default="extended"
)
def combine(sections: tuple, mode: str):
"""Combine documentation sections"""
try:
asyncio.run(docs_manager.ensure_docs_exist())
click.echo(docs_manager.generate(sections, mode))
except Exception as e:
logger.error(str(e), tag="ERROR")
sys.exit(1)
@docs.command()
@click.argument("query")
@click.option("--top-k", "-k", default=5)
@click.option("--build-index", is_flag=True, help="Build index if missing")
def search(query: str, top_k: int, build_index: bool):
"""Search documentation"""
try:
result = docs_manager.search(query, top_k)
if result == "No search index available. Call build_search_index() first.":
if build_index or click.confirm("No search index found. Build it now?"):
asyncio.run(docs_manager.llm_text.generate_index_files())
result = docs_manager.search(query, top_k)
click.echo(result)
except Exception as e:
click.echo(f"Error: {str(e)}", err=True)
sys.exit(1)
@docs.command()
def update():
"""Update docs from GitHub"""
try:
asyncio.run(docs_manager.fetch_docs())
click.echo("Documentation updated successfully")
except Exception as e:
click.echo(f"Error: {str(e)}", err=True)
sys.exit(1)
@docs.command()
@click.option("--force-facts", is_flag=True, help="Force regenerate fact files")
@click.option("--clear-cache", is_flag=True, help="Clear BM25 cache")
def index(force_facts: bool, clear_cache: bool):
"""Build or rebuild search indexes"""
try:
asyncio.run(docs_manager.ensure_docs_exist())
asyncio.run(
docs_manager.llm_text.generate_index_files(
force_generate_facts=force_facts, clear_bm25_cache=clear_cache
)
)
click.echo("Search indexes built successfully")
except Exception as e:
click.echo(f"Error: {str(e)}", err=True)
sys.exit(1)
# Add docs list command
@docs.command()
def list():
"""List available documentation sections"""
try:
sections = docs_manager.list()
print_table(["Sections"], [[section] for section in sections])
except Exception as e:
click.echo(f"Error: {str(e)}", err=True)
sys.exit(1)
if __name__ == "__main__":
cli()
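# Invocation sketch (illustrative; uses click's test runner so no console
# script name has to be assumed):
# from click.testing import CliRunner
# runner = CliRunner()
# print(runner.invoke(cli, ["docs", "list"]).output)
# print(runner.invoke(cli, ["docs", "search", "deep crawling", "-k", "3"]).output)
# print(runner.invoke(cli, ["docs", "combine", "browser", "--mode", "condensed"]).output)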

View File

@@ -1,75 +0,0 @@
import requests
import shutil
from pathlib import Path
from crawl4ai.async_logger import AsyncLogger
from crawl4ai.llmtxt import AsyncLLMTextManager
class DocsManager:
def __init__(self, logger=None):
self.docs_dir = Path.home() / ".crawl4ai" / "docs"
self.local_docs = Path(__file__).parent.parent / "docs" / "llm.txt"
self.docs_dir.mkdir(parents=True, exist_ok=True)
self.logger = logger or AsyncLogger(verbose=True)
self.llm_text = AsyncLLMTextManager(self.docs_dir, self.logger)
async def ensure_docs_exist(self):
"""Fetch docs if not present"""
if not any(self.docs_dir.iterdir()):
await self.fetch_docs()
async def fetch_docs(self) -> bool:
"""Copy from local docs or download from GitHub"""
try:
# Try local first
if self.local_docs.exists() and (
any(self.local_docs.glob("*.md"))
or any(self.local_docs.glob("*.tokens"))
):
# Empty the local docs directory
for file_path in self.docs_dir.glob("*.md"):
file_path.unlink()
# for file_path in self.docs_dir.glob("*.tokens"):
# file_path.unlink()
for file_path in self.local_docs.glob("*.md"):
shutil.copy2(file_path, self.docs_dir / file_path.name)
# for file_path in self.local_docs.glob("*.tokens"):
# shutil.copy2(file_path, self.docs_dir / file_path.name)
return True
# Fallback to GitHub
response = requests.get(
"https://api.github.com/repos/unclecode/crawl4ai/contents/docs/llm.txt",
headers={"Accept": "application/vnd.github.v3+json"},
)
response.raise_for_status()
for item in response.json():
if item["type"] == "file" and item["name"].endswith(".md"):
content = requests.get(item["download_url"]).text
with open(self.docs_dir / item["name"], "w", encoding="utf-8") as f:
f.write(content)
return True
except Exception as e:
self.logger.error(f"Failed to fetch docs: {str(e)}")
raise
def list(self) -> list[str]:
"""List available topics"""
names = [file_path.stem for file_path in self.docs_dir.glob("*.md")]
# Remove [0-9]+_ prefix
names = [name.split("_", 1)[1] if name[0].isdigit() else name for name in names]
# Exclude those end with .xs.md and .q.md
names = [
name
for name in names
if not name.endswith(".xs") and not name.endswith(".q")
]
return names
def generate(self, sections, mode="extended"):
return self.llm_text.generate(sections, mode)
def search(self, query: str, top_k: int = 5):
return self.llm_text.search(query, top_k)
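# Example usage (illustrative sketch; the topic name "browser" is a placeholder):
# import asyncio
# dm = DocsManager()
# asyncio.run(dm.ensure_docs_exist())
# print(dm.list())
# print(dm.generate(["browser"], mode="condensed")[:500])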

View File

@@ -1,546 +0,0 @@
import os
from pathlib import Path
import re
from typing import Dict, List, Tuple, Optional, Any
import json
from tqdm import tqdm
import time
import psutil
import numpy as np
from rank_bm25 import BM25Okapi
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from litellm import batch_completion
from .async_logger import AsyncLogger
import litellm
import pickle
import hashlib # <--- ADDED for file-hash
import glob
litellm.set_verbose = False
def _compute_file_hash(file_path: Path) -> str:
"""Compute MD5 hash for the file's entire content."""
hash_md5 = hashlib.md5()
with file_path.open("rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
class AsyncLLMTextManager:
def __init__(
self,
docs_dir: Path,
logger: Optional[AsyncLogger] = None,
max_concurrent_calls: int = 5,
batch_size: int = 3,
) -> None:
self.docs_dir = docs_dir
self.logger = logger
self.max_concurrent_calls = max_concurrent_calls
self.batch_size = batch_size
self.bm25_index = None
self.document_map: Dict[str, Any] = {}
self.tokenized_facts: List[str] = []
self.bm25_index_file = self.docs_dir / "bm25_index.pkl"
async def _process_document_batch(self, doc_batch: List[Path]) -> None:
"""Process a batch of documents in parallel"""
contents = []
for file_path in doc_batch:
try:
with open(file_path, "r", encoding="utf-8") as f:
contents.append(f.read())
except Exception as e:
self.logger.error(f"Error reading {file_path}: {str(e)}")
contents.append("") # Add empty content to maintain batch alignment
prompt = """Given a documentation file, generate a list of atomic facts where each fact:
1. Represents a single piece of knowledge
2. Contains variations in terminology for the same concept
3. References relevant code patterns if they exist
4. Is written in a way that would match natural language queries
Each fact should follow this format:
<main_concept>: <fact_statement> | <related_terms> | <code_reference>
Example Facts:
browser_config: Configure headless mode and browser type for AsyncWebCrawler | headless, browser_type, chromium, firefox | BrowserConfig(browser_type="chromium", headless=True)
redis_connection: Redis client connection requires host and port configuration | redis setup, redis client, connection params | Redis(host='localhost', port=6379, db=0)
pandas_filtering: Filter DataFrame rows using boolean conditions | dataframe filter, query, boolean indexing | df[df['column'] > 5]
Wrap your response in <index>...</index> tags.
"""
# Prepare messages for batch processing
messages_list = [
[
{
"role": "user",
"content": f"{prompt}\n\nGenerate index for this documentation:\n\n{content}",
}
]
for content in contents
if content
]
try:
responses = batch_completion(
model="anthropic/claude-3-5-sonnet-latest",
messages=messages_list,
logger_fn=None,
)
# Process responses and save index files
for response, file_path in zip(responses, doc_batch):
try:
index_content_match = re.search(
r"<index>(.*?)</index>",
response.choices[0].message.content,
re.DOTALL,
)
if not index_content_match:
self.logger.warning(
f"No <index>...</index> content found for {file_path}"
)
continue
index_content = re.sub(
r"\n\s*\n", "\n", index_content_match.group(1)
).strip()
if index_content:
index_file = file_path.with_suffix(".q.md")
with open(index_file, "w", encoding="utf-8") as f:
f.write(index_content)
self.logger.info(f"Created index file: {index_file}")
else:
self.logger.warning(
f"No index content found in response for {file_path}"
)
except Exception as e:
self.logger.error(
f"Error processing response for {file_path}: {str(e)}"
)
except Exception as e:
self.logger.error(f"Error in batch completion: {str(e)}")
def _validate_fact_line(self, line: str) -> Tuple[bool, Optional[str]]:
if "|" not in line:
return False, "Missing separator '|'"
parts = [p.strip() for p in line.split("|")]
if len(parts) != 3:
return False, f"Expected 3 parts, got {len(parts)}"
concept_part = parts[0]
if ":" not in concept_part:
return False, "Missing ':' in concept definition"
return True, None
def _load_or_create_token_cache(self, fact_file: Path) -> Dict:
"""
Load token cache from .q.tokens if present and matching file hash.
Otherwise return a new structure with updated file-hash.
"""
cache_file = fact_file.with_suffix(".q.tokens")
current_hash = _compute_file_hash(fact_file)
if cache_file.exists():
try:
with open(cache_file, "r") as f:
cache = json.load(f)
# If the hash matches, return it directly
if cache.get("content_hash") == current_hash:
return cache
# Otherwise, we signal that it's changed
self.logger.info(f"Hash changed for {fact_file}, reindex needed.")
except json.JSONDecodeError:
self.logger.warning(f"Corrupt token cache for {fact_file}, rebuilding.")
except Exception as e:
self.logger.warning(f"Error reading cache for {fact_file}: {str(e)}")
# Return a fresh cache
return {"facts": {}, "content_hash": current_hash}
def _save_token_cache(self, fact_file: Path, cache: Dict) -> None:
cache_file = fact_file.with_suffix(".q.tokens")
# Always ensure we're saving the correct file-hash
cache["content_hash"] = _compute_file_hash(fact_file)
with open(cache_file, "w") as f:
json.dump(cache, f)
def preprocess_text(self, text: str) -> List[str]:
parts = [x.strip() for x in text.split("|")] if "|" in text else [text]
# Remove : after the first word of parts[0]
parts[0] = re.sub(r"^(.*?):", r"\1", parts[0])
lemmatizer = WordNetLemmatizer()
stop_words = set(stopwords.words("english")) - {
"how",
"what",
"when",
"where",
"why",
"which",
}
tokens = []
for part in parts:
if "(" in part and ")" in part:
code_tokens = re.findall(
r'[\w_]+(?=\()|[\w_]+(?==[\'"]{1}[\w_]+[\'"]{1})', part
)
tokens.extend(code_tokens)
words = word_tokenize(part.lower())
tokens.extend(
[
lemmatizer.lemmatize(token)
for token in words
if token not in stop_words
]
)
return tokens
def maybe_load_bm25_index(self, clear_cache=False) -> bool:
"""
Load existing BM25 index from disk, if present and clear_cache=False.
"""
if not clear_cache and os.path.exists(self.bm25_index_file):
self.logger.info("Loading existing BM25 index from disk.")
with open(self.bm25_index_file, "rb") as f:
data = pickle.load(f)
self.tokenized_facts = data["tokenized_facts"]
self.bm25_index = data["bm25_index"]
return True
return False
def build_search_index(self, clear_cache=False) -> None:
"""
Checks for new or modified .q.md files by comparing file-hash.
If none need reindexing and clear_cache is False, loads existing index if available.
Otherwise, reindexes only changed/new files and merges or creates a new index.
"""
# If clear_cache is True, we skip partial logic: rebuild everything from scratch
if clear_cache:
self.logger.info("Clearing cache and rebuilding full search index.")
if self.bm25_index_file.exists():
self.bm25_index_file.unlink()
process = psutil.Process()
self.logger.info("Checking which .q.md files need (re)indexing...")
# Gather all .q.md files
q_files = [
self.docs_dir / f for f in os.listdir(self.docs_dir) if f.endswith(".q.md")
]
# We'll store known (unchanged) facts in these lists
existing_facts: List[str] = []
existing_tokens: List[List[str]] = []
# Keep track of invalid lines for logging
invalid_lines = []
needSet = [] # files that must be (re)indexed
for qf in q_files:
token_cache_file = qf.with_suffix(".q.tokens")
# If no .q.tokens or clear_cache is True → definitely reindex
if clear_cache or not token_cache_file.exists():
needSet.append(qf)
continue
# Otherwise, load the existing cache and compare hash
cache = self._load_or_create_token_cache(qf)
# If the .q.tokens was out of date (i.e. changed hash), we reindex
if len(cache["facts"]) == 0 or cache.get(
"content_hash"
) != _compute_file_hash(qf):
needSet.append(qf)
else:
# File is unchanged → retrieve cached token data
for line, cache_data in cache["facts"].items():
existing_facts.append(line)
existing_tokens.append(cache_data["tokens"])
self.document_map[line] = qf # track the doc for that fact
if not needSet and not clear_cache:
# If no file needs reindexing, try loading existing index
if self.maybe_load_bm25_index(clear_cache=False):
self.logger.info(
"No new/changed .q.md files found. Using existing BM25 index."
)
return
else:
# If there's no existing index, we must build a fresh index from the old caches
self.logger.info(
"No existing BM25 index found. Building from cached facts."
)
if existing_facts:
self.logger.info(
f"Building BM25 index with {len(existing_facts)} cached facts."
)
self.bm25_index = BM25Okapi(existing_tokens)
self.tokenized_facts = existing_facts
with open(self.bm25_index_file, "wb") as f:
pickle.dump(
{
"bm25_index": self.bm25_index,
"tokenized_facts": self.tokenized_facts,
},
f,
)
else:
self.logger.warning("No facts found at all. Index remains empty.")
return
# -----------------------------------------------------
# If we reach here, we have new or changed .q.md files
# We'll parse them, reindex them, and then combine with existing_facts
# -----------------------------------------------------
self.logger.info(f"{len(needSet)} file(s) need reindexing. Parsing now...")
# 1) Parse the new or changed .q.md files
new_facts = []
new_tokens = []
with tqdm(total=len(needSet), desc="Indexing changed files") as file_pbar:
for file in needSet:
# We'll build up a fresh cache
fresh_cache = {"facts": {}, "content_hash": _compute_file_hash(file)}
try:
with open(file, "r", encoding="utf-8") as f_obj:
content = f_obj.read().strip()
lines = [l.strip() for l in content.split("\n") if l.strip()]
for line in lines:
is_valid, error = self._validate_fact_line(line)
if not is_valid:
invalid_lines.append((file, line, error))
continue
tokens = self.preprocess_text(line)
fresh_cache["facts"][line] = {
"tokens": tokens,
"added": time.time(),
}
new_facts.append(line)
new_tokens.append(tokens)
self.document_map[line] = file
# Save the new .q.tokens with updated hash
self._save_token_cache(file, fresh_cache)
mem_usage = process.memory_info().rss / 1024 / 1024
self.logger.debug(
f"Memory usage after {file.name}: {mem_usage:.2f}MB"
)
except Exception as e:
self.logger.error(f"Error processing {file}: {str(e)}")
file_pbar.update(1)
if invalid_lines:
self.logger.warning(f"Found {len(invalid_lines)} invalid fact lines:")
for file, line, error in invalid_lines:
self.logger.warning(f"{file}: {error} in line: {line[:50]}...")
# 2) Merge newly tokenized facts with the existing ones
all_facts = existing_facts + new_facts
all_tokens = existing_tokens + new_tokens
# 3) Build BM25 index from combined facts
self.logger.info(
f"Building BM25 index with {len(all_facts)} total facts (old + new)."
)
self.bm25_index = BM25Okapi(all_tokens)
self.tokenized_facts = all_facts
# 4) Save the updated BM25 index to disk
with open(self.bm25_index_file, "wb") as f:
pickle.dump(
{
"bm25_index": self.bm25_index,
"tokenized_facts": self.tokenized_facts,
},
f,
)
final_mem = process.memory_info().rss / 1024 / 1024
self.logger.info(f"Search index updated. Final memory usage: {final_mem:.2f}MB")
async def generate_index_files(
self, force_generate_facts: bool = False, clear_bm25_cache: bool = False
) -> None:
"""
Generate index files for all documents in parallel batches
Args:
force_generate_facts (bool): If True, regenerate indexes even if they exist
clear_bm25_cache (bool): If True, clear existing BM25 index cache
"""
self.logger.info("Starting index generation for documentation files.")
md_files = [
self.docs_dir / f
for f in os.listdir(self.docs_dir)
if f.endswith(".md") and not any(f.endswith(x) for x in [".q.md", ".xs.md"])
]
# Filter out files that already have .q files unless force=True
if not force_generate_facts:
md_files = [
f
for f in md_files
if not (self.docs_dir / f.name.replace(".md", ".q.md")).exists()
]
if not md_files:
self.logger.info("All index files exist. Use force=True to regenerate.")
else:
# Process documents in batches
for i in range(0, len(md_files), self.batch_size):
batch = md_files[i : i + self.batch_size]
self.logger.info(
f"Processing batch {i//self.batch_size + 1}/{(len(md_files)//self.batch_size) + 1}"
)
await self._process_document_batch(batch)
self.logger.info("Index generation complete, building/updating search index.")
self.build_search_index(clear_cache=clear_bm25_cache)
def generate(self, sections: List[str], mode: str = "extended") -> str:
# Get all markdown files
all_files = glob.glob(str(self.docs_dir / "[0-9]*.md")) + glob.glob(
str(self.docs_dir / "[0-9]*.xs.md")
)
# Extract base names without extensions
base_docs = {
Path(f).name.split(".")[0]
for f in all_files
if not Path(f).name.endswith(".q.md")
}
# Filter by sections if provided
if sections:
base_docs = {
doc
for doc in base_docs
if any(section.lower() in doc.lower() for section in sections)
}
# Get file paths based on mode
files = []
for doc in sorted(
base_docs,
key=lambda x: int(x.split("_")[0]) if x.split("_")[0].isdigit() else 999999,
):
if mode == "condensed":
xs_file = self.docs_dir / f"{doc}.xs.md"
regular_file = self.docs_dir / f"{doc}.md"
files.append(str(xs_file if xs_file.exists() else regular_file))
else:
files.append(str(self.docs_dir / f"{doc}.md"))
# Read and format content
content = []
for file in files:
try:
with open(file, "r", encoding="utf-8") as f:
fname = Path(file).name
content.append(f"{'#'*20}\n# {fname}\n{'#'*20}\n\n{f.read()}")
except Exception as e:
self.logger.error(f"Error reading {file}: {str(e)}")
return "\n\n---\n\n".join(content) if content else ""
def search(self, query: str, top_k: int = 5) -> str:
if not self.bm25_index:
return "No search index available. Call build_search_index() first."
query_tokens = self.preprocess_text(query)
doc_scores = self.bm25_index.get_scores(query_tokens)
mean_score = np.mean(doc_scores)
std_score = np.std(doc_scores)
score_threshold = mean_score + (0.25 * std_score)
file_data = self._aggregate_search_scores(
doc_scores=doc_scores,
score_threshold=score_threshold,
query_tokens=query_tokens,
)
ranked_files = sorted(
file_data.items(),
key=lambda x: (
x[1]["code_match_score"] * 2.0
+ x[1]["match_count"] * 1.5
+ x[1]["total_score"]
),
reverse=True,
)[:top_k]
results = []
for file, _ in ranked_files:
main_doc = str(file).replace(".q.md", ".md")
if os.path.exists(self.docs_dir / main_doc):
with open(self.docs_dir / main_doc, "r", encoding="utf-8") as f:
only_file_name = main_doc.split("/")[-1]
content = ["#" * 20, f"# {only_file_name}", "#" * 20, "", f.read()]
results.append("\n".join(content))
return "\n\n---\n\n".join(results)
def _aggregate_search_scores(
self, doc_scores: List[float], score_threshold: float, query_tokens: List[str]
) -> Dict:
file_data = {}
for idx, score in enumerate(doc_scores):
if score <= score_threshold:
continue
fact = self.tokenized_facts[idx]
file_path = self.document_map[fact]
if file_path not in file_data:
file_data[file_path] = {
"total_score": 0,
"match_count": 0,
"code_match_score": 0,
"matched_facts": [],
}
components = fact.split("|") if "|" in fact else [fact]
code_match_score = 0
if len(components) == 3:
code_ref = components[2].strip()
code_tokens = self.preprocess_text(code_ref)
code_match_score = len(set(query_tokens) & set(code_tokens)) / len(
query_tokens
)
file_data[file_path]["total_score"] += score
file_data[file_path]["match_count"] += 1
file_data[file_path]["code_match_score"] = max(
file_data[file_path]["code_match_score"], code_match_score
)
file_data[file_path]["matched_facts"].append(fact)
return file_data
def refresh_index(self) -> None:
"""Convenience method for a full rebuild."""
self.build_search_index(clear_cache=True)
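# Example usage (illustrative sketch): index generation calls Anthropic via
# litellm's batch_completion, so it assumes valid API credentials and that the
# NLTK punkt/stopwords/wordnet data are available locally.
# import asyncio
# from pathlib import Path
# from crawl4ai.async_logger import AsyncLogger
# manager = AsyncLLMTextManager(Path.home() / ".crawl4ai" / "docs", AsyncLogger(verbose=True))
# asyncio.run(manager.generate_index_files())
# print(manager.search("how to configure a proxy", top_k=3))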

View File

@@ -1,29 +0,0 @@
# version_manager.py
from pathlib import Path
from packaging import version
from . import __version__
class VersionManager:
def __init__(self):
self.home_dir = Path.home() / ".crawl4ai"
self.version_file = self.home_dir / "version.txt"
def get_installed_version(self):
"""Get the version recorded in home directory"""
if not self.version_file.exists():
return None
try:
return version.parse(self.version_file.read_text().strip())
except Exception:
return None
def update_version(self):
"""Update the version file to current library version"""
self.version_file.write_text(__version__.__version__)
def needs_update(self):
"""Check if database needs update based on version"""
installed = self.get_installed_version()
current = version.parse(__version__.__version__)
return installed is None or installed < current
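# Example usage (illustrative sketch):
# vm = VersionManager()
# if vm.needs_update():
#     ...  # run migrations here
#     vm.update_version()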

View File

@@ -1,294 +0,0 @@
import os, time
os.environ["TOKENIZERS_PARALLELISM"] = "false"
from pathlib import Path
from .models import UrlModel, CrawlResult
from .database import init_db, get_cached_url, cache_url
from .utils import *
from .chunking_strategy import *
from .extraction_strategy import *
from .crawler_strategy import *
from typing import List
from concurrent.futures import ThreadPoolExecutor
from ..content_scraping_strategy import LXMLWebScrapingStrategy as WebScrapingStrategy
from .config import *
import warnings
import json
warnings.filterwarnings(
"ignore",
message='Field "model_name" has conflict with protected namespace "model_".',
)
class WebCrawler:
def __init__(
self,
crawler_strategy: CrawlerStrategy = None,
always_by_pass_cache: bool = False,
verbose: bool = False,
):
self.crawler_strategy = crawler_strategy or LocalSeleniumCrawlerStrategy(
verbose=verbose
)
self.always_by_pass_cache = always_by_pass_cache
self.crawl4ai_folder = os.path.join(
os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".crawl4ai"
)
os.makedirs(self.crawl4ai_folder, exist_ok=True)
os.makedirs(f"{self.crawl4ai_folder}/cache", exist_ok=True)
init_db()
self.ready = False
def warmup(self):
print("[LOG] 🌤️ Warming up the WebCrawler")
self.run(
url="https://google.com/",
word_count_threshold=5,
extraction_strategy=NoExtractionStrategy(),
bypass_cache=False,
verbose=False,
)
self.ready = True
print("[LOG] 🌞 WebCrawler is ready to crawl")
def fetch_page(
self,
url_model: UrlModel,
provider: str = DEFAULT_PROVIDER,
api_token: str = None,
extract_blocks_flag: bool = True,
word_count_threshold=MIN_WORD_THRESHOLD,
css_selector: str = None,
screenshot: bool = False,
use_cached_html: bool = False,
extraction_strategy: ExtractionStrategy = None,
chunking_strategy: ChunkingStrategy = RegexChunking(),
**kwargs,
) -> CrawlResult:
return self.run(
url_model.url,
word_count_threshold,
extraction_strategy or NoExtractionStrategy(),
chunking_strategy,
bypass_cache=url_model.forced,
css_selector=css_selector,
screenshot=screenshot,
**kwargs,
)
def fetch_pages(
self,
url_models: List[UrlModel],
provider: str = DEFAULT_PROVIDER,
api_token: str = None,
extract_blocks_flag: bool = True,
word_count_threshold=MIN_WORD_THRESHOLD,
use_cached_html: bool = False,
css_selector: str = None,
screenshot: bool = False,
extraction_strategy: ExtractionStrategy = None,
chunking_strategy: ChunkingStrategy = RegexChunking(),
**kwargs,
) -> List[CrawlResult]:
extraction_strategy = extraction_strategy or NoExtractionStrategy()
def fetch_page_wrapper(url_model, *args):
# Shared keyword arguments are forwarded via the enclosing scope's kwargs;
# executor.map only supplies positional iterables.
return self.fetch_page(url_model, *args, **kwargs)
with ThreadPoolExecutor() as executor:
results = list(
executor.map(
fetch_page_wrapper,
url_models,
[provider] * len(url_models),
[api_token] * len(url_models),
[extract_blocks_flag] * len(url_models),
[word_count_threshold] * len(url_models),
[css_selector] * len(url_models),
[screenshot] * len(url_models),
[use_cached_html] * len(url_models),
[extraction_strategy] * len(url_models),
[chunking_strategy] * len(url_models),
)
)
return results
def run(
self,
url: str,
word_count_threshold=MIN_WORD_THRESHOLD,
extraction_strategy: ExtractionStrategy = None,
chunking_strategy: ChunkingStrategy = RegexChunking(),
bypass_cache: bool = False,
css_selector: str = None,
screenshot: bool = False,
user_agent: str = None,
verbose=True,
**kwargs,
) -> CrawlResult:
try:
extraction_strategy = extraction_strategy or NoExtractionStrategy()
extraction_strategy.verbose = verbose
if not isinstance(extraction_strategy, ExtractionStrategy):
raise ValueError("Unsupported extraction strategy")
if not isinstance(chunking_strategy, ChunkingStrategy):
raise ValueError("Unsupported chunking strategy")
word_count_threshold = max(word_count_threshold, MIN_WORD_THRESHOLD)
cached = None
screenshot_data = None
extracted_content = None
if not bypass_cache and not self.always_by_pass_cache:
cached = get_cached_url(url)
if kwargs.get("warmup", True) and not self.ready:
return None
if cached:
html = sanitize_input_encode(cached[1])
extracted_content = sanitize_input_encode(cached[4])
if screenshot:
screenshot_data = cached[9]
if not screenshot_data:
cached = None
if not cached or not html:
if user_agent:
self.crawler_strategy.update_user_agent(user_agent)
t1 = time.time()
html = sanitize_input_encode(self.crawler_strategy.crawl(url, **kwargs))
t2 = time.time()
if verbose:
print(
f"[LOG] 🚀 Crawling done for {url}, success: {bool(html)}, time taken: {t2 - t1:.2f} seconds"
)
if screenshot:
screenshot_data = self.crawler_strategy.take_screenshot()
crawl_result = self.process_html(
url,
html,
extracted_content,
word_count_threshold,
extraction_strategy,
chunking_strategy,
css_selector,
screenshot_data,
verbose,
bool(cached),
**kwargs,
)
crawl_result.success = bool(html)
return crawl_result
except Exception as e:
if not hasattr(e, "msg"):
e.msg = str(e)
print(f"[ERROR] 🚫 Failed to crawl {url}, error: {e.msg}")
return CrawlResult(url=url, html="", success=False, error_message=e.msg)
def process_html(
self,
url: str,
html: str,
extracted_content: str,
word_count_threshold: int,
extraction_strategy: ExtractionStrategy,
chunking_strategy: ChunkingStrategy,
css_selector: str,
screenshot: bool,
verbose: bool,
is_cached: bool,
**kwargs,
) -> CrawlResult:
t = time.time()
# Extract content from HTML
try:
t1 = time.time()
scrapping_strategy = WebScrapingStrategy()
extra_params = {
k: v
for k, v in kwargs.items()
if k not in ["only_text", "image_description_min_word_threshold"]
}
result = scrapping_strategy.scrap(
url,
html,
word_count_threshold=word_count_threshold,
css_selector=css_selector,
only_text=kwargs.get("only_text", False),
image_description_min_word_threshold=kwargs.get(
"image_description_min_word_threshold",
IMAGE_DESCRIPTION_MIN_WORD_THRESHOLD,
),
**extra_params,
)
# result = get_content_of_website_optimized(url, html, word_count_threshold, css_selector=css_selector, only_text=kwargs.get("only_text", False))
if verbose:
print(
f"[LOG] 🚀 Content extracted for {url}, success: True, time taken: {time.time() - t1:.2f} seconds"
)
if result is None:
raise ValueError(f"Failed to extract content from the website: {url}")
except InvalidCSSSelectorError as e:
raise ValueError(str(e))
cleaned_html = sanitize_input_encode(result.get("cleaned_html", ""))
markdown = sanitize_input_encode(result.get("markdown", ""))
media = result.get("media", [])
links = result.get("links", [])
metadata = result.get("metadata", {})
if extracted_content is None:
if verbose:
print(
f"[LOG] 🔥 Extracting semantic blocks for {url}, Strategy: {extraction_strategy.name}"
)
sections = chunking_strategy.chunk(markdown)
extracted_content = extraction_strategy.run(url, sections)
extracted_content = json.dumps(
extracted_content, indent=4, default=str, ensure_ascii=False
)
if verbose:
print(
f"[LOG] 🚀 Extraction done for {url}, time taken: {time.time() - t:.2f} seconds."
)
screenshot = None if not screenshot else screenshot
if not is_cached:
cache_url(
url,
html,
cleaned_html,
markdown,
extracted_content,
True,
json.dumps(media),
json.dumps(links),
json.dumps(metadata),
screenshot=screenshot,
)
return CrawlResult(
url=url,
html=html,
cleaned_html=format_html(cleaned_html),
markdown=markdown,
media=media,
links=links,
metadata=metadata,
screenshot=screenshot,
extracted_content=extracted_content,
success=True,
error_message="",
)
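# Minimal usage sketch for the legacy synchronous crawler, assuming Selenium
# and a compatible driver are installed; warmup() must run before crawling.
if __name__ == "__main__":
    crawler = WebCrawler(verbose=True)
    crawler.warmup()
    result = crawler.run(url="https://example.com", word_count_threshold=10)
    if result.success:
        print((result.cleaned_html or "")[:500])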

View File

@@ -1,395 +0,0 @@
"""
Link Extractor for Crawl4AI
Extracts head content from links discovered during crawling using URLSeeder's
efficient parallel processing and caching infrastructure.
"""
import asyncio
import fnmatch
from typing import Dict, List, Optional, Any
from .async_logger import AsyncLogger
from .async_url_seeder import AsyncUrlSeeder
from .async_configs import SeedingConfig, CrawlerRunConfig
from .models import Links, Link
from .utils import calculate_total_score
class LinkPreview:
"""
Extracts head content from links using URLSeeder's parallel processing infrastructure.
This class provides intelligent link filtering and head content extraction with:
- Pattern-based inclusion/exclusion filtering
- Parallel processing with configurable concurrency
- Caching for performance
- BM25 relevance scoring
- Memory-safe processing for large link sets
"""
def __init__(self, logger: Optional[AsyncLogger] = None):
"""
Initialize the LinkPreview.
Args:
logger: Optional logger instance for recording events
"""
self.logger = logger
self.seeder: Optional[AsyncUrlSeeder] = None
self._owns_seeder = False
async def __aenter__(self):
"""Async context manager entry."""
await self.start()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
"""Async context manager exit."""
await self.close()
async def start(self):
"""Initialize the URLSeeder instance."""
if not self.seeder:
self.seeder = AsyncUrlSeeder(logger=self.logger)
await self.seeder.__aenter__()
self._owns_seeder = True
async def close(self):
"""Clean up resources."""
if self.seeder and self._owns_seeder:
await self.seeder.__aexit__(None, None, None)
self.seeder = None
self._owns_seeder = False
def _log(self, level: str, message: str, tag: str = "LINK_EXTRACT", **kwargs):
"""Helper method to safely log messages."""
if self.logger:
log_method = getattr(self.logger, level, None)
if log_method:
log_method(message=message, tag=tag, params=kwargs.get('params', {}))
async def extract_link_heads(
self,
links: Links,
config: CrawlerRunConfig
) -> Links:
"""
Extract head content for filtered links and attach to Link objects.
Args:
links: Links object containing internal and external links
config: CrawlerRunConfig with link_preview_config settings
Returns:
Links object with head_data attached to filtered Link objects
"""
link_config = config.link_preview_config
# Ensure seeder is initialized
await self.start()
# Filter links based on configuration
filtered_urls = self._filter_links(links, link_config)
if not filtered_urls:
self._log("info", "No links matched filtering criteria")
return links
self._log("info", "Extracting head content for {count} filtered links",
params={"count": len(filtered_urls)})
# Extract head content using URLSeeder
head_results = await self._extract_heads_parallel(filtered_urls, link_config)
# Merge results back into Link objects
updated_links = self._merge_head_data(links, head_results, config)
self._log("info", "Completed head extraction for links, {success} successful",
params={"success": len([r for r in head_results if r.get("status") == "valid"])})
return updated_links
def _filter_links(self, links: Links, link_config: Any) -> List[str]:
"""
Filter links based on configuration parameters.
Args:
links: Links object containing internal and external links
link_config: Link preview configuration object (read via attribute access, not dict keys)
Returns:
List of filtered URL strings
"""
filtered_urls = []
# Include internal links if configured
if link_config.include_internal:
filtered_urls.extend([link.href for link in links.internal if link.href])
self._log("debug", "Added {count} internal links",
params={"count": len(links.internal)})
# Include external links if configured
if link_config.include_external:
filtered_urls.extend([link.href for link in links.external if link.href])
self._log("debug", "Added {count} external links",
params={"count": len(links.external)})
# Apply include patterns
include_patterns = link_config.include_patterns
if include_patterns:
filtered_urls = [
url for url in filtered_urls
if any(fnmatch.fnmatch(url, pattern) for pattern in include_patterns)
]
self._log("debug", "After include patterns: {count} links remain",
params={"count": len(filtered_urls)})
# Apply exclude patterns
exclude_patterns = link_config.exclude_patterns
if exclude_patterns:
filtered_urls = [
url for url in filtered_urls
if not any(fnmatch.fnmatch(url, pattern) for pattern in exclude_patterns)
]
self._log("debug", "After exclude patterns: {count} links remain",
params={"count": len(filtered_urls)})
# Limit number of links
max_links = link_config.max_links
if max_links > 0 and len(filtered_urls) > max_links:
filtered_urls = filtered_urls[:max_links]
self._log("debug", "Limited to {max_links} links",
params={"max_links": max_links})
# Remove duplicates while preserving order
seen = set()
unique_urls = []
for url in filtered_urls:
if url not in seen:
seen.add(url)
unique_urls.append(url)
self._log("debug", "Final filtered URLs: {count} unique links",
params={"count": len(unique_urls)})
return unique_urls
async def _extract_heads_parallel(
self,
urls: List[str],
link_config: Any
) -> List[Dict[str, Any]]:
"""
Extract head content for URLs using URLSeeder's parallel processing.
Args:
urls: List of URLs to process
link_config: Link preview configuration object (read via attribute access, not dict keys)
Returns:
List of dictionaries with url, status, head_data, and optional relevance_score
"""
verbose = link_config.verbose
concurrency = link_config.concurrency
if verbose:
self._log("info", "Starting batch processing: {total} links with {concurrency} concurrent workers",
params={"total": len(urls), "concurrency": concurrency})
# Create SeedingConfig for URLSeeder
seeding_config = SeedingConfig(
extract_head=True,
concurrency=concurrency,
hits_per_sec=getattr(link_config, 'hits_per_sec', None),
query=link_config.query,
score_threshold=link_config.score_threshold,
scoring_method="bm25" if link_config.query else None,
verbose=verbose
)
# Use URLSeeder's extract_head_for_urls method with progress tracking
if verbose:
# Create a wrapper to track progress
results = await self._extract_with_progress(urls, seeding_config, link_config)
else:
results = await self.seeder.extract_head_for_urls(
urls=urls,
config=seeding_config,
concurrency=concurrency,
timeout=link_config.timeout
)
return results
async def _extract_with_progress(
self,
urls: List[str],
seeding_config: SeedingConfig,
link_config: Any
) -> List[Dict[str, Any]]:
"""Extract head content with progress reporting."""
total_urls = len(urls)
concurrency = link_config.concurrency
batch_size = max(1, total_urls // 10) # Report progress every 10%
# Process URLs and track progress
completed = 0
successful = 0
failed = 0
# URLSeeder does not yet accept progress callbacks, so for now we call its
# existing method and report aggregate progress once the batch completes.
self._log("info", "Processing links in batches...")
# Use existing method
results = await self.seeder.extract_head_for_urls(
urls=urls,
config=seeding_config,
concurrency=concurrency,
timeout=link_config.timeout
)
# Count results
for result in results:
completed += 1
if result.get("status") == "valid":
successful += 1
else:
failed += 1
# Final progress report
self._log("info", "Batch processing completed: {completed}/{total} processed, {successful} successful, {failed} failed",
params={
"completed": completed,
"total": total_urls,
"successful": successful,
"failed": failed
})
return results
def _merge_head_data(
self,
original_links: Links,
head_results: List[Dict[str, Any]],
config: CrawlerRunConfig
) -> Links:
"""
Merge head extraction results back into Link objects.
Args:
original_links: Original Links object
head_results: Results from head extraction
config: CrawlerRunConfig used when computing total link scores
Returns:
Links object with head_data attached to matching links
"""
# Create URL to head_data mapping
url_to_head_data = {}
for result in head_results:
url = result.get("url")
if url:
url_to_head_data[url] = {
"head_data": result.get("head_data", {}),
"status": result.get("status", "unknown"),
"error": result.get("error"),
"relevance_score": result.get("relevance_score")
}
# Update internal links
updated_internal = []
for link in original_links.internal:
if link.href in url_to_head_data:
head_info = url_to_head_data[link.href]
# Create new Link object with head data and scoring
contextual_score = head_info.get("relevance_score")
updated_link = Link(
href=link.href,
text=link.text,
title=link.title,
base_domain=link.base_domain,
head_data=head_info["head_data"],
head_extraction_status=head_info["status"],
head_extraction_error=head_info.get("error"),
intrinsic_score=getattr(link, 'intrinsic_score', None),
contextual_score=contextual_score
)
# Add relevance score to head_data for backward compatibility
if contextual_score is not None:
updated_link.head_data = updated_link.head_data or {}
updated_link.head_data["relevance_score"] = contextual_score
# Calculate total score combining intrinsic and contextual scores
updated_link.total_score = calculate_total_score(
intrinsic_score=updated_link.intrinsic_score,
contextual_score=updated_link.contextual_score,
score_links_enabled=getattr(config, 'score_links', False),
query_provided=bool(config.link_preview_config.query)
)
updated_internal.append(updated_link)
else:
# Keep original link unchanged
updated_internal.append(link)
# Update external links
updated_external = []
for link in original_links.external:
if link.href in url_to_head_data:
head_info = url_to_head_data[link.href]
# Create new Link object with head data and scoring
contextual_score = head_info.get("relevance_score")
updated_link = Link(
href=link.href,
text=link.text,
title=link.title,
base_domain=link.base_domain,
head_data=head_info["head_data"],
head_extraction_status=head_info["status"],
head_extraction_error=head_info.get("error"),
intrinsic_score=getattr(link, 'intrinsic_score', None),
contextual_score=contextual_score
)
# Add relevance score to head_data for backward compatibility
if contextual_score is not None:
updated_link.head_data = updated_link.head_data or {}
updated_link.head_data["relevance_score"] = contextual_score
# Calculate total score combining intrinsic and contextual scores
updated_link.total_score = calculate_total_score(
intrinsic_score=updated_link.intrinsic_score,
contextual_score=updated_link.contextual_score,
score_links_enabled=getattr(config, 'score_links', False),
query_provided=bool(config.link_preview_config.query)
)
updated_external.append(updated_link)
else:
# Keep original link unchanged
updated_external.append(link)
# Sort links by relevance score if available
if any(hasattr(link, 'head_data') and link.head_data and 'relevance_score' in link.head_data
for link in updated_internal + updated_external):
def get_relevance_score(link):
if hasattr(link, 'head_data') and link.head_data and 'relevance_score' in link.head_data:
return link.head_data['relevance_score']
return 0.0
updated_internal.sort(key=get_relevance_score, reverse=True)
updated_external.sort(key=get_relevance_score, reverse=True)
return Links(
internal=updated_internal,
external=updated_external
)
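# Minimal usage sketch; the exact attributes of CrawlerRunConfig.link_preview_config
# are assumptions here and simply need to match what _filter_links and
# _extract_heads_parallel read.
async def _example(links: Links, run_config: CrawlerRunConfig) -> Links:
    async with LinkPreview() as preview:
        return await preview.extract_link_heads(links, run_config)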

View File

@@ -1,260 +0,0 @@
from abc import ABC, abstractmethod
from typing import Optional, Dict, Any, Tuple
from .models import MarkdownGenerationResult
from .html2text import CustomHTML2Text
# from .types import RelevantContentFilter
from .content_filter_strategy import RelevantContentFilter
import re
from urllib.parse import urljoin, urlparse
# Pre-compile the regex pattern
LINK_PATTERN = re.compile(r'!?\[([^\]]+)\]\(([^)]+?)(?:\s+"([^"]*)")?\)')
def fast_urljoin(base: str, url: str) -> str:
"""Fast URL joining for common cases."""
if url.startswith(("http://", "https://", "mailto:", "//")):
return url
if url.startswith("/"):
# Handle absolute paths
if base.endswith("/"):
return base[:-1] + url
return base + url
return urljoin(base, url)
class MarkdownGenerationStrategy(ABC):
"""Abstract base class for markdown generation strategies."""
def __init__(
self,
content_filter: Optional[RelevantContentFilter] = None,
options: Optional[Dict[str, Any]] = None,
verbose: bool = False,
content_source: str = "cleaned_html",
):
self.content_filter = content_filter
self.options = options or {}
self.verbose = verbose
self.content_source = content_source
@abstractmethod
def generate_markdown(
self,
input_html: str,
base_url: str = "",
html2text_options: Optional[Dict[str, Any]] = None,
content_filter: Optional[RelevantContentFilter] = None,
citations: bool = True,
**kwargs,
) -> MarkdownGenerationResult:
"""Generate markdown from the selected input HTML."""
pass
class DefaultMarkdownGenerator(MarkdownGenerationStrategy):
"""
Default implementation of markdown generation strategy.
How it works:
1. Generate raw markdown from cleaned HTML.
2. Convert links to citations.
3. Generate fit markdown if content filter is provided.
4. Return MarkdownGenerationResult.
Args:
content_filter (Optional[RelevantContentFilter]): Content filter for generating fit markdown.
options (Optional[Dict[str, Any]]): Additional options for markdown generation. Defaults to None.
content_source (str): Source of content to generate markdown from. Options: "cleaned_html", "raw_html", "fit_html". Defaults to "cleaned_html".
Returns:
MarkdownGenerationResult: Result containing raw markdown, fit markdown, fit HTML, and references markdown.
"""
def __init__(
self,
content_filter: Optional[RelevantContentFilter] = None,
options: Optional[Dict[str, Any]] = None,
content_source: str = "cleaned_html",
):
super().__init__(content_filter, options, verbose=False, content_source=content_source)
def convert_links_to_citations(
self, markdown: str, base_url: str = ""
) -> Tuple[str, str]:
"""
Convert links in markdown to citations.
How it works:
1. Find all links in the markdown.
2. Convert links to citations.
3. Return converted markdown and references markdown.
Note:
This function uses a regex pattern to find links in markdown.
Args:
markdown (str): Markdown text.
base_url (str): Base URL for URL joins.
Returns:
Tuple[str, str]: Converted markdown and references markdown.
"""
link_map = {}
url_cache = {} # Cache for URL joins
parts = []
last_end = 0
counter = 1
for match in LINK_PATTERN.finditer(markdown):
parts.append(markdown[last_end : match.start()])
text, url, title = match.groups()
# Use cached URL if available, otherwise compute and cache
if base_url and not url.startswith(("http://", "https://", "mailto:")):
if url not in url_cache:
url_cache[url] = fast_urljoin(base_url, url)
url = url_cache[url]
if url not in link_map:
desc = []
if title:
desc.append(title)
if text and text != title:
desc.append(text)
link_map[url] = (counter, ": " + " - ".join(desc) if desc else "")
counter += 1
num = link_map[url][0]
parts.append(
f"{text}⟨{num}⟩"
if not match.group(0).startswith("!")
else f"![{text}⟨{num}⟩]"
)
last_end = match.end()
parts.append(markdown[last_end:])
converted_text = "".join(parts)
# Pre-build reference strings
references = ["\n\n## References\n\n"]
references.extend(
f"{num}{url}{desc}\n"
for url, (num, desc) in sorted(link_map.items(), key=lambda x: x[1][0])
)
return converted_text, "".join(references)
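# Illustrative result, assuming base_url="https://example.com":
#   "[guide](/docs)" becomes "guide⟨1⟩" in the body, and the references block
#   gains a line of the form "⟨1⟩ https://example.com/docs: guide".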
def generate_markdown(
self,
input_html: str,
base_url: str = "",
html2text_options: Optional[Dict[str, Any]] = None,
options: Optional[Dict[str, Any]] = None,
content_filter: Optional[RelevantContentFilter] = None,
citations: bool = True,
**kwargs,
) -> MarkdownGenerationResult:
"""
Generate markdown with citations from the provided input HTML.
How it works:
1. Generate raw markdown from the input HTML.
2. Convert links to citations.
3. Generate fit markdown if content filter is provided.
4. Return MarkdownGenerationResult.
Args:
input_html (str): The HTML content to process (selected based on content_source).
base_url (str): Base URL for URL joins.
html2text_options (Optional[Dict[str, Any]]): HTML2Text options.
options (Optional[Dict[str, Any]]): Additional options for markdown generation.
content_filter (Optional[RelevantContentFilter]): Content filter for generating fit markdown.
citations (bool): Whether to generate citations.
Returns:
MarkdownGenerationResult: Result containing raw markdown, fit markdown, fit HTML, and references markdown.
"""
try:
# Initialize HTML2Text with default options for better conversion
h = CustomHTML2Text(baseurl=base_url)
default_options = {
"body_width": 0, # Disable text wrapping
"ignore_emphasis": False,
"ignore_links": False,
"ignore_images": False,
"protect_links": False,
"single_line_break": True,
"mark_code": True,
"escape_snob": False,
}
# Update with custom options if provided
if html2text_options:
default_options.update(html2text_options)
elif options:
default_options.update(options)
elif self.options:
default_options.update(self.options)
h.update_params(**default_options)
# Ensure we have valid input
if not input_html:
input_html = ""
elif not isinstance(input_html, str):
input_html = str(input_html)
# Generate raw markdown
try:
raw_markdown = h.handle(input_html)
except Exception as e:
raw_markdown = f"Error converting HTML to markdown: {str(e)}"
raw_markdown = raw_markdown.replace(" ```", "```")
# Convert links to citations
markdown_with_citations: str = raw_markdown
references_markdown: str = ""
if citations:
try:
(
markdown_with_citations,
references_markdown,
) = self.convert_links_to_citations(raw_markdown, base_url)
except Exception as e:
markdown_with_citations = raw_markdown
references_markdown = f"Error generating citations: {str(e)}"
# Generate fit markdown if content filter is provided
fit_markdown: Optional[str] = ""
filtered_html: Optional[str] = ""
if content_filter or self.content_filter:
try:
content_filter = content_filter or self.content_filter
filtered_html = content_filter.filter_content(input_html)
filtered_html = "\n".join(
"<div>{}</div>".format(s) for s in filtered_html
)
fit_markdown = h.handle(filtered_html)
except Exception as e:
fit_markdown = f"Error generating fit markdown: {str(e)}"
filtered_html = ""
return MarkdownGenerationResult(
raw_markdown=raw_markdown or "",
markdown_with_citations=markdown_with_citations or "",
references_markdown=references_markdown or "",
fit_markdown=fit_markdown or "",
fit_html=filtered_html or "",
)
except Exception as e:
# If anything fails, return empty strings with error message
error_msg = f"Error in markdown generation: {str(e)}"
return MarkdownGenerationResult(
raw_markdown=error_msg,
markdown_with_citations=error_msg,
references_markdown="",
fit_markdown="",
fit_html="",
)
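# Minimal usage sketch; a RelevantContentFilter instance may optionally be
# passed as content_filter to also produce fit_markdown.
def _example(html: str, page_url: str) -> MarkdownGenerationResult:
    generator = DefaultMarkdownGenerator(options={"ignore_images": True})
    return generator.generate_markdown(
        input_html=html, base_url=page_url, citations=True
    )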

View File

@@ -1,194 +0,0 @@
import os
import asyncio
from pathlib import Path
import aiosqlite
from typing import Optional
import xxhash
import aiofiles
import shutil
from datetime import datetime
from .async_logger import AsyncLogger, LogLevel
# Initialize logger
logger = AsyncLogger(log_level=LogLevel.DEBUG, verbose=True)
# logging.basicConfig(level=logging.INFO)
# logger = logging.getLogger(__name__)
class DatabaseMigration:
def __init__(self, db_path: str):
self.db_path = db_path
self.content_paths = self._ensure_content_dirs(os.path.dirname(db_path))
def _ensure_content_dirs(self, base_path: str) -> dict:
dirs = {
"html": "html_content",
"cleaned": "cleaned_html",
"markdown": "markdown_content",
"extracted": "extracted_content",
"screenshots": "screenshots",
}
content_paths = {}
for key, dirname in dirs.items():
path = os.path.join(base_path, dirname)
os.makedirs(path, exist_ok=True)
content_paths[key] = path
return content_paths
def _generate_content_hash(self, content: str) -> str:
x = xxhash.xxh64()
x.update(content.encode())
content_hash = x.hexdigest()
return content_hash
# return hashlib.sha256(content.encode()).hexdigest()
async def _store_content(self, content: str, content_type: str) -> str:
if not content:
return ""
content_hash = self._generate_content_hash(content)
file_path = os.path.join(self.content_paths[content_type], content_hash)
if not os.path.exists(file_path):
async with aiofiles.open(file_path, "w", encoding="utf-8") as f:
await f.write(content)
return content_hash
async def migrate_database(self):
"""Migrate existing database to file-based storage"""
# logger.info("Starting database migration...")
logger.info("Starting database migration...", tag="INIT")
try:
async with aiosqlite.connect(self.db_path) as db:
# Get all rows
async with db.execute(
"""SELECT url, html, cleaned_html, markdown,
extracted_content, screenshot FROM crawled_data"""
) as cursor:
rows = await cursor.fetchall()
migrated_count = 0
for row in rows:
(
url,
html,
cleaned_html,
markdown,
extracted_content,
screenshot,
) = row
# Store content in files and get hashes
html_hash = await self._store_content(html, "html")
cleaned_hash = await self._store_content(cleaned_html, "cleaned")
markdown_hash = await self._store_content(markdown, "markdown")
extracted_hash = await self._store_content(
extracted_content, "extracted"
)
screenshot_hash = await self._store_content(
screenshot, "screenshots"
)
# Update database with hashes
await db.execute(
"""
UPDATE crawled_data
SET html = ?,
cleaned_html = ?,
markdown = ?,
extracted_content = ?,
screenshot = ?
WHERE url = ?
""",
(
html_hash,
cleaned_hash,
markdown_hash,
extracted_hash,
screenshot_hash,
url,
),
)
migrated_count += 1
if migrated_count % 100 == 0:
logger.info(f"Migrated {migrated_count} records...", tag="INIT")
await db.commit()
logger.success(
f"Migration completed. {migrated_count} records processed.",
tag="COMPLETE",
)
except Exception as e:
# logger.error(f"Migration failed: {e}")
logger.error(
message="Migration failed: {error}",
tag="ERROR",
params={"error": str(e)},
)
raise e
async def backup_database(db_path: str) -> str:
"""Create backup of existing database"""
if not os.path.exists(db_path):
logger.info("No existing database found. Skipping backup.", tag="INIT")
return None
# Create backup with timestamp
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
backup_path = f"{db_path}.backup_{timestamp}"
try:
# Wait for any potential write operations to finish
await asyncio.sleep(1)
# Create backup
shutil.copy2(db_path, backup_path)
logger.info(f"Database backup created at: {backup_path}", tag="COMPLETE")
return backup_path
except Exception as e:
# logger.error(f"Backup failed: {e}")
logger.error(
message="Backup failed: {error}", tag="ERROR", params={"error": str(e)}
)
raise e
async def run_migration(db_path: Optional[str] = None):
"""Run database migration"""
if db_path is None:
db_path = os.path.join(Path.home(), ".crawl4ai", "crawl4ai.db")
if not os.path.exists(db_path):
logger.info("No existing database found. Skipping migration.", tag="INIT")
return
# Create backup first
backup_path = await backup_database(db_path)
if not backup_path:
return
migration = DatabaseMigration(db_path)
await migration.migrate_database()
def main():
"""CLI entry point for migration"""
import argparse
parser = argparse.ArgumentParser(
description="Migrate Crawl4AI database to file-based storage"
)
parser.add_argument("--db-path", help="Custom database path")
args = parser.parse_args()
asyncio.run(run_migration(args.db_path))
if __name__ == "__main__":
main()

View File

@@ -2,125 +2,109 @@ from functools import lru_cache
from pathlib import Path
import subprocess, os
import shutil
import tarfile
from .model_loader import *
import argparse
import urllib.request
from crawl4ai.config import MODEL_REPO_BRANCH
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
@lru_cache()
def get_available_memory(device):
import torch
if device.type == "cuda":
if device.type == 'cuda':
return torch.cuda.get_device_properties(device).total_memory
elif device.type == "mps":
return 48 * 1024**3 # Assuming 8GB for MPS, as a conservative estimate
elif device.type == 'mps':
return 48 * 1024 ** 3 # Assuming 8GB for MPS, as a conservative estimate
else:
return 0
@lru_cache()
def calculate_batch_size(device):
available_memory = get_available_memory(device)
if device.type == "cpu":
if device.type == 'cpu':
return 16
elif device.type in ["cuda", "mps"]:
elif device.type in ['cuda', 'mps']:
# Adjust these thresholds based on your model size and available memory
if available_memory >= 31 * 1024**3: # > 32GB
if available_memory >= 31 * 1024 ** 3: # > 32GB
return 256
elif available_memory >= 15 * 1024**3: # > 16GB to 32GB
elif available_memory >= 15 * 1024 ** 3: # > 16GB to 32GB
return 128
elif available_memory >= 8 * 1024**3: # 8GB to 16GB
elif available_memory >= 8 * 1024 ** 3: # 8GB to 16GB
return 64
else:
return 32
else:
return 16 # Default batch size
return 16 # Default batch size
@lru_cache()
def get_device():
import torch
if torch.cuda.is_available():
device = torch.device("cuda")
device = torch.device('cuda')
elif torch.backends.mps.is_available():
device = torch.device("mps")
device = torch.device('mps')
else:
device = torch.device("cpu")
return device
device = torch.device('cpu')
return device
def set_model_device(model):
device = get_device()
model.to(device)
model.to(device)
return model, device
@lru_cache()
def get_home_folder():
home_folder = os.path.join(
os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".crawl4ai"
)
home_folder = os.path.join(Path.home(), ".crawl4ai")
os.makedirs(home_folder, exist_ok=True)
os.makedirs(f"{home_folder}/cache", exist_ok=True)
os.makedirs(f"{home_folder}/models", exist_ok=True)
return home_folder
return home_folder
@lru_cache()
def load_bert_base_uncased():
from transformers import BertTokenizer, BertModel
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", resume_download=None)
model = BertModel.from_pretrained("bert-base-uncased", resume_download=None)
from transformers import BertTokenizer, BertModel, AutoTokenizer, AutoModel
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', resume_download=None)
model = BertModel.from_pretrained('bert-base-uncased', resume_download=None)
model.eval()
model, device = set_model_device(model)
return tokenizer, model
@lru_cache()
def load_HF_embedding_model(model_name="BAAI/bge-small-en-v1.5") -> tuple:
"""Load the Hugging Face model for embedding.
Args:
model_name (str, optional): The model name to load. Defaults to "BAAI/bge-small-en-v1.5".
Returns:
tuple: The tokenizer and model.
"""
from transformers import AutoTokenizer, AutoModel
from transformers import BertTokenizer, BertModel, AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained(model_name, resume_download=None)
model = AutoModel.from_pretrained(model_name, resume_download=None)
model.eval()
model, device = set_model_device(model)
return tokenizer, model
@lru_cache()
def load_text_classifier():
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from transformers import pipeline
import torch
tokenizer = AutoTokenizer.from_pretrained(
"dstefa/roberta-base_topic_classification_nyt_news"
)
model = AutoModelForSequenceClassification.from_pretrained(
"dstefa/roberta-base_topic_classification_nyt_news"
)
tokenizer = AutoTokenizer.from_pretrained("dstefa/roberta-base_topic_classification_nyt_news")
model = AutoModelForSequenceClassification.from_pretrained("dstefa/roberta-base_topic_classification_nyt_news")
model.eval()
model, device = set_model_device(model)
pipe = pipeline("text-classification", model=model, tokenizer=tokenizer)
return pipe
@lru_cache()
def load_text_multilabel_classifier():
from transformers import AutoModelForSequenceClassification, AutoTokenizer
import numpy as np
from scipy.special import expit
import torch
@@ -132,27 +116,18 @@ def load_text_multilabel_classifier():
# else:
# device = torch.device("cpu")
# # return load_spacy_model(), torch.device("cpu")
MODEL = "cardiffnlp/tweet-topic-21-multi"
tokenizer = AutoTokenizer.from_pretrained(MODEL, resume_download=None)
model = AutoModelForSequenceClassification.from_pretrained(
MODEL, resume_download=None
)
model = AutoModelForSequenceClassification.from_pretrained(MODEL, resume_download=None)
model.eval()
model, device = set_model_device(model)
class_mapping = model.config.id2label
def _classifier(texts, threshold=0.5, max_length=64):
tokens = tokenizer(
texts,
return_tensors="pt",
padding=True,
truncation=True,
max_length=max_length,
)
tokens = {
key: val.to(device) for key, val in tokens.items()
} # Move tokens to the selected device
tokens = tokenizer(texts, return_tensors='pt', padding=True, truncation=True, max_length=max_length)
tokens = {key: val.to(device) for key, val in tokens.items()} # Move tokens to the selected device
with torch.no_grad():
output = model(**tokens)
@@ -163,41 +138,35 @@ def load_text_multilabel_classifier():
batch_labels = []
for prediction in predictions:
labels = [
class_mapping[i] for i, value in enumerate(prediction) if value == 1
]
labels = [class_mapping[i] for i, value in enumerate(prediction) if value == 1]
batch_labels.append(labels)
return batch_labels
return _classifier, device
@lru_cache()
def load_nltk_punkt():
import nltk
try:
nltk.data.find("tokenizers/punkt")
nltk.data.find('tokenizers/punkt')
except LookupError:
nltk.download("punkt")
return nltk.data.find("tokenizers/punkt")
nltk.download('punkt')
return nltk.data.find('tokenizers/punkt')
@lru_cache()
def load_spacy_model():
import spacy
name = "models/reuters"
home_folder = get_home_folder()
model_folder = Path(home_folder) / name
# Check if the model directory already exists
if not (model_folder.exists() and any(model_folder.iterdir())):
repo_url = "https://github.com/unclecode/crawl4ai.git"
branch = MODEL_REPO_BRANCH
branch = MODEL_REPO_BRANCH
repo_folder = Path(home_folder) / "crawl4ai"
print("[LOG] ⏬ Downloading Spacy model for the first time...")
# Remove existing repo folder if it exists
@@ -207,9 +176,7 @@ def load_spacy_model():
if model_folder.exists():
shutil.rmtree(model_folder)
except PermissionError:
print(
"[WARNING] Unable to remove existing folders. Please manually delete the following folders and try again:"
)
print("[WARNING] Unable to remove existing folders. Please manually delete the following folders and try again:")
print(f"- {repo_folder}")
print(f"- {model_folder}")
return None
@@ -220,7 +187,7 @@ def load_spacy_model():
["git", "clone", "-b", branch, repo_url, str(repo_folder)],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
check=True,
check=True
)
# Create the models directory if it doesn't exist
@@ -248,7 +215,6 @@ def load_spacy_model():
print(f"Error loading spacy model: {e}")
return None
def download_all_models(remove_existing=False):
"""Download all models required for Crawl4AI."""
if remove_existing:
@@ -277,20 +243,14 @@ def download_all_models(remove_existing=False):
load_nltk_punkt()
print("[LOG] ✅ All models downloaded successfully.")
def main():
print("[LOG] Welcome to the Crawl4AI Model Downloader!")
print("[LOG] This script will download all the models required for Crawl4AI.")
parser = argparse.ArgumentParser(description="Crawl4AI Model Downloader")
parser.add_argument(
"--remove-existing",
action="store_true",
help="Remove existing models before downloading",
)
parser.add_argument('--remove-existing', action='store_true', help="Remove existing models before downloading")
args = parser.parse_args()
download_all_models(remove_existing=args.remove_existing)
if __name__ == "__main__":
main()

View File

@@ -1,387 +1,24 @@
from pydantic import BaseModel, HttpUrl, PrivateAttr, Field
from typing import List, Dict, Optional, Callable, Awaitable, Union, Any
from typing import AsyncGenerator
from typing import Generic, TypeVar
from enum import Enum
from dataclasses import dataclass
from .ssl_certificate import SSLCertificate
from datetime import datetime
from datetime import timedelta
###############################
# Dispatcher Models
###############################
@dataclass
class DomainState:
last_request_time: float = 0
current_delay: float = 0
fail_count: int = 0
@dataclass
class CrawlerTaskResult:
task_id: str
url: str
result: "CrawlResult"
memory_usage: float
peak_memory: float
start_time: Union[datetime, float]
end_time: Union[datetime, float]
error_message: str = ""
retry_count: int = 0
wait_time: float = 0.0
@property
def success(self) -> bool:
return self.result.success
class CrawlStatus(Enum):
QUEUED = "QUEUED"
IN_PROGRESS = "IN_PROGRESS"
COMPLETED = "COMPLETED"
FAILED = "FAILED"
@dataclass
class CrawlStats:
task_id: str
url: str
status: CrawlStatus
start_time: Optional[Union[datetime, float]] = None
end_time: Optional[Union[datetime, float]] = None
memory_usage: float = 0.0
peak_memory: float = 0.0
error_message: str = ""
wait_time: float = 0.0
retry_count: int = 0
counted_requeue: bool = False
@property
def duration(self) -> str:
if not self.start_time:
return "0:00"
# Convert start_time to datetime if it's a float
start = self.start_time
if isinstance(start, float):
start = datetime.fromtimestamp(start)
# Get end time or use current time
end = self.end_time or datetime.now()
# Convert end_time to datetime if it's a float
if isinstance(end, float):
end = datetime.fromtimestamp(end)
duration = end - start
return str(timedelta(seconds=int(duration.total_seconds())))
class DisplayMode(Enum):
DETAILED = "DETAILED"
AGGREGATED = "AGGREGATED"
###############################
# Crawler Models
###############################
@dataclass
class TokenUsage:
completion_tokens: int = 0
prompt_tokens: int = 0
total_tokens: int = 0
completion_tokens_details: Optional[dict] = None
prompt_tokens_details: Optional[dict] = None
from pydantic import BaseModel, HttpUrl
from typing import List, Dict, Optional
class UrlModel(BaseModel):
url: HttpUrl
forced: bool = False
@dataclass
class TraversalStats:
"""Statistics for the traversal process"""
start_time: datetime = datetime.now()
urls_processed: int = 0
urls_failed: int = 0
urls_skipped: int = 0
total_depth_reached: int = 0
current_depth: int = 0
class DispatchResult(BaseModel):
task_id: str
memory_usage: float
peak_memory: float
start_time: Union[datetime, float]
end_time: Union[datetime, float]
error_message: str = ""
class MarkdownGenerationResult(BaseModel):
raw_markdown: str
markdown_with_citations: str
references_markdown: str
fit_markdown: Optional[str] = None
fit_html: Optional[str] = None
def __str__(self):
return self.raw_markdown
class CrawlResult(BaseModel):
url: str
html: str
fit_html: Optional[str] = None
success: bool
cleaned_html: Optional[str] = None
media: Dict[str, List[Dict]] = {}
links: Dict[str, List[Dict]] = {}
downloaded_files: Optional[List[str]] = None
js_execution_result: Optional[Dict[str, Any]] = None
screenshot: Optional[str] = None
pdf: Optional[bytes] = None
mhtml: Optional[str] = None
_markdown: Optional[MarkdownGenerationResult] = PrivateAttr(default=None)
markdown: Optional[str] = None
fit_markdown: Optional[str] = None
fit_html: Optional[str] = None
extracted_content: Optional[str] = None
metadata: Optional[dict] = None
error_message: Optional[str] = None
session_id: Optional[str] = None
response_headers: Optional[dict] = None
status_code: Optional[int] = None
ssl_certificate: Optional[SSLCertificate] = None
dispatch_result: Optional[DispatchResult] = None
redirected_url: Optional[str] = None
network_requests: Optional[List[Dict[str, Any]]] = None
console_messages: Optional[List[Dict[str, Any]]] = None
tables: List[Dict] = Field(default_factory=list) # NEW [{headers,rows,caption,summary}]
class Config:
arbitrary_types_allowed = True
# NOTE: The StringCompatibleMarkdown class, custom __init__ method, property getters/setters,
# and model_dump override all exist to support a smooth transition from markdown as a string
# to markdown as a MarkdownGenerationResult object, while maintaining backward compatibility.
#
# This allows code that expects markdown to be a string to continue working, while also
# providing access to the full MarkdownGenerationResult object's properties.
#
# The markdown_v2 property is deprecated and raises an error directing users to use markdown.
#
# When backward compatibility is no longer needed in future versions, this entire mechanism
# can be simplified to a standard field with no custom accessors or serialization logic.
def __init__(self, **data):
markdown_result = data.pop('markdown', None)
super().__init__(**data)
if markdown_result is not None:
self._markdown = (
MarkdownGenerationResult(**markdown_result)
if isinstance(markdown_result, dict)
else markdown_result
)
@property
def markdown(self):
"""
Property that returns a StringCompatibleMarkdown object that behaves like
a string but also provides access to MarkdownGenerationResult attributes.
This approach allows backward compatibility with code that expects 'markdown'
to be a string, while providing access to the full MarkdownGenerationResult.
"""
if self._markdown is None:
return None
return StringCompatibleMarkdown(self._markdown)
@markdown.setter
def markdown(self, value):
"""
Setter for the markdown property.
"""
self._markdown = value
@property
def markdown_v2(self):
"""
Deprecated property that raises an AttributeError when accessed.
This property exists to inform users that 'markdown_v2' has been
deprecated and they should use 'markdown' instead.
"""
raise AttributeError(
"The 'markdown_v2' attribute is deprecated and has been removed. "
"""Please use 'markdown' instead, which now returns a MarkdownGenerationResult, with
following properties:
- raw_markdown: The raw markdown string
- markdown_with_citations: The markdown string with citations
- references_markdown: The markdown string with references
- fit_markdown: The markdown string with fit text
"""
)
@property
def fit_markdown(self):
"""
Deprecated property that raises an AttributeError when accessed.
"""
raise AttributeError(
"The 'fit_markdown' attribute is deprecated and has been removed. "
"Please use 'markdown.fit_markdown' instead."
)
@property
def fit_html(self):
"""
Deprecated property that raises an AttributeError when accessed.
"""
raise AttributeError(
"The 'fit_html' attribute is deprecated and has been removed. "
"Please use 'markdown.fit_html' instead."
)
def model_dump(self, *args, **kwargs):
"""
Override model_dump to include the _markdown private attribute in serialization.
This override is necessary because:
1. PrivateAttr fields are excluded from serialization by default
2. We need to maintain backward compatibility by including the 'markdown' field
in the serialized output
3. We're transitioning from 'markdown_v2' to enhancing 'markdown' to hold
the same type of data
Future developers: This method ensures that the markdown content is properly
serialized despite being stored in a private attribute. If the serialization
requirements change, this is where you would update the logic.
"""
result = super().model_dump(*args, **kwargs)
# Remove any property descriptors that might have been included
# These deprecated properties should not be in the serialized output
for key in ['fit_html', 'fit_markdown', 'markdown_v2']:
if key in result and isinstance(result[key], property):
# del result[key]
# Nasrin: I decided to convert it to string instead of removing it.
result[key] = str(result[key])
# Add the markdown field properly
if self._markdown is not None:
result["markdown"] = self._markdown.model_dump()
return result
class StringCompatibleMarkdown(str):
"""A string subclass that also provides access to MarkdownGenerationResult attributes"""
def __new__(cls, markdown_result):
return super().__new__(cls, markdown_result.raw_markdown)
def __init__(self, markdown_result):
self._markdown_result = markdown_result
def __getattr__(self, name):
return getattr(self._markdown_result, name)
CrawlResultT = TypeVar('CrawlResultT', bound=CrawlResult)
class CrawlResultContainer(Generic[CrawlResultT]):
def __init__(self, results: Union[CrawlResultT, List[CrawlResultT]]):
# Normalize to a list
if isinstance(results, list):
self._results = results
else:
self._results = [results]
def __iter__(self):
return iter(self._results)
def __getitem__(self, index):
return self._results[index]
def __len__(self):
return len(self._results)
def __getattr__(self, attr):
# Delegate attribute access to the first element.
if self._results:
return getattr(self._results[0], attr)
raise AttributeError(f"{self.__class__.__name__} object has no attribute '{attr}'")
def __repr__(self):
return f"{self.__class__.__name__}({self._results!r})"
RunManyReturn = Union[
CrawlResultContainer[CrawlResultT],
AsyncGenerator[CrawlResultT, None]
]
# END of backward compatibility code for markdown/markdown_v2.
# When removing this code in the future, make sure to:
# 1. Replace the private attribute and property with a standard field
# 2. Update any serialization logic that might depend on the current behavior
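# Illustrative sketch of the compatibility behaviour the comments above describe,
# using a hypothetical result object:
#   md = MarkdownGenerationResult(raw_markdown="hi", markdown_with_citations="hi",
#                                 references_markdown="")
#   result = CrawlResult(url="https://example.com", html="<p>hi</p>",
#                        success=True, markdown=md)
#   isinstance(result.markdown, str)   # True (StringCompatibleMarkdown)
#   result.markdown.raw_markdown       # "hi" (delegated to the wrapped result)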
class AsyncCrawlResponse(BaseModel):
html: str
response_headers: Dict[str, str]
js_execution_result: Optional[Dict[str, Any]] = None
status_code: int
screenshot: Optional[str] = None
pdf_data: Optional[bytes] = None
mhtml_data: Optional[str] = None
get_delayed_content: Optional[Callable[[Optional[float]], Awaitable[str]]] = None
downloaded_files: Optional[List[str]] = None
ssl_certificate: Optional[SSLCertificate] = None
redirected_url: Optional[str] = None
network_requests: Optional[List[Dict[str, Any]]] = None
console_messages: Optional[List[Dict[str, Any]]] = None
class Config:
arbitrary_types_allowed = True
###############################
# Scraping Models
###############################
class MediaItem(BaseModel):
src: Optional[str] = ""
data: Optional[str] = ""
alt: Optional[str] = ""
desc: Optional[str] = ""
score: Optional[int] = 0
type: str = "image"
group_id: Optional[int] = 0
format: Optional[str] = None
width: Optional[int] = None
class Link(BaseModel):
href: Optional[str] = ""
text: Optional[str] = ""
title: Optional[str] = ""
base_domain: Optional[str] = ""
head_data: Optional[Dict[str, Any]] = None # Head metadata extracted from link target
head_extraction_status: Optional[str] = None # "success", "failed", "skipped"
head_extraction_error: Optional[str] = None # Error message if extraction failed
intrinsic_score: Optional[float] = None # Quality score based on URL structure, text, and context
contextual_score: Optional[float] = None # BM25 relevance score based on query and head content
total_score: Optional[float] = None # Combined score from intrinsic and contextual scores
class Media(BaseModel):
images: List[MediaItem] = []
videos: List[
MediaItem
] = [] # Using MediaItem model for now, can be extended with Video model if needed
audios: List[
MediaItem
] = [] # Using MediaItem model for now, can be extended with Audio model if needed
tables: List[Dict] = [] # Table data extracted from HTML tables
class Links(BaseModel):
internal: List[Link] = []
external: List[Link] = []
class ScrapingResult(BaseModel):
cleaned_html: str
success: bool
media: Media = Media()
links: Links = Links()
metadata: Dict[str, Any] = {}
status_code: Optional[int] = None

View File

@@ -1,195 +0,0 @@
from pathlib import Path
import asyncio
from dataclasses import asdict
from crawl4ai.async_logger import AsyncLogger
from crawl4ai.async_crawler_strategy import AsyncCrawlerStrategy
from crawl4ai.models import AsyncCrawlResponse, ScrapingResult
from crawl4ai.content_scraping_strategy import ContentScrapingStrategy
from .processor import NaivePDFProcessorStrategy # Assuming your current PDF code is in pdf_processor.py
class PDFCrawlerStrategy(AsyncCrawlerStrategy):
def __init__(self, logger: AsyncLogger = None):
self.logger = logger
async def crawl(self, url: str, **kwargs) -> AsyncCrawlResponse:
# Just pass through with empty HTML - scraper will handle actual processing
return AsyncCrawlResponse(
html="Scraper will handle the real work", # Scraper will handle the real work
response_headers={"Content-Type": "application/pdf"},
status_code=200
)
async def close(self):
pass
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
class PDFContentScrapingStrategy(ContentScrapingStrategy):
"""
A content scraping strategy for PDF files.
Attributes:
save_images_locally (bool): Whether to save images locally.
extract_images (bool): Whether to extract images from PDF.
image_save_dir (str): Directory to save extracted images.
logger (AsyncLogger): Logger instance for recording events and errors.
Methods:
scrap(url: str, html: str, **params) -> ScrapingResult:
Scrap content from a PDF file.
ascrap(url: str, html: str, **kwargs) -> ScrapingResult:
Asynchronous version of scrap.
Usage:
strategy = PDFContentScrapingStrategy(
save_images_locally=False,
extract_images=False,
image_save_dir=None,
logger=logger
)
"""
def __init__(self,
save_images_locally : bool = False,
extract_images : bool = False,
image_save_dir : str = None,
batch_size: int = 4,
logger: AsyncLogger = None):
self.logger = logger
self.pdf_processor = NaivePDFProcessorStrategy(
save_images_locally=save_images_locally,
extract_images=extract_images,
image_save_dir=image_save_dir,
batch_size=batch_size
)
self._temp_files = [] # Track temp files for cleanup
def scrap(self, url: str, html: str, **params) -> ScrapingResult:
"""
Scrap content from a PDF file.
Args:
url (str): The URL of the PDF file.
html (str): The HTML content of the page.
**params: Additional parameters.
Returns:
ScrapingResult: The scraped content.
"""
# Download if URL or use local path
pdf_path = self._get_pdf_path(url)
try:
# Process PDF
# result = self.pdf_processor.process(Path(pdf_path))
result = self.pdf_processor.process_batch(Path(pdf_path))
# Combine page HTML
cleaned_html = f"""
<html>
<head><meta name="pdf-pages" content="{len(result.pages)}"></head>
<body>
{''.join(f'<div class="pdf-page" data-page="{i+1}">{page.html}</div>'
for i, page in enumerate(result.pages))}
</body>
</html>
"""
# Accumulate media and links with page numbers
media = {"images": []}
links = {"urls": []}
for page in result.pages:
# Add page number to each image
for img in page.images:
img["page"] = page.page_number
media["images"].append(img)
# Add page number to each link
for link in page.links:
links["urls"].append({
"url": link,
"page": page.page_number
})
return ScrapingResult(
cleaned_html=cleaned_html,
success=True,
media=media,
links=links,
metadata=asdict(result.metadata)
)
finally:
# Cleanup temp file if downloaded
if url.startswith(("http://", "https://")):
try:
Path(pdf_path).unlink(missing_ok=True)
if pdf_path in self._temp_files:
self._temp_files.remove(pdf_path)
except Exception as e:
if self.logger:
self.logger.warning(f"Failed to cleanup temp file {pdf_path}: {e}")
async def ascrap(self, url: str, html: str, **kwargs) -> ScrapingResult:
# For simple cases, you can use the sync version
return await asyncio.to_thread(self.scrap, url, html, **kwargs)
def _get_pdf_path(self, url: str) -> str:
if url.startswith(("http://", "https://")):
import tempfile
import requests
# Create temp file with .pdf extension
temp_file = tempfile.NamedTemporaryFile(suffix='.pdf', delete=False)
self._temp_files.append(temp_file.name)
try:
if self.logger:
self.logger.info(f"Downloading PDF from {url}...")
# Download PDF with streaming and timeout
# Connection timeout: 20s, read timeout: 600s (10 minutes for large PDFs)
response = requests.get(url, stream=True, timeout=(20, 60 * 10))
response.raise_for_status()
# Get file size if available
total_size = int(response.headers.get('content-length', 0))
downloaded = 0
# Write to temp file
with open(temp_file.name, 'wb') as f:
for chunk in response.iter_content(chunk_size=8192):
f.write(chunk)
downloaded += len(chunk)
if self.logger and total_size > 0:
progress = (downloaded / total_size) * 100
if progress % 10 < 0.1: # Log every 10%
self.logger.debug(f"PDF download progress: {progress:.0f}%")
if self.logger:
self.logger.info(f"PDF downloaded successfully: {temp_file.name}")
return temp_file.name
except requests.exceptions.Timeout as e:
# Clean up temp file if download fails
Path(temp_file.name).unlink(missing_ok=True)
self._temp_files.remove(temp_file.name)
raise RuntimeError(f"Timeout downloading PDF from {url}: {str(e)}")
except Exception as e:
# Clean up temp file if download fails
Path(temp_file.name).unlink(missing_ok=True)
self._temp_files.remove(temp_file.name)
raise RuntimeError(f"Failed to download PDF from {url}: {str(e)}")
elif url.startswith("file://"):
return url[7:] # Strip file:// prefix
return url # Assume local path
__all__ = ["PDFCrawlerStrategy", "PDFContentScrapingStrategy"]
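# Usage sketch; AsyncWebCrawler and CrawlerRunConfig are assumed to be
# importable from the top-level crawl4ai package:
#
#   from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
#
#   async def crawl_pdf(url: str):
#       run_config = CrawlerRunConfig(
#           scraping_strategy=PDFContentScrapingStrategy(extract_images=False)
#       )
#       async with AsyncWebCrawler(crawler_strategy=PDFCrawlerStrategy()) as crawler:
#           return await crawler.arun(url=url, config=run_config)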

View File

@@ -1,487 +0,0 @@
import logging
import re
from abc import ABC, abstractmethod
from datetime import datetime
from pathlib import Path
from time import time
from dataclasses import dataclass, asdict, field
from typing import Dict, List, Optional, Any, Union
import base64
import tempfile
from .utils import *
from .utils import (
apply_png_predictor,
clean_pdf_text,
clean_pdf_text_to_html,
)
# Remove direct PyPDF2 imports from the top
# import PyPDF2
# from PyPDF2 import PdfReader
logger = logging.getLogger(__name__)
@dataclass
class PDFMetadata:
title: Optional[str] = None
author: Optional[str] = None
producer: Optional[str] = None
created: Optional[datetime] = None
modified: Optional[datetime] = None
pages: int = 0
encrypted: bool = False
file_size: Optional[int] = None
@dataclass
class PDFPage:
page_number: int
raw_text: str = ""
markdown: str = ""
html: str = ""
images: List[Dict] = field(default_factory=list)
links: List[str] = field(default_factory=list)
layout: List[Dict] = field(default_factory=list)
@dataclass
class PDFProcessResult:
metadata: PDFMetadata
pages: List[PDFPage]
processing_time: float = 0.0
version: str = "1.0"
class PDFProcessorStrategy(ABC):
@abstractmethod
def process(self, pdf_path: Path) -> PDFProcessResult:
pass
class NaivePDFProcessorStrategy(PDFProcessorStrategy):
def __init__(self, image_dpi: int = 144, image_quality: int = 85, extract_images: bool = True,
save_images_locally: bool = False, image_save_dir: Optional[Path] = None, batch_size: int = 4):
# Import check at initialization time
try:
import PyPDF2
except ImportError:
raise ImportError("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")
self.image_dpi = image_dpi
self.image_quality = image_quality
self.current_page_number = 0
self.extract_images = extract_images
self.save_images_locally = save_images_locally
self.image_save_dir = image_save_dir
self.batch_size = batch_size
self._temp_dir = None
def process(self, pdf_path: Path) -> PDFProcessResult:
# Import inside method to allow dependency to be optional
try:
from PyPDF2 import PdfReader
except ImportError:
raise ImportError("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")
start_time = time()
result = PDFProcessResult(
metadata=PDFMetadata(),
pages=[],
version="1.1"
)
try:
with pdf_path.open('rb') as file:
reader = PdfReader(file)
result.metadata = self._extract_metadata(pdf_path, reader)
# Handle image directory
image_dir = None
if self.extract_images and self.save_images_locally:
if self.image_save_dir:
image_dir = Path(self.image_save_dir)
image_dir.mkdir(exist_ok=True, parents=True)
else:
self._temp_dir = tempfile.mkdtemp(prefix='pdf_images_')
image_dir = Path(self._temp_dir)
for page_num, page in enumerate(reader.pages):
self.current_page_number = page_num + 1
pdf_page = self._process_page(page, image_dir)
result.pages.append(pdf_page)
except Exception as e:
logger.error(f"Failed to process PDF: {str(e)}")
raise
finally:
# Cleanup temp directory if it was created
if self._temp_dir and not self.image_save_dir:
import shutil
try:
shutil.rmtree(self._temp_dir)
except Exception as e:
logger.error(f"Failed to cleanup temp directory: {str(e)}")
result.processing_time = time() - start_time
return result
def process_batch(self, pdf_path: Path) -> PDFProcessResult:
"""Like process() but processes PDF pages in parallel batches"""
# Import inside method to allow dependency to be optional
try:
from PyPDF2 import PdfReader
import PyPDF2 # For type checking
except ImportError:
raise ImportError("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")
import concurrent.futures
import threading
# Initialize PyPDF2 thread support
if not hasattr(threading.current_thread(), "_children"):
threading.current_thread()._children = set()
start_time = time()
result = PDFProcessResult(
metadata=PDFMetadata(),
pages=[],
version="1.1"
)
try:
# Get metadata and page count from main thread
with pdf_path.open('rb') as file:
reader = PdfReader(file)
result.metadata = self._extract_metadata(pdf_path, reader)
total_pages = len(reader.pages)
# Handle image directory setup
image_dir = None
if self.extract_images and self.save_images_locally:
if self.image_save_dir:
image_dir = Path(self.image_save_dir)
image_dir.mkdir(exist_ok=True, parents=True)
else:
self._temp_dir = tempfile.mkdtemp(prefix='pdf_images_')
image_dir = Path(self._temp_dir)
def process_page_safely(page_num: int):
# Each thread opens its own file handle
with pdf_path.open('rb') as file:
thread_reader = PdfReader(file)
page = thread_reader.pages[page_num]
self.current_page_number = page_num + 1
return self._process_page(page, image_dir)
# Process pages in parallel batches
with concurrent.futures.ThreadPoolExecutor(max_workers=self.batch_size) as executor:
futures = []
for page_num in range(total_pages):
future = executor.submit(process_page_safely, page_num)
futures.append((page_num + 1, future))
# Collect results in order
result.pages = [None] * total_pages
for page_num, future in futures:
try:
pdf_page = future.result()
result.pages[page_num - 1] = pdf_page
except Exception as e:
logger.error(f"Failed to process page {page_num}: {str(e)}")
raise
except Exception as e:
logger.error(f"Failed to process PDF: {str(e)}")
raise
finally:
# Cleanup temp directory if it was created
if self._temp_dir and not self.image_save_dir:
import shutil
try:
shutil.rmtree(self._temp_dir)
except Exception as e:
logger.error(f"Failed to cleanup temp directory: {str(e)}")
result.processing_time = time() - start_time
return result
def _process_page(self, page, image_dir: Optional[Path]) -> PDFPage:
pdf_page = PDFPage(
page_number=self.current_page_number,
)
# Text and font extraction
def visitor_text(text, cm, tm, font_dict, font_size):
pdf_page.raw_text += text
pdf_page.layout.append({
"type": "text",
"text": text,
"x": tm[4],
"y": tm[5],
})
page.extract_text(visitor_text=visitor_text)
# Image extraction
if self.extract_images:
pdf_page.images = self._extract_images(page, image_dir)
# Link extraction
pdf_page.links = self._extract_links(page)
# Add markdown content
pdf_page.markdown = clean_pdf_text(self.current_page_number, pdf_page.raw_text)
pdf_page.html = clean_pdf_text_to_html(self.current_page_number, pdf_page.raw_text)
return pdf_page
def _extract_images(self, page, image_dir: Optional[Path]) -> List[Dict]:
# Import PyPDF2 for type checking only when needed
try:
import PyPDF2
except ImportError:
raise ImportError("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")
if not self.extract_images:
return []
images = []
try:
resources = page.get("/Resources")
if resources: # Check if resources exist
resources = resources.get_object() # Resolve IndirectObject
if '/XObject' in resources:
xobjects = resources['/XObject'].get_object()
img_count = 0
for obj_name in xobjects:
xobj = xobjects[obj_name]
if hasattr(xobj, 'get_object') and callable(xobj.get_object):
xobj = xobj.get_object()
if xobj.get('/Subtype') == '/Image':
try:
img_count += 1
img_filename = f"page_{self.current_page_number}_img_{img_count}"
data = xobj.get_data()
filters = xobj.get('/Filter', [])
if not isinstance(filters, list):
filters = [filters]
# Resolve IndirectObjects in properties
width = xobj.get('/Width', 0)
height = xobj.get('/Height', 0)
color_space = xobj.get('/ColorSpace', '/DeviceRGB')
if isinstance(color_space, PyPDF2.generic.IndirectObject):
color_space = color_space.get_object()
# Handle different image encodings
success = False
image_format = 'bin'
image_data = None
if '/FlateDecode' in filters:
try:
decode_parms = xobj.get('/DecodeParms', {})
if isinstance(decode_parms, PyPDF2.generic.IndirectObject):
decode_parms = decode_parms.get_object()
predictor = decode_parms.get('/Predictor', 1)
bits = xobj.get('/BitsPerComponent', 8)
colors = 3 if color_space == '/DeviceRGB' else 1
if predictor >= 10:
data = apply_png_predictor(data, width, bits, colors)
# Create PIL Image
from PIL import Image
mode = 'RGB' if color_space == '/DeviceRGB' else 'L'
img = Image.frombytes(mode, (width, height), data)
if self.save_images_locally:
final_path = (image_dir / img_filename).with_suffix('.png')
img.save(final_path)
image_data = str(final_path)
else:
import io
img_byte_arr = io.BytesIO()
img.save(img_byte_arr, format='PNG')
image_data = base64.b64encode(img_byte_arr.getvalue()).decode('utf-8')
success = True
image_format = 'png'
except Exception as e:
logger.error(f"FlateDecode error: {str(e)}")
elif '/DCTDecode' in filters:
# JPEG image
try:
if self.save_images_locally:
final_path = (image_dir / img_filename).with_suffix('.jpg')
with open(final_path, 'wb') as f:
f.write(data)
image_data = str(final_path)
else:
image_data = base64.b64encode(data).decode('utf-8')
success = True
image_format = 'jpeg'
except Exception as e:
logger.error(f"JPEG save error: {str(e)}")
elif '/CCITTFaxDecode' in filters:
try:
if data[:4] != b'II*\x00':
# Add TIFF header if missing
tiff_header = b'II*\x00\x08\x00\x00\x00\x0e\x00\x00\x01\x03\x00\x01\x00\x00\x00' + \
width.to_bytes(4, 'little') + \
b'\x01\x03\x00\x01\x00\x00\x00' + \
height.to_bytes(4, 'little') + \
b'\x01\x12\x00\x03\x00\x00\x00\x01\x00\x01\x00\x00\x01\x17\x00\x04\x00\x00\x00\x01\x00\x00\x00J\x01\x1B\x00\x05\x00\x00\x00\x01\x00\x00\x00R\x01\x28\x00\x03\x00\x00\x00\x01\x00\x02\x00\x00'
data = tiff_header + data
if self.save_images_locally:
final_path = (image_dir / img_filename).with_suffix('.tiff')
with open(final_path, 'wb') as f:
f.write(data)
image_data = str(final_path)
else:
image_data = base64.b64encode(data).decode('utf-8')
success = True
image_format = 'tiff'
except Exception as e:
logger.error(f"CCITT save error: {str(e)}")
elif '/JPXDecode' in filters:
# JPEG 2000
try:
if self.save_images_locally:
final_path = (image_dir / img_filename).with_suffix('.jp2')
with open(final_path, 'wb') as f:
f.write(data)
image_data = str(final_path)
else:
image_data = base64.b64encode(data).decode('utf-8')
success = True
image_format = 'jpeg2000'
except Exception as e:
logger.error(f"JPEG2000 save error: {str(e)}")
if success and image_data:
image_info = {
"format": image_format,
"width": width,
"height": height,
"color_space": str(color_space),
"bits_per_component": xobj.get('/BitsPerComponent', 1)
}
if self.save_images_locally:
image_info["path"] = image_data
else:
image_info["data"] = image_data
images.append(image_info)
else:
# Fallback: Save raw data
if self.save_images_locally:
final_path = (image_dir / img_filename).with_suffix('.bin')
with open(final_path, 'wb') as f:
f.write(data)
logger.warning(f"Saved raw image data to {final_path}")
else:
image_data = base64.b64encode(data).decode('utf-8')
images.append({
"format": "bin",
"width": width,
"height": height,
"color_space": str(color_space),
"bits_per_component": xobj.get('/BitsPerComponent', 1),
"data": image_data
})
except Exception as e:
logger.error(f"Error processing image: {str(e)}")
except Exception as e:
logger.error(f"Image extraction error: {str(e)}")
return images
def _extract_links(self, page) -> List[str]:
links = []
if '/Annots' in page:
try:
for annot in page['/Annots']:
a = annot.get_object()
if '/A' in a and '/URI' in a['/A']:
links.append(a['/A']['/URI'])
except Exception as e:
print(f"Link error: {str(e)}")
return links
def _extract_metadata(self, pdf_path: Path, reader = None) -> PDFMetadata:
# Import inside method to allow dependency to be optional
if reader is None:
try:
from PyPDF2 import PdfReader
reader = PdfReader(pdf_path)
except ImportError:
raise ImportError("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")
meta = reader.metadata or {}
created = self._parse_pdf_date(meta.get('/CreationDate', ''))
modified = self._parse_pdf_date(meta.get('/ModDate', ''))
return PDFMetadata(
title=meta.get('/Title'),
author=meta.get('/Author'),
producer=meta.get('/Producer'),
created=created,
modified=modified,
pages=len(reader.pages),
encrypted=reader.is_encrypted,
file_size=pdf_path.stat().st_size
)
def _parse_pdf_date(self, date_str: str) -> Optional[datetime]:
try:
match = re.match(r'D:(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})', date_str)
if not match:
return None
return datetime(
year=int(match[1]),
month=int(match[2]),
day=int(match[3]),
hour=int(match[4]),
minute=int(match[5]),
second=int(match[6])
)
except Exception:
return None
# Usage example
if __name__ == "__main__":
import json
from pathlib import Path
try:
# Import PyPDF2 only when running the file directly
import PyPDF2
from PyPDF2 import PdfReader
except ImportError:
print("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")
exit(1)
current_dir = Path(__file__).resolve().parent
pdf_path = f'{current_dir}/test.pdf'
strategy = NaivePDFProcessorStrategy()
result = strategy.process(Path(pdf_path))
# Convert to JSON
json_output = asdict(result)
print(json.dumps(json_output, indent=2, default=str))
with open(f'{current_dir}/test.html', 'w') as f:
for page in result.pages:
f.write(f'<h1>Page {page.page_number}</h1>')  # PDFPage is a dataclass, so use attribute access
f.write(page.html)
with open(f'{current_dir}/test.md', 'w') as f:
for page in result.pages:
f.write(f'# Page {page.page_number}\n\n')
f.write(clean_pdf_text(page.page_number, page.raw_text))
f.write('\n\n')
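A short sketch of the parallel path, process_batch(), with extracted images saved to a local folder. The file paths below are illustrative.

from pathlib import Path

strategy = NaivePDFProcessorStrategy(
    extract_images=True,
    save_images_locally=True,
    image_save_dir=Path("./pdf_images"),
    batch_size=4,  # number of pages processed concurrently
)
batch_result = strategy.process_batch(Path("./papers/sample.pdf"))  # assumed local file
print(f"{batch_result.metadata.pages} pages in {batch_result.processing_time:.2f}s")
for page in batch_result.pages:
    print(page.page_number, len(page.images), "images", len(page.links), "links")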

View File

@@ -1,350 +0,0 @@
import re
def apply_png_predictor(data, width, bits, color_channels):
"""Decode PNG predictor (PDF 1.5+ filter)"""
bytes_per_pixel = (bits * color_channels) // 8
if (bits * color_channels) % 8 != 0:
bytes_per_pixel += 1
stride = width * bytes_per_pixel
scanline_length = stride + 1 # +1 for filter byte
if len(data) % scanline_length != 0:
raise ValueError("Invalid scanline structure")
num_lines = len(data) // scanline_length
output = bytearray()
prev_line = b'\x00' * stride
for i in range(num_lines):
line = data[i*scanline_length:(i+1)*scanline_length]
filter_type = line[0]
filtered = line[1:]
if filter_type == 0: # None
decoded = filtered
elif filter_type == 1: # Sub
decoded = bytearray(filtered)
for j in range(bytes_per_pixel, len(decoded)):
decoded[j] = (decoded[j] + decoded[j - bytes_per_pixel]) % 256
elif filter_type == 2: # Up
decoded = bytearray([(filtered[j] + prev_line[j]) % 256
for j in range(len(filtered))])
elif filter_type == 3: # Average
decoded = bytearray(filtered)
for j in range(len(decoded)):
left = decoded[j - bytes_per_pixel] if j >= bytes_per_pixel else 0
up = prev_line[j]
avg = (left + up) // 2
decoded[j] = (decoded[j] + avg) % 256
elif filter_type == 4: # Paeth
decoded = bytearray(filtered)
for j in range(len(decoded)):
left = decoded[j - bytes_per_pixel] if j >= bytes_per_pixel else 0
up = prev_line[j]
up_left = prev_line[j - bytes_per_pixel] if j >= bytes_per_pixel else 0
paeth = paeth_predictor(left, up, up_left)
decoded[j] = (decoded[j] + paeth) % 256
else:
raise ValueError(f"Unsupported filter type: {filter_type}")
output.extend(decoded)
prev_line = decoded
return bytes(output)
def paeth_predictor(a, b, c):
p = a + b - c
pa = abs(p - a)
pb = abs(p - b)
pc = abs(p - c)
if pa <= pb and pa <= pc:
return a
elif pb <= pc:
return b
else:
return c
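The Paeth predictor picks whichever of the three neighbours (left, up, upper-left) lies closest to the linear estimate a + b - c, preferring left, then up, on ties. A quick sanity check:

# Hand-picked sanity checks for the predictor above.
assert paeth_predictor(3, 9, 8) == 3    # estimate is 4, left (3) is nearest
assert paeth_predictor(9, 3, 8) == 3    # estimate is 4, up (3) is nearest
assert paeth_predictor(0, 0, 255) == 0  # ties resolve to left before up/upper-left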
import html
def clean_pdf_text_to_html(page_number, text):
# Decode Unicode escapes and handle surrogate pairs
try:
decoded = text.encode('latin-1').decode('unicode-escape')
decoded = decoded.encode('utf-16', 'surrogatepass').decode('utf-16')
except Exception as e:
decoded = text # Fallback if decoding fails
article_title_detected = False
# decoded = re.sub(r'\.\n', '.\n\n', decoded)
# decoded = re.sub(r'\.\n', '<|break|>', decoded)
lines = decoded.split('\n')
output = []
current_paragraph = []
in_header = False
email_pattern = re.compile(r'\{.*?\}')
affiliation_pattern = re.compile(r'^†')
quote_pattern = re.compile(r'^["“]')
author_pattern = re.compile(
r'^\s*[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?'
r'(?:,\s*[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?)*'
r'(?:,\s*(?:and|&)\s+[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?)?\s*$'
)
def flush_paragraph():
if current_paragraph:
para = ' '.join(current_paragraph)
para = re.sub(r'\s+', ' ', para).strip()
if para:
# escaped_para = html.escape(para)
escaped_para = para
# escaped_para = re.sub(r'\.\n', '.\n\n', escaped_para)
# Split escaped_para by <|break|> to avoid HTML escaping
escaped_para = escaped_para.split('.\n\n')
# Wrap each part in <p> tag
escaped_para = [f'<p>{part}</p>' for part in escaped_para]
output.append(f'<div class="paragraph">{"".join(escaped_para)}</div><hr/>')
current_paragraph.clear()
for i, line in enumerate(lines):
line = line.strip()
# Handle empty lines
if not line:
flush_paragraph()
continue
# Detect article title (first line with reasonable length)
if not article_title_detected and i == 0 and 3 <= len(line.split()) <= 8 and len(lines) > 1:
flush_paragraph()
escaped_line = html.escape(line)
output.append(f'<h2>{escaped_line}</h2>')
article_title_detected = True
continue
# Detect numbered headers like "2.1 Background"
numbered_header = re.match(r'^(\d+(?:\.\d+)*)\s+(.+)$', line)
if i > 0 and not lines[i-1].strip() and numbered_header:
flush_paragraph()
level = numbered_header.group(1).count('.') + 1
header_text = numbered_header.group(2)
md_level = min(level + 1, 6)
escaped_header = html.escape(header_text)
output.append(f'<h{md_level}>{escaped_header}</h{md_level}>')
in_header = True
continue
# Detect authors
if page_number == 1 and author_pattern.match(line):
authors = re.sub(r'[†â€]', '', line)
authors = re.split(r', | and ', authors)
formatted_authors = []
for author in authors:
if author.strip():
parts = [p for p in author.strip().split() if p]
formatted = ' '.join(parts)
escaped_author = html.escape(formatted)
formatted_authors.append(f'<strong>{escaped_author}</strong>')
if len(formatted_authors) > 1:
joined = ', '.join(formatted_authors[:-1]) + ' and ' + formatted_authors[-1]
else:
joined = formatted_authors[0]
output.append(f'<p>{joined}</p>')
continue
# Detect affiliation
if affiliation_pattern.match(line):
escaped_line = html.escape(line)
output.append(f'<p><em>{escaped_line}</em></p>')
continue
# Detect emails
if email_pattern.match(line):
escaped_line = html.escape(line)
output.append(f'<p><code>{escaped_line}</code></p>')
continue
# Detect section headers
if re.match(r'^(Abstract|\d+\s+[A-Z]|References|Appendix|Figure|Table)', line):
flush_paragraph()
escaped_line = html.escape(line)
output.append(f'<h2 class="section-header"><em>{escaped_line}</em></h2>')
in_header = True
continue
# Handle quotes
if quote_pattern.match(line):
flush_paragraph()
escaped_line = html.escape(line)
output.append(f'<blockquote><p>{escaped_line}</p></blockquote>')
continue
# Handle hyphenated words
if line.endswith('-'):
current_paragraph.append(line[:-1].strip())
else:
current_paragraph.append(line)
# Handle paragraph breaks after headers
if in_header and not line.endswith(('.', '!', '?')):
flush_paragraph()
in_header = False
flush_paragraph()
# Post-process HTML
html_output = '\n'.join(output)
# Fix common citation patterns
html_output = re.sub(r'\(([A-Z][a-z]+ et al\. \d{4})\)', r'<cite>\1</cite>', html_output)
# Fix escaped characters
html_output = html_output.replace('\\ud835', '').replace('\\u2020', '')
# Remove leftover hyphens and fix spacing
html_output = re.sub(r'\s+-\s+', '', html_output)
html_output = re.sub(r'\s+([.,!?)])', r'\1', html_output)
return html_output
def clean_pdf_text(page_number, text):
# Decode Unicode escapes and handle surrogate pairs
try:
decoded = text.encode('latin-1').decode('unicode-escape')
decoded = decoded.encode('utf-16', 'surrogatepass').decode('utf-16')
except Exception as e:
decoded = text # Fallback if decoding fails
article_title_detected = False
decoded = re.sub(r'\.\n', '.\n\n', decoded)
lines = decoded.split('\n')
output = []
current_paragraph = []
in_header = False
email_pattern = re.compile(r'\{.*?\}')
affiliation_pattern = re.compile(r'^†')
quote_pattern = re.compile(r'^["“]')
author_pattern = re.compile(
r'^\s*[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?'
r'(?:,\s*[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?)*'
r'(?:,\s*(?:and|&)\s+[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?)?\s*$'
)
def flush_paragraph():
if current_paragraph:
para = ' '.join(current_paragraph)
para = re.sub(r'\s+', ' ', para).strip()
if para:
output.append(para)
current_paragraph.clear()
for i, line in enumerate(lines):
line = line.strip()
# Handle special patterns
if not line:
flush_paragraph()
continue
# Detect headline (first line, reasonable length, surrounded by empty lines)
if not article_title_detected and i == 0 and 3 <= len(line.split()) <= 8 and (len(lines) > 1):
flush_paragraph()
output.append(f'## {line}')
continue
# Detect paragraph breaks for ALL paragraphs
if not line and current_paragraph:
flush_paragraph()
output.append('') # Add empty line between paragraphs
continue
# Detect numbered headers like "2.1 Background"
numbered_header = re.match(r'^(\d+(?:\.\d+)*)\s+(.+)$', line)
if i > 0 and not lines[i-1].strip() and numbered_header:
flush_paragraph()
level = numbered_header.group(1).count('.') + 1 # Convert 2.1 → level 2
header_text = numbered_header.group(2)
# Never go beyond ### for subsections
md_level = min(level + 1, 6) # 1 → ##, 2 → ###, 3 → #### etc
output.append(f'{"#" * md_level} {header_text}')
in_header = True
continue
# Detect authors
if page_number == 1 and author_pattern.match(line):
# Clean and format author names
authors = re.sub(r'[†â€]', '', line) # Remove affiliation markers
authors = re.split(r', | and ', authors)
formatted_authors = []
for author in authors:
if author.strip():
# Handle "First Last" formatting
parts = [p for p in author.strip().split() if p]
formatted = ' '.join(parts)
formatted_authors.append(f'**{formatted}**')
# Join with commas and "and"
if len(formatted_authors) > 1:
joined = ', '.join(formatted_authors[:-1]) + ' and ' + formatted_authors[-1]
else:
joined = formatted_authors[0]
output.append(joined)
continue
# Detect affiliation
if affiliation_pattern.match(line):
output.append(f'*{line}*')
continue
# Detect emails
if email_pattern.match(line):
output.append(f'`{line}`')
continue
# Detect section headers
if re.match(r'^(Abstract|\d+\s+[A-Z]|References|Appendix|Figure|Table)', line):
flush_paragraph()
output.append(f'_[{line}]_')
in_header = True
continue
# Handle quotes
if quote_pattern.match(line):
flush_paragraph()
output.append(f'> {line}')
continue
# Handle hyphenated words
if line.endswith('-'):
current_paragraph.append(line[:-1].strip())
else:
current_paragraph.append(line)
# Handle paragraph breaks after headers
if in_header and not line.endswith(('.', '!', '?')):
flush_paragraph()
in_header = False
flush_paragraph()
# Post-processing
markdown = '\n\n'.join(output)
# Fix common citation patterns
markdown = re.sub(r'\(([A-Z][a-z]+ et al\. \d{4})\)', r'[\1]', markdown)
# Fix escaped characters
markdown = markdown.replace('\\ud835', '').replace('\\u2020', '')
# Remove leftover hyphens and fix spacing
markdown = re.sub(r'\s+-\s+', '', markdown) # Join hyphenated words
markdown = re.sub(r'\s+([.,!?)])', r'\1', markdown) # Fix punctuation spacing
return markdown

File diff suppressed because it is too large

View File

@@ -1,158 +0,0 @@
from typing import List, Dict, Optional
from abc import ABC, abstractmethod
from itertools import cycle
import os
########### ATTENTION PEOPLE OF EARTH ###########
# I have moved this config to async_configs.py and kept it here in case someone is still importing it;
# however, be a dear and use `from crawl4ai import ProxyConfig` instead :)
class ProxyConfig:
def __init__(
self,
server: str,
username: Optional[str] = None,
password: Optional[str] = None,
ip: Optional[str] = None,
):
"""Configuration class for a single proxy.
Args:
server: Proxy server URL (e.g., "http://127.0.0.1:8080")
username: Optional username for proxy authentication
password: Optional password for proxy authentication
ip: Optional IP address for verification purposes
"""
self.server = server
self.username = username
self.password = password
# Extract IP from server if not explicitly provided
self.ip = ip or self._extract_ip_from_server()
def _extract_ip_from_server(self) -> Optional[str]:
"""Extract IP address from server URL."""
try:
# Simple extraction assuming http://ip:port format
if "://" in self.server:
parts = self.server.split("://")[1].split(":")
return parts[0]
else:
parts = self.server.split(":")
return parts[0]
except Exception:
return None
@staticmethod
def from_string(proxy_str: str) -> "ProxyConfig":
"""Create a ProxyConfig from a string in the format 'ip:port:username:password'."""
parts = proxy_str.split(":")
if len(parts) == 4: # ip:port:username:password
ip, port, username, password = parts
return ProxyConfig(
server=f"http://{ip}:{port}",
username=username,
password=password,
ip=ip
)
elif len(parts) == 2: # ip:port only
ip, port = parts
return ProxyConfig(
server=f"http://{ip}:{port}",
ip=ip
)
else:
raise ValueError(f"Invalid proxy string format: {proxy_str}")
@staticmethod
def from_dict(proxy_dict: Dict) -> "ProxyConfig":
"""Create a ProxyConfig from a dictionary."""
return ProxyConfig(
server=proxy_dict.get("server"),
username=proxy_dict.get("username"),
password=proxy_dict.get("password"),
ip=proxy_dict.get("ip")
)
@staticmethod
def from_env(env_var: str = "PROXIES") -> List["ProxyConfig"]:
"""Load proxies from environment variable.
Args:
env_var: Name of environment variable containing comma-separated proxy strings
Returns:
List of ProxyConfig objects
"""
proxies = []
try:
proxy_list = os.getenv(env_var, "").split(",")
for proxy in proxy_list:
if not proxy:
continue
proxies.append(ProxyConfig.from_string(proxy))
except Exception as e:
print(f"Error loading proxies from environment: {e}")
return proxies
def to_dict(self) -> Dict:
"""Convert to dictionary representation."""
return {
"server": self.server,
"username": self.username,
"password": self.password,
"ip": self.ip
}
def clone(self, **kwargs) -> "ProxyConfig":
"""Create a copy of this configuration with updated values.
Args:
**kwargs: Key-value pairs of configuration options to update
Returns:
ProxyConfig: A new instance with the specified updates
"""
config_dict = self.to_dict()
config_dict.update(kwargs)
return ProxyConfig.from_dict(config_dict)
class ProxyRotationStrategy(ABC):
"""Base abstract class for proxy rotation strategies"""
@abstractmethod
async def get_next_proxy(self) -> Optional[ProxyConfig]:
"""Get next proxy configuration from the strategy"""
pass
@abstractmethod
def add_proxies(self, proxies: List[ProxyConfig]):
"""Add proxy configurations to the strategy"""
pass
class RoundRobinProxyStrategy:
"""Simple round-robin proxy rotation strategy using ProxyConfig objects"""
def __init__(self, proxies: List[ProxyConfig] = None):
"""
Initialize with optional list of proxy configurations
Args:
proxies: List of ProxyConfig objects
"""
self._proxies = []
self._proxy_cycle = None
if proxies:
self.add_proxies(proxies)
def add_proxies(self, proxies: List[ProxyConfig]):
"""Add new proxies to the rotation pool"""
self._proxies.extend(proxies)
self._proxy_cycle = cycle(self._proxies)
async def get_next_proxy(self) -> Optional[ProxyConfig]:
"""Get next proxy in round-robin fashion"""
if not self._proxy_cycle:
return None
return next(self._proxy_cycle)
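A minimal rotation sketch using the classes above; the proxy addresses and credentials are placeholders.

import asyncio

proxies = [
    ProxyConfig.from_string("192.0.2.10:8080:user:secret"),  # ip:port:username:password
    ProxyConfig.from_string("192.0.2.11:8080"),              # ip:port only
]
rotation = RoundRobinProxyStrategy(proxies)

async def demo():
    for _ in range(3):
        proxy = await rotation.get_next_proxy()
        print(proxy.server, proxy.ip)

asyncio.run(demo())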

View File

@@ -1,35 +0,0 @@
"""
C4A-Script: A domain-specific language for web automation in Crawl4AI
"""
from .c4a_compile import C4ACompiler, compile, validate, compile_file
from .c4a_result import (
CompilationResult,
ValidationResult,
ErrorDetail,
WarningDetail,
ErrorType,
Severity,
Suggestion
)
__all__ = [
# Main compiler
"C4ACompiler",
# Convenience functions
"compile",
"validate",
"compile_file",
# Result types
"CompilationResult",
"ValidationResult",
"ErrorDetail",
"WarningDetail",
# Enums
"ErrorType",
"Severity",
"Suggestion"
]

View File

@@ -1,398 +0,0 @@
"""
Clean C4A-Script API with Result pattern
No exceptions - always returns results
"""
from __future__ import annotations
import pathlib
import re
from typing import Union, List, Optional
# JSON_SCHEMA_BUILDER is still used elsewhere,
# but we now also need the new script-builder prompt.
from ..prompts import GENERATE_JS_SCRIPT_PROMPT, GENERATE_SCRIPT_PROMPT
import logging
from .c4a_result import (
CompilationResult, ValidationResult, ErrorDetail, WarningDetail,
ErrorType, Severity, Suggestion
)
from .c4ai_script import Compiler
from lark.exceptions import UnexpectedToken, UnexpectedCharacters, VisitError
from ..async_configs import LLMConfig
from ..utils import perform_completion_with_backoff
class C4ACompiler:
"""Main compiler with result-based API"""
# Error code mapping
ERROR_CODES = {
"missing_then": "E001",
"missing_paren": "E002",
"missing_comma": "E003",
"missing_endproc": "E004",
"undefined_proc": "E005",
"missing_backticks": "E006",
"invalid_command": "E007",
"syntax_error": "E999"
}
@classmethod
def compile(cls, script: Union[str, List[str]], root: Optional[pathlib.Path] = None) -> CompilationResult:
"""
Compile C4A-Script to JavaScript
Args:
script: C4A-Script as string or list of lines
root: Root directory for includes
Returns:
CompilationResult with success status and JS code or errors
"""
# Normalize input
if isinstance(script, list):
script_text = '\n'.join(script)
script_lines = script
else:
script_text = script
script_lines = script.split('\n')
try:
# Try compilation
compiler = Compiler(root)
js_code = compiler.compile(script_text)
# Success!
result = CompilationResult(
success=True,
js_code=js_code,
metadata={
"lineCount": len(script_lines),
"statementCount": len(js_code)
}
)
# Add any warnings (future feature)
# result.warnings = cls._check_warnings(script_text)
return result
except Exception as e:
# Convert exception to ErrorDetail
error = cls._exception_to_error(e, script_lines)
return CompilationResult(
success=False,
errors=[error],
metadata={
"lineCount": len(script_lines)
}
)
@classmethod
def validate(cls, script: Union[str, List[str]]) -> ValidationResult:
"""
Validate script syntax without generating code
Args:
script: C4A-Script to validate
Returns:
ValidationResult with validity status and any errors
"""
result = cls.compile(script)
return ValidationResult(
valid=result.success,
errors=result.errors,
warnings=result.warnings
)
@classmethod
def compile_file(cls, path: Union[str, pathlib.Path]) -> CompilationResult:
"""
Compile a C4A-Script file
Args:
path: Path to the file
Returns:
CompilationResult
"""
path = pathlib.Path(path)
if not path.exists():
error = ErrorDetail(
type=ErrorType.RUNTIME,
code="E100",
severity=Severity.ERROR,
message=f"File not found: {path}",
line=0,
column=0,
source_line=""
)
return CompilationResult(success=False, errors=[error])
try:
script = path.read_text()
return cls.compile(script, root=path.parent)
except Exception as e:
error = ErrorDetail(
type=ErrorType.RUNTIME,
code="E101",
severity=Severity.ERROR,
message=f"Error reading file: {str(e)}",
line=0,
column=0,
source_line=""
)
return CompilationResult(success=False, errors=[error])
@classmethod
def _exception_to_error(cls, exc: Exception, script_lines: List[str]) -> ErrorDetail:
"""Convert an exception to ErrorDetail"""
if isinstance(exc, UnexpectedToken):
return cls._handle_unexpected_token(exc, script_lines)
elif isinstance(exc, UnexpectedCharacters):
return cls._handle_unexpected_chars(exc, script_lines)
elif isinstance(exc, ValueError):
return cls._handle_value_error(exc, script_lines)
else:
# Generic error
return ErrorDetail(
type=ErrorType.SYNTAX,
code=cls.ERROR_CODES["syntax_error"],
severity=Severity.ERROR,
message=str(exc),
line=1,
column=1,
source_line=script_lines[0] if script_lines else ""
)
@classmethod
def _handle_unexpected_token(cls, exc: UnexpectedToken, script_lines: List[str]) -> ErrorDetail:
"""Handle UnexpectedToken errors"""
line = exc.line
column = exc.column
# Get context lines
source_line = script_lines[line - 1] if 0 < line <= len(script_lines) else ""
line_before = script_lines[line - 2] if line > 1 and line <= len(script_lines) + 1 else None
line_after = script_lines[line] if 0 < line < len(script_lines) else None
# Determine error type and suggestions
if exc.token.type == 'CLICK' and 'THEN' in str(exc.expected):
code = cls.ERROR_CODES["missing_then"]
message = "Missing 'THEN' keyword after IF condition"
suggestions = [
Suggestion(
"Add 'THEN' after the condition",
source_line.replace("CLICK", "THEN CLICK") if source_line else None
)
]
elif exc.token.type == '$END':
code = cls.ERROR_CODES["missing_endproc"]
message = "Unexpected end of script"
suggestions = [
Suggestion("Check for missing ENDPROC"),
Suggestion("Ensure all procedures are properly closed")
]
elif 'RPAR' in str(exc.expected):
code = cls.ERROR_CODES["missing_paren"]
message = "Missing closing parenthesis ')'"
suggestions = [
Suggestion("Add closing parenthesis at the end of the condition")
]
elif 'COMMA' in str(exc.expected):
code = cls.ERROR_CODES["missing_comma"]
message = "Missing comma ',' in command"
suggestions = [
Suggestion("Add comma between arguments")
]
else:
# Check if this might be missing backticks
if exc.token.type == 'NAME' and 'BACKTICK_STRING' in str(exc.expected):
code = cls.ERROR_CODES["missing_backticks"]
message = "Selector must be wrapped in backticks"
suggestions = [
Suggestion(
"Wrap the selector in backticks",
f"`{exc.token.value}`"
)
]
else:
code = cls.ERROR_CODES["syntax_error"]
message = f"Unexpected '{exc.token.value}'"
if exc.expected:
expected_list = [str(e) for e in exc.expected if not str(e).startswith('_')][:3]
if expected_list:
message += f". Expected: {', '.join(expected_list)}"
suggestions = []
return ErrorDetail(
type=ErrorType.SYNTAX,
code=code,
severity=Severity.ERROR,
message=message,
line=line,
column=column,
source_line=source_line,
line_before=line_before,
line_after=line_after,
suggestions=suggestions
)
@classmethod
def _handle_unexpected_chars(cls, exc: UnexpectedCharacters, script_lines: List[str]) -> ErrorDetail:
"""Handle UnexpectedCharacters errors"""
line = exc.line
column = exc.column
source_line = script_lines[line - 1] if 0 < line <= len(script_lines) else ""
# Check for missing backticks
if "CLICK" in source_line and column > source_line.find("CLICK"):
code = cls.ERROR_CODES["missing_backticks"]
message = "Selector must be wrapped in backticks"
suggestions = [
Suggestion(
"Wrap the selector in backticks",
re.sub(r'CLICK\s+([^\s]+)', r'CLICK `\1`', source_line)
)
]
else:
code = cls.ERROR_CODES["syntax_error"]
message = f"Invalid character at position {column}"
suggestions = []
return ErrorDetail(
type=ErrorType.SYNTAX,
code=code,
severity=Severity.ERROR,
message=message,
line=line,
column=column,
source_line=source_line,
suggestions=suggestions
)
@classmethod
def _handle_value_error(cls, exc: ValueError, script_lines: List[str]) -> ErrorDetail:
"""Handle ValueError (runtime errors)"""
message = str(exc)
# Check for undefined procedure
if "Unknown procedure" in message:
proc_match = re.search(r"'([^']+)'", message)
if proc_match:
proc_name = proc_match.group(1)
# Find the line with the procedure call
for i, line in enumerate(script_lines):
if proc_name in line and not line.strip().startswith('PROC'):
return ErrorDetail(
type=ErrorType.RUNTIME,
code=cls.ERROR_CODES["undefined_proc"],
severity=Severity.ERROR,
message=f"Undefined procedure '{proc_name}'",
line=i + 1,
column=line.find(proc_name) + 1,
source_line=line,
suggestions=[
Suggestion(
f"Define the procedure before using it",
f"PROC {proc_name}\n # commands here\nENDPROC"
)
]
)
# Generic runtime error
return ErrorDetail(
type=ErrorType.RUNTIME,
code="E999",
severity=Severity.ERROR,
message=message,
line=1,
column=1,
source_line=script_lines[0] if script_lines else ""
)
@staticmethod
def generate_script(
html: str,
query: str | None = None,
mode: str = "c4a",
llm_config: LLMConfig | None = None,
**completion_kwargs,
) -> str:
"""
One-shot helper that calls the LLM exactly once to convert a
natural-language goal + HTML snippet into either:
1. raw JavaScript (`mode="js"`)
2. Crawl4ai DSL (`mode="c4a"`)
The returned string is guaranteed to be free of markdown wrappers
or explanatory text, ready for direct execution.
"""
if llm_config is None:
llm_config = LLMConfig() # falls back to env vars / defaults
# Build the user chunk
user_prompt = "\n".join(
[
"## GOAL",
"<<goael>>",
(query or "Prepare the page for crawling."),
"<</goal>>",
"",
"## HTML",
"<<html>>",
html[:100000], # guardrail against token blast
"<</html>>",
"",
"## MODE",
mode,
]
)
# Call the LLM with retry/back-off logic
full_prompt = f"{GENERATE_SCRIPT_PROMPT}\n\n{user_prompt}" if mode == "c4a" else f"{GENERATE_JS_SCRIPT_PROMPT}\n\n{user_prompt}"
response = perform_completion_with_backoff(
provider=llm_config.provider,
prompt_with_variables=full_prompt,
api_token=llm_config.api_token,
json_response=False,
base_url=getattr(llm_config, 'base_url', None),
**completion_kwargs,
)
# Extract content from the response
raw_response = response.choices[0].message.content.strip()
# Strip accidental markdown fences (```js … ```)
clean = re.sub(r"^```(?:[a-zA-Z0-9_-]+)?\s*|```$", "", raw_response, flags=re.MULTILINE).strip()
if not clean:
raise RuntimeError("LLM returned empty script.")
return clean
# Convenience functions for direct use
def compile(script: Union[str, List[str]], root: Optional[pathlib.Path] = None) -> CompilationResult:
"""Compile C4A-Script to JavaScript"""
return C4ACompiler.compile(script, root)
def validate(script: Union[str, List[str]]) -> ValidationResult:
"""Validate C4A-Script syntax"""
return C4ACompiler.validate(script)
def compile_file(path: Union[str, pathlib.Path]) -> CompilationResult:
"""Compile C4A-Script file"""
return C4ACompiler.compile_file(path)
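A short sketch of the result-based API: nothing raises, so success and errors are read off the returned object.

script = """
GO https://example.com/login
WAIT `input[name="username"]` 10
SET `input[name="username"]` "tom@crawl4ai.com"
CLICK `button.submit`
"""

result = compile(script)          # module-level convenience wrapper above
if result.success:
    for js in result.js_code:
        print(js)
else:
    print(result.first_error.formatted_message)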

View File

@@ -1,219 +0,0 @@
"""
Result classes for C4A-Script compilation
Clean API design with no exceptions
"""
from __future__ import annotations
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Dict, Any, Optional
import json
class ErrorType(Enum):
SYNTAX = "syntax"
SEMANTIC = "semantic"
RUNTIME = "runtime"
class Severity(Enum):
ERROR = "error"
WARNING = "warning"
INFO = "info"
@dataclass
class Suggestion:
"""A suggestion for fixing an error"""
message: str
fix: Optional[str] = None
def to_dict(self) -> dict:
return {
"message": self.message,
"fix": self.fix
}
@dataclass
class ErrorDetail:
"""Detailed information about a compilation error"""
# Core info
type: ErrorType
code: str # E001, E002, etc.
severity: Severity
message: str
# Location
line: int
column: int
# Context
source_line: str
# Optional fields with defaults
end_line: Optional[int] = None
end_column: Optional[int] = None
line_before: Optional[str] = None
line_after: Optional[str] = None
# Help
suggestions: List[Suggestion] = field(default_factory=list)
documentation_url: Optional[str] = None
def to_dict(self) -> dict:
"""Convert to dictionary for JSON serialization"""
return {
"type": self.type.value,
"code": self.code,
"severity": self.severity.value,
"message": self.message,
"location": {
"line": self.line,
"column": self.column,
"endLine": self.end_line,
"endColumn": self.end_column
},
"context": {
"sourceLine": self.source_line,
"lineBefore": self.line_before,
"lineAfter": self.line_after,
"marker": {
"start": self.column - 1,
"length": (self.end_column - self.column) if self.end_column else 1
}
},
"suggestions": [s.to_dict() for s in self.suggestions],
"documentationUrl": self.documentation_url
}
def to_json(self) -> str:
"""Convert to JSON string"""
return json.dumps(self.to_dict(), indent=2)
@property
def formatted_message(self) -> str:
"""Returns the nice text format for terminals"""
lines = []
lines.append(f"\n{'='*60}")
lines.append(f"{self.type.value.title()} Error [{self.code}]")
lines.append(f"{'='*60}")
lines.append(f"Location: Line {self.line}, Column {self.column}")
lines.append(f"Error: {self.message}")
if self.source_line:
marker = " " * (self.column - 1) + "^"
if self.end_column:
marker += "~" * (self.end_column - self.column - 1)
lines.append(f"\nCode:")
if self.line_before:
lines.append(f" {self.line - 1: >3} | {self.line_before}")
lines.append(f" {self.line: >3} | {self.source_line}")
lines.append(f" | {marker}")
if self.line_after:
lines.append(f" {self.line + 1: >3} | {self.line_after}")
if self.suggestions:
lines.append("\nSuggestions:")
for i, suggestion in enumerate(self.suggestions, 1):
lines.append(f" {i}. {suggestion.message}")
if suggestion.fix:
lines.append(f" Fix: {suggestion.fix}")
lines.append("="*60)
return "\n".join(lines)
@property
def simple_message(self) -> str:
"""Returns just the error message without formatting"""
return f"Line {self.line}: {self.message}"
@dataclass
class WarningDetail:
"""Information about a compilation warning"""
code: str
message: str
line: int
column: int
def to_dict(self) -> dict:
return {
"code": self.code,
"message": self.message,
"line": self.line,
"column": self.column
}
@dataclass
class CompilationResult:
"""Result of C4A-Script compilation"""
success: bool
js_code: Optional[List[str]] = None
errors: List[ErrorDetail] = field(default_factory=list)
warnings: List[WarningDetail] = field(default_factory=list)
metadata: Dict[str, Any] = field(default_factory=dict)
def to_dict(self) -> dict:
"""Convert to dictionary for JSON serialization"""
return {
"success": self.success,
"jsCode": self.js_code,
"errors": [e.to_dict() for e in self.errors],
"warnings": [w.to_dict() for w in self.warnings],
"metadata": self.metadata
}
def to_json(self) -> str:
"""Convert to JSON string"""
return json.dumps(self.to_dict(), indent=2)
@property
def has_errors(self) -> bool:
"""Check if there are any errors"""
return len(self.errors) > 0
@property
def has_warnings(self) -> bool:
"""Check if there are any warnings"""
return len(self.warnings) > 0
@property
def first_error(self) -> Optional[ErrorDetail]:
"""Get the first error if any"""
return self.errors[0] if self.errors else None
def __str__(self) -> str:
"""String representation for debugging"""
if self.success:
msg = f"✓ Compilation successful"
if self.js_code:
msg += f" - {len(self.js_code)} statements generated"
if self.warnings:
msg += f" ({len(self.warnings)} warnings)"
return msg
else:
return f"✗ Compilation failed - {len(self.errors)} error(s)"
@dataclass
class ValidationResult:
"""Result of script validation"""
valid: bool
errors: List[ErrorDetail] = field(default_factory=list)
warnings: List[WarningDetail] = field(default_factory=list)
def to_dict(self) -> dict:
return {
"valid": self.valid,
"errors": [e.to_dict() for e in self.errors],
"warnings": [w.to_dict() for w in self.warnings]
}
def to_json(self) -> str:
return json.dumps(self.to_dict(), indent=2)
@property
def first_error(self) -> Optional[ErrorDetail]:
return self.errors[0] if self.errors else None
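Because every result type serializes via to_dict()/to_json(), a frontend (for example the playground editor) can consume errors directly. A small sketch with an illustrative error:

error = ErrorDetail(
    type=ErrorType.SYNTAX,
    code="E001",
    severity=Severity.ERROR,
    message="Missing 'THEN' keyword after IF condition",
    line=3,
    column=31,
    source_line="IF (EXISTS `.cookie-banner`) CLICK `.accept`",
    suggestions=[Suggestion("Add 'THEN' after the condition")],
)
print(error.simple_message)   # terse, for logs
print(error.to_json())        # structured, for an editor UI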

View File

@@ -1,690 +0,0 @@
"""
2025-06-03
By UncleCode:
C4A-Script Language Documentation
Feeds Crawl4AI via CrawlerRunConfig(js_code=[ ... ]); no core modifications needed.
"""
from __future__ import annotations
import pathlib, re, sys, textwrap
from dataclasses import dataclass
from typing import Any, Dict, List, Union
from lark import Lark, Transformer, v_args
from lark.exceptions import UnexpectedToken, UnexpectedCharacters, VisitError
# --------------------------------------------------------------------------- #
# Custom Error Classes
# --------------------------------------------------------------------------- #
class C4AScriptError(Exception):
"""Custom error class for C4A-Script compilation errors"""
def __init__(self, message: str, line: int = None, column: int = None,
error_type: str = "Syntax Error", details: str = None):
self.message = message
self.line = line
self.column = column
self.error_type = error_type
self.details = details
super().__init__(self._format_message())
def _format_message(self) -> str:
"""Format a clear error message"""
lines = [f"\n{'='*60}"]
lines.append(f"C4A-Script {self.error_type}")
lines.append(f"{'='*60}")
if self.line:
lines.append(f"Location: Line {self.line}" + (f", Column {self.column}" if self.column else ""))
lines.append(f"Error: {self.message}")
if self.details:
lines.append(f"\nDetails: {self.details}")
lines.append("="*60)
return "\n".join(lines)
@classmethod
def from_exception(cls, exc: Exception, script: Union[str, List[str]]) -> 'C4AScriptError':
"""Create C4AScriptError from another exception"""
script_text = script if isinstance(script, str) else '\n'.join(script)
script_lines = script_text.split('\n')
if isinstance(exc, UnexpectedToken):
# Extract line and column from UnexpectedToken
line = exc.line
column = exc.column
# Get the problematic line
if 0 < line <= len(script_lines):
problem_line = script_lines[line - 1]
marker = " " * (column - 1) + "^"
details = f"\nCode:\n {problem_line}\n {marker}\n"
# Improve error message based on context
if exc.token.type == 'CLICK' and 'THEN' in str(exc.expected):
message = "Missing 'THEN' keyword after IF condition"
elif exc.token.type == '$END':
message = "Unexpected end of script. Check for missing ENDPROC or incomplete commands"
elif 'RPAR' in str(exc.expected):
message = "Missing closing parenthesis ')'"
elif 'COMMA' in str(exc.expected):
message = "Missing comma ',' in command"
else:
message = f"Unexpected '{exc.token}'"
if exc.expected:
expected_list = [str(e) for e in exc.expected if not e.startswith('_')]
if expected_list:
message += f". Expected: {', '.join(expected_list[:3])}"
details += f"Token: {exc.token.type} ('{exc.token.value}')"
else:
message = str(exc)
details = None
return cls(message, line, column, "Syntax Error", details)
elif isinstance(exc, UnexpectedCharacters):
# Extract line and column
line = exc.line
column = exc.column
if 0 < line <= len(script_lines):
problem_line = script_lines[line - 1]
marker = " " * (column - 1) + "^"
details = f"\nCode:\n {problem_line}\n {marker}\n"
message = f"Invalid character or unexpected text at position {column}"
else:
message = str(exc)
details = None
return cls(message, line, column, "Syntax Error", details)
elif isinstance(exc, ValueError):
# Handle runtime errors like undefined procedures
message = str(exc)
# Try to find which line caused the error
if "Unknown procedure" in message:
proc_name = re.search(r"'([^']+)'", message)
if proc_name:
proc_name = proc_name.group(1)
for i, line in enumerate(script_lines, 1):
if proc_name in line and not line.strip().startswith('PROC'):
details = f"\nCode:\n {line.strip()}\n\nMake sure the procedure '{proc_name}' is defined with PROC...ENDPROC"
return cls(f"Undefined procedure '{proc_name}'", i, None, "Runtime Error", details)
return cls(message, None, None, "Runtime Error", None)
else:
# Generic error
return cls(str(exc), None, None, "Compilation Error", None)
# --------------------------------------------------------------------------- #
# 1. Grammar
# --------------------------------------------------------------------------- #
GRAMMAR = r"""
start : line*
?line : command | proc_def | include | comment
command : wait | nav | click_cmd | double_click | right_click | move | drag | scroll
| type | clear | set_input | press | key_down | key_up
| eval_cmd | setvar | proc_call | if_cmd | repeat_cmd
wait : "WAIT" (ESCAPED_STRING|BACKTICK_STRING|NUMBER) NUMBER? -> wait_cmd
nav : "GO" URL -> go
| "RELOAD" -> reload
| "BACK" -> back
| "FORWARD" -> forward
click_cmd : "CLICK" (BACKTICK_STRING|NUMBER NUMBER) -> click
double_click : "DOUBLE_CLICK" (BACKTICK_STRING|NUMBER NUMBER) -> double_click
right_click : "RIGHT_CLICK" (BACKTICK_STRING|NUMBER NUMBER) -> right_click
move : "MOVE" coords -> move
drag : "DRAG" coords coords -> drag
scroll : "SCROLL" DIR NUMBER? -> scroll
type : "TYPE" (ESCAPED_STRING | NAME) -> type
clear : "CLEAR" BACKTICK_STRING -> clear
set_input : "SET" BACKTICK_STRING (ESCAPED_STRING | BACKTICK_STRING | NAME) -> set_input
press : "PRESS" WORD -> press
key_down : "KEY_DOWN" WORD -> key_down
key_up : "KEY_UP" WORD -> key_up
eval_cmd : "EVAL" BACKTICK_STRING -> eval_cmd
setvar : "SETVAR" NAME "=" value -> setvar
proc_call : NAME -> proc_call
proc_def : "PROC" NAME line* "ENDPROC" -> proc_def
include : "USE" ESCAPED_STRING -> include
comment : /#.*/ -> comment
if_cmd : "IF" "(" condition ")" "THEN" command ("ELSE" command)? -> if_cmd
repeat_cmd : "REPEAT" "(" command "," repeat_count ")" -> repeat_cmd
condition : not_cond | exists_cond | js_cond
not_cond : "NOT" condition -> not_cond
exists_cond : "EXISTS" BACKTICK_STRING -> exists_cond
js_cond : BACKTICK_STRING -> js_cond
repeat_count : NUMBER | BACKTICK_STRING
coords : NUMBER NUMBER
value : ESCAPED_STRING | BACKTICK_STRING | NUMBER
DIR : /(UP|DOWN|LEFT|RIGHT)/i
REST : /[^\n]+/
URL : /(http|https):\/\/[^\s]+/
NAME : /\$?[A-Za-z_][A-Za-z0-9_]*/
WORD : /[A-Za-z0-9+]+/
BACKTICK_STRING : /`[^`]*`/
%import common.NUMBER
%import common.ESCAPED_STRING
%import common.WS_INLINE
%import common.NEWLINE
%ignore WS_INLINE
%ignore NEWLINE
"""
# --------------------------------------------------------------------------- #
# 2. IR dataclasses
# --------------------------------------------------------------------------- #
@dataclass
class Cmd:
op: str
args: List[Any]
@dataclass
class Proc:
name: str
body: List[Cmd]
# --------------------------------------------------------------------------- #
# 3. AST → IR
# --------------------------------------------------------------------------- #
@v_args(inline=True)
class ASTBuilder(Transformer):
# helpers
def _strip(self, s):
if s.startswith('"') and s.endswith('"'):
return s[1:-1]
elif s.startswith('`') and s.endswith('`'):
return s[1:-1]
return s
def start(self,*i): return list(i)
def line(self,i): return i
def command(self,i): return i
# WAIT
def wait_cmd(self, rest, timeout=None):
rest_str = str(rest)
# Check if it's a number (including floats)
try:
num_val = float(rest_str)
payload = (num_val, "seconds")
except ValueError:
if rest_str.startswith('"') and rest_str.endswith('"'):
payload = (self._strip(rest_str), "text")
elif rest_str.startswith('`') and rest_str.endswith('`'):
payload = (self._strip(rest_str), "selector")
else:
payload = (rest_str, "selector")
return Cmd("WAIT", [payload, int(timeout) if timeout else None])
# NAV
def go(self,u): return Cmd("GO",[str(u)])
def reload(self): return Cmd("RELOAD",[])
def back(self): return Cmd("BACK",[])
def forward(self): return Cmd("FORWARD",[])
# CLICK, DOUBLE_CLICK, RIGHT_CLICK
def click(self, *args):
return self._handle_click("CLICK", args)
def double_click(self, *args):
return self._handle_click("DBLCLICK", args)
def right_click(self, *args):
return self._handle_click("RIGHTCLICK", args)
def _handle_click(self, op, args):
if len(args) == 1:
# Single argument - backtick string
target = self._strip(str(args[0]))
return Cmd(op, [("selector", target)])
else:
# Two arguments - coordinates
x, y = args
return Cmd(op, [("coords", int(x), int(y))])
# MOVE / DRAG / SCROLL
def coords(self,x,y): return ("coords",int(x),int(y))
def move(self,c): return Cmd("MOVE",[c])
def drag(self,c1,c2): return Cmd("DRAG",[c1,c2])
def scroll(self,dir_tok,amt=None):
return Cmd("SCROLL",[dir_tok.upper(), int(amt) if amt else 500])
# KEYS
def type(self,tok): return Cmd("TYPE",[self._strip(str(tok))])
def clear(self,sel): return Cmd("CLEAR",[self._strip(str(sel))])
def set_input(self,sel,val): return Cmd("SET",[self._strip(str(sel)), self._strip(str(val))])
def press(self,w): return Cmd("PRESS",[str(w)])
def key_down(self,w): return Cmd("KEYDOWN",[str(w)])
def key_up(self,w): return Cmd("KEYUP",[str(w)])
# FLOW
def eval_cmd(self,txt): return Cmd("EVAL",[self._strip(str(txt))])
def setvar(self,n,v):
# v might be a Token or a Tree, extract value properly
if hasattr(v, 'value'):
value = v.value
elif hasattr(v, 'children') and len(v.children) > 0:
value = v.children[0].value
else:
value = str(v)
return Cmd("SETVAR",[str(n), self._strip(value)])
def proc_call(self,n): return Cmd("CALL",[str(n)])
def proc_def(self,n,*body): return Proc(str(n),[b for b in body if isinstance(b,Cmd)])
def include(self,p): return Cmd("INCLUDE",[self._strip(p)])
def comment(self,*_): return Cmd("NOP",[])
# IF-THEN-ELSE and EXISTS
def if_cmd(self, condition, then_cmd, else_cmd=None):
return Cmd("IF", [condition, then_cmd, else_cmd])
def condition(self, cond):
return cond
def not_cond(self, cond):
return ("NOT", cond)
def exists_cond(self, selector):
return ("EXISTS", self._strip(str(selector)))
def js_cond(self, expr):
return ("JS", self._strip(str(expr)))
# REPEAT
def repeat_cmd(self, cmd, count):
return Cmd("REPEAT", [cmd, count])
def repeat_count(self, value):
return str(value)
# --------------------------------------------------------------------------- #
# 4. Compiler
# --------------------------------------------------------------------------- #
class Compiler:
def __init__(self, root: pathlib.Path|None=None):
self.parser = Lark(GRAMMAR,start="start",parser="lalr")
self.root = pathlib.Path(root or ".").resolve()
self.vars: Dict[str,Any] = {}
self.procs: Dict[str,Proc]= {}
def compile(self, text: Union[str, List[str]]) -> List[str]:
# Handle list input by joining with newlines
if isinstance(text, list):
text = '\n'.join(text)
ir = self._parse_with_includes(text)
ir = self._collect_procs(ir)
ir = self._inline_calls(ir)
ir = self._apply_set_vars(ir)
return [self._emit_js(c) for c in ir if isinstance(c,Cmd) and c.op!="NOP"]
# passes
def _parse_with_includes(self,txt,seen=None):
seen=seen or set()
cmds=ASTBuilder().transform(self.parser.parse(txt))
out=[]
for c in cmds:
if isinstance(c,Cmd) and c.op=="INCLUDE":
p=(self.root/c.args[0]).resolve()
if p in seen: raise ValueError(f"Circular include {p}")
seen.add(p); out+=self._parse_with_includes(p.read_text(),seen)
else: out.append(c)
return out
def _collect_procs(self,ir):
out=[]
for i in ir:
if isinstance(i,Proc): self.procs[i.name]=i
else: out.append(i)
return out
def _inline_calls(self,ir):
out=[]
for c in ir:
if isinstance(c,Cmd) and c.op=="CALL":
if c.args[0] not in self.procs:
raise ValueError(f"Unknown procedure {c.args[0]!r}")
out+=self._inline_calls(self.procs[c.args[0]].body)
else: out.append(c)
return out
def _apply_set_vars(self,ir):
def sub(s): return re.sub(r"\$(\w+)",lambda m:str(self.vars.get(m.group(1),m.group(0))) ,s) if isinstance(s,str) else s
out=[]
for c in ir:
if isinstance(c,Cmd):
if c.op=="SETVAR":
# Store variable
self.vars[c.args[0].lstrip('$')]=c.args[1]
else:
# Apply variable substitution to commands that use them
if c.op in("TYPE","EVAL","SET"): c.args=[sub(a) for a in c.args]
out.append(c)
return out
# JS emitter
def _emit_js(self, cmd: Cmd) -> str:
op, a = cmd.op, cmd.args
if op == "GO": return f"window.location.href = '{a[0]}';"
if op == "RELOAD": return "window.location.reload();"
if op == "BACK": return "window.history.back();"
if op == "FORWARD": return "window.history.forward();"
if op == "WAIT":
arg, kind = a[0]
timeout = a[1] or 10
if kind == "seconds":
return f"await new Promise(r=>setTimeout(r,{arg}*1000));"
if kind == "selector":
sel = arg.replace("\\","\\\\").replace("'","\\'")
return textwrap.dedent(f"""
await new Promise((res,rej)=>{{
const max = {timeout*1000}, t0 = performance.now();
const id = setInterval(()=>{{
if(document.querySelector('{sel}')){{clearInterval(id);res();}}
else if(performance.now()-t0>max){{clearInterval(id);rej('WAIT selector timeout');}}
}},100);
}});
""").strip()
if kind == "text":
txt = arg.replace('`', '\\`')
return textwrap.dedent(f"""
await new Promise((res,rej)=>{{
const max={timeout*1000},t0=performance.now();
const id=setInterval(()=>{{
if(document.body.innerText.includes(`{txt}`)){{clearInterval(id);res();}}
else if(performance.now()-t0>max){{clearInterval(id);rej('WAIT text timeout');}}
}},100);
}});
""").strip()
# click-style helpers
def _js_click(sel, evt="click", button=0, detail=1):
sel = sel.replace("'", "\\'")
return textwrap.dedent(f"""
(()=>{{
const el=document.querySelector('{sel}');
if(el){{
el.focus&&el.focus();
el.dispatchEvent(new MouseEvent('{evt}',{{bubbles:true,button:{button},detail:{detail}}}));
}}
}})();
""").strip()
def _js_click_xy(x, y, evt="click", button=0, detail=1):
return textwrap.dedent(f"""
(()=>{{
const el=document.elementFromPoint({x},{y});
if(el){{
el.focus&&el.focus();
el.dispatchEvent(new MouseEvent('{evt}',{{bubbles:true,button:{button},detail:{detail}}}));
}}
}})();
""").strip()
if op in ("CLICK", "DBLCLICK", "RIGHTCLICK"):
evt = {"CLICK":"click","DBLCLICK":"dblclick","RIGHTCLICK":"contextmenu"}[op]
btn = 2 if op=="RIGHTCLICK" else 0
det = 2 if op=="DBLCLICK" else 1
kind,*rest = a[0]
return _js_click_xy(*rest) if kind=="coords" else _js_click(rest[0],evt,btn,det)
if op == "MOVE":
_, x, y = a[0]
return textwrap.dedent(f"""
document.dispatchEvent(new MouseEvent('mousemove',{{clientX:{x},clientY:{y},bubbles:true}}));
""").strip()
if op == "DRAG":
(_, x1, y1), (_, x2, y2) = a
return textwrap.dedent(f"""
(()=>{{
const s=document.elementFromPoint({x1},{y1});
if(!s) return;
s.dispatchEvent(new MouseEvent('mousedown',{{bubbles:true,clientX:{x1},clientY:{y1}}}));
document.dispatchEvent(new MouseEvent('mousemove',{{bubbles:true,clientX:{x2},clientY:{y2}}}));
document.dispatchEvent(new MouseEvent('mouseup', {{bubbles:true,clientX:{x2},clientY:{y2}}}));
}})();
""").strip()
if op == "SCROLL":
dir_, amt = a
dx, dy = {"UP":(0,-amt),"DOWN":(0,amt),"LEFT":(-amt,0),"RIGHT":(amt,0)}[dir_]
return f"window.scrollBy({dx},{dy});"
if op == "TYPE":
txt = a[0].replace("'", "\\'")
return textwrap.dedent(f"""
(()=>{{
const el=document.activeElement;
if(el){{
el.value += '{txt}';
el.dispatchEvent(new Event('input',{{bubbles:true}}));
}}
}})();
""").strip()
if op == "CLEAR":
sel = a[0].replace("'", "\\'")
return textwrap.dedent(f"""
(()=>{{
const el=document.querySelector('{sel}');
if(el && 'value' in el){{
el.value = '';
el.dispatchEvent(new Event('input',{{bubbles:true}}));
el.dispatchEvent(new Event('change',{{bubbles:true}}));
}}
}})();
""").strip()
if op == "SET" and len(a) == 2:
# This is SET for input fields (SET `#field` "value")
sel = a[0].replace("'", "\\'")
val = a[1].replace("'", "\\'")
return textwrap.dedent(f"""
(()=>{{
const el=document.querySelector('{sel}');
if(el && 'value' in el){{
el.value = '';
el.focus&&el.focus();
el.value = '{val}';
el.dispatchEvent(new Event('input',{{bubbles:true}}));
el.dispatchEvent(new Event('change',{{bubbles:true}}));
}}
}})();
""").strip()
if op in ("PRESS","KEYDOWN","KEYUP"):
key = a[0]
evs = {"PRESS":("keydown","keyup"),"KEYDOWN":("keydown",),"KEYUP":("keyup",)}[op]
return ";".join([f"document.dispatchEvent(new KeyboardEvent('{e}',{{key:'{key}',bubbles:true}}))" for e in evs]) + ";"
if op == "EVAL":
return textwrap.dedent(f"""
(()=>{{
try {{
{a[0]};
}} catch (e) {{
console.error('C4A-Script EVAL error:', e);
}}
}})();
""").strip()
if op == "IF":
condition, then_cmd, else_cmd = a
# Generate condition JavaScript
js_condition = self._emit_condition(condition)
# Generate commands - handle both regular commands and procedure calls
then_js = self._handle_cmd_or_proc(then_cmd)
else_js = self._handle_cmd_or_proc(else_cmd) if else_cmd else ""
if else_cmd:
return textwrap.dedent(f"""
if ({js_condition}) {{
{then_js}
}} else {{
{else_js}
}}
""").strip()
else:
return textwrap.dedent(f"""
if ({js_condition}) {{
{then_js}
}}
""").strip()
if op == "REPEAT":
cmd, count = a
# Handle the count - could be number or JS expression
if count.isdigit():
# Simple number
repeat_js = self._handle_cmd_or_proc(cmd)
return textwrap.dedent(f"""
for (let _i = 0; _i < {count}; _i++) {{
{repeat_js}
}}
""").strip()
else:
# JS expression (from backticks)
count_expr = count[1:-1] if count.startswith('`') and count.endswith('`') else count
repeat_js = self._handle_cmd_or_proc(cmd)
return textwrap.dedent(f"""
(()=>{{
const _count = {count_expr};
if (typeof _count === 'number') {{
for (let _i = 0; _i < _count; _i++) {{
{repeat_js}
}}
}} else if (_count) {{
{repeat_js}
}}
}})();
""").strip()
raise ValueError(f"Unhandled op {op}")
def _emit_condition(self, condition):
"""Convert a condition tuple to JavaScript"""
cond_type = condition[0]
if cond_type == "EXISTS":
return f"!!document.querySelector('{condition[1]}')"
elif cond_type == "NOT":
# Recursively handle the negated condition
inner_condition = self._emit_condition(condition[1])
return f"!({inner_condition})"
else: # JS condition
return condition[1]
def _handle_cmd_or_proc(self, cmd):
"""Handle a command that might be a regular command or a procedure call"""
if not cmd:
return ""
if isinstance(cmd, Cmd):
if cmd.op == "CALL":
# Inline the procedure
if cmd.args[0] not in self.procs:
raise ValueError(f"Unknown procedure {cmd.args[0]!r}")
proc_body = self.procs[cmd.args[0]].body
return "\n".join([self._emit_js(c) for c in proc_body if c.op != "NOP"])
else:
return self._emit_js(cmd)
return ""
# --------------------------------------------------------------------------- #
# 5. Helpers + demo
# --------------------------------------------------------------------------- #
def compile_string(script: Union[str, List[str]], *, root: Union[pathlib.Path, None] = None) -> List[str]:
"""Compile C4A-Script from string or list of strings to JavaScript.
Args:
script: C4A-Script as a string or list of command strings
root: Root directory for resolving includes (optional)
Returns:
List of JavaScript command strings
Raises:
C4AScriptError: When compilation fails with detailed error information
"""
try:
return Compiler(root).compile(script)
except Exception as e:
# Wrap the error with better formatting
raise C4AScriptError.from_exception(e, script)
def compile_file(path: pathlib.Path) -> List[str]:
"""Compile C4A-Script from file to JavaScript.
Args:
path: Path to C4A-Script file
Returns:
List of JavaScript command strings
"""
return compile_string(path.read_text(), root=path.parent)
def compile_lines(lines: List[str], *, root: Union[pathlib.Path, None] = None) -> List[str]:
"""Compile C4A-Script from list of lines to JavaScript.
Args:
lines: List of C4A-Script command lines
root: Root directory for resolving includes (optional)
Returns:
List of JavaScript command strings
"""
return compile_string(lines, root=root)
DEMO = """
# quick sanity demo
PROC login
SET `input[name="username"]` $user
SET `input[name="password"]` $pass
CLICK `button.submit`
ENDPROC
SETVAR user = "tom@crawl4ai.com"
SETVAR pass = "hunter2"
GO https://example.com/login
WAIT `input[name="username"]` 10
login
WAIT 3
EVAL `console.log('logged in')`
"""
if __name__ == "__main__":
if len(sys.argv) == 2:
for js in compile_file(pathlib.Path(sys.argv[1])):
print(js)
else:
print("=== DEMO ===")
for js in compile_string(DEMO):
print(js)
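
For context on this deleted compiler module, here is a minimal usage sketch of its compile_string helper. The script text is illustrative and mirrors the commands already used in the DEMO above; the commented import line is an assumption, since this listing does not show where the module lived inside the package.

# assuming this module is importable; adjust the import to the actual package path
# from crawl4ai import compile_string

script = "\n".join([
    "GO https://example.com/login",
    "WAIT `input[name=\"username\"]` 10",
    "CLICK `button.submit`",
])

# compile_string returns one standalone JavaScript snippet per executable command
for js in compile_string(script):
    print(js)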

View File

@@ -1,204 +0,0 @@
"""SSL Certificate class for handling certificate operations."""
import ssl
import socket
import base64
import json
from typing import Dict, Any, Optional
from urllib.parse import urlparse
import OpenSSL.crypto
from pathlib import Path
# === Inherit from dict ===
class SSLCertificate(dict):
"""
    An SSL certificate wrapper that inherits from dict, so instances are directly
    JSON serializable. The decoded certificate fields are stored as dictionary items,
    with convenience properties and export methods (JSON, PEM, DER) layered on top.
"""
# Use __slots__ for potential memory optimization if desired, though less common when inheriting dict
# __slots__ = ("_cert_info",) # If using slots, be careful with dict inheritance interaction
def __init__(self, cert_info: Dict[str, Any]):
"""
Initializes the SSLCertificate object.
Args:
cert_info (Dict[str, Any]): The raw certificate dictionary.
"""
# 1. Decode the data (handle bytes -> str)
decoded_info = self._decode_cert_data(cert_info)
# 2. Store the decoded info internally (optional but good practice)
# self._cert_info = decoded_info # You can keep this if methods rely on it
# 3. Initialize the dictionary part of the object with the decoded data
super().__init__(decoded_info)
@staticmethod
def _decode_cert_data(data: Any) -> Any:
"""Helper method to decode bytes in certificate data."""
if isinstance(data, bytes):
try:
# Try UTF-8 first, fallback to latin-1 for arbitrary bytes
return data.decode("utf-8")
except UnicodeDecodeError:
return data.decode("latin-1") # Or handle as needed, maybe hex representation
elif isinstance(data, dict):
return {
(
k.decode("utf-8") if isinstance(k, bytes) else k
): SSLCertificate._decode_cert_data(v)
for k, v in data.items()
}
elif isinstance(data, list):
return [SSLCertificate._decode_cert_data(item) for item in data]
return data
@staticmethod
def from_url(url: str, timeout: int = 10) -> Optional["SSLCertificate"]:
"""
        Create an SSLCertificate instance from a URL by fetching and parsing its peer certificate.
        Returns None if the certificate cannot be retrieved or parsed.
"""
cert_info_raw = None # Variable to hold the fetched dict
try:
hostname = urlparse(url).netloc
if ":" in hostname:
hostname = hostname.split(":")[0]
context = ssl.create_default_context()
# Set check_hostname to False and verify_mode to CERT_NONE temporarily
# for potentially problematic certificates during fetch, but parse the result regardless.
# context.check_hostname = False
# context.verify_mode = ssl.CERT_NONE
with socket.create_connection((hostname, 443), timeout=timeout) as sock:
with context.wrap_socket(sock, server_hostname=hostname) as ssock:
cert_binary = ssock.getpeercert(binary_form=True)
if not cert_binary:
print(f"Warning: No certificate returned for {hostname}")
return None
x509 = OpenSSL.crypto.load_certificate(
OpenSSL.crypto.FILETYPE_ASN1, cert_binary
)
# Create the dictionary directly
cert_info_raw = {
"subject": dict(x509.get_subject().get_components()),
"issuer": dict(x509.get_issuer().get_components()),
"version": x509.get_version(),
"serial_number": hex(x509.get_serial_number()),
"not_before": x509.get_notBefore(), # Keep as bytes initially, _decode handles it
"not_after": x509.get_notAfter(), # Keep as bytes initially
"fingerprint": x509.digest("sha256").hex(), # hex() is already string
"signature_algorithm": x509.get_signature_algorithm(), # Keep as bytes
"raw_cert": base64.b64encode(cert_binary), # Base64 is bytes, _decode handles it
}
# Add extensions
extensions = []
for i in range(x509.get_extension_count()):
ext = x509.get_extension(i)
# get_short_name() returns bytes, str(ext) handles value conversion
extensions.append(
{"name": ext.get_short_name(), "value": str(ext)}
)
cert_info_raw["extensions"] = extensions
except ssl.SSLCertVerificationError as e:
print(f"SSL Verification Error for {url}: {e}")
# Decide if you want to proceed or return None based on your needs
# You might try fetching without verification here if needed, but be cautious.
return None
except socket.gaierror:
print(f"Could not resolve hostname: {hostname}")
return None
except socket.timeout:
print(f"Connection timed out for {url}")
return None
except Exception as e:
print(f"Error fetching/processing certificate for {url}: {e}")
# Log the full error details if needed: logging.exception("Cert fetch error")
return None
# If successful, create the SSLCertificate instance from the dictionary
if cert_info_raw:
return SSLCertificate(cert_info_raw)
else:
return None
# --- Properties now access the dictionary items directly via self[] ---
@property
def issuer(self) -> Dict[str, str]:
return self.get("issuer", {}) # Use self.get for safety
@property
def subject(self) -> Dict[str, str]:
return self.get("subject", {})
@property
def valid_from(self) -> str:
return self.get("not_before", "")
@property
def valid_until(self) -> str:
return self.get("not_after", "")
@property
def fingerprint(self) -> str:
return self.get("fingerprint", "")
# --- Export methods can use `self` directly as it is the dict ---
def to_json(self, filepath: Optional[str] = None) -> Optional[str]:
"""Export certificate as JSON."""
# `self` is already the dictionary we want to serialize
json_str = json.dumps(self, indent=2, ensure_ascii=False)
if filepath:
Path(filepath).write_text(json_str, encoding="utf-8")
return None
return json_str
def to_pem(self, filepath: Optional[str] = None) -> Optional[str]:
"""Export certificate as PEM."""
try:
# Decode the raw_cert (which should be string due to _decode)
raw_cert_bytes = base64.b64decode(self.get("raw_cert", ""))
x509 = OpenSSL.crypto.load_certificate(
OpenSSL.crypto.FILETYPE_ASN1, raw_cert_bytes
)
pem_data = OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_PEM, x509
).decode("utf-8")
if filepath:
Path(filepath).write_text(pem_data, encoding="utf-8")
return None
return pem_data
except Exception as e:
print(f"Error converting to PEM: {e}")
return None
def to_der(self, filepath: Optional[str] = None) -> Optional[bytes]:
"""Export certificate as DER."""
try:
# Decode the raw_cert (which should be string due to _decode)
der_data = base64.b64decode(self.get("raw_cert", ""))
if filepath:
Path(filepath).write_bytes(der_data)
return None
return der_data
except Exception as e:
print(f"Error converting to DER: {e}")
return None
# Optional: Add __repr__ for better debugging
def __repr__(self) -> str:
subject_cn = self.subject.get('CN', 'N/A')
issuer_cn = self.issuer.get('CN', 'N/A')
return f"<SSLCertificate Subject='{subject_cn}' Issuer='{issuer_cn}'>"

File diff suppressed because it is too large

View File

@@ -1,195 +0,0 @@
from typing import TYPE_CHECKING, Union
# Logger types
AsyncLoggerBase = Union['AsyncLoggerBaseType']
AsyncLogger = Union['AsyncLoggerType']
# Crawler core types
AsyncWebCrawler = Union['AsyncWebCrawlerType']
CacheMode = Union['CacheModeType']
CrawlResult = Union['CrawlResultType']
CrawlerHub = Union['CrawlerHubType']
BrowserProfiler = Union['BrowserProfilerType']
# NEW: Add AsyncUrlSeederType
AsyncUrlSeeder = Union['AsyncUrlSeederType']
# Configuration types
BrowserConfig = Union['BrowserConfigType']
CrawlerRunConfig = Union['CrawlerRunConfigType']
HTTPCrawlerConfig = Union['HTTPCrawlerConfigType']
LLMConfig = Union['LLMConfigType']
# NEW: Add SeedingConfigType
SeedingConfig = Union['SeedingConfigType']
# Content scraping types
ContentScrapingStrategy = Union['ContentScrapingStrategyType']
LXMLWebScrapingStrategy = Union['LXMLWebScrapingStrategyType']
# Backward compatibility alias
WebScrapingStrategy = Union['LXMLWebScrapingStrategyType']
# Proxy types
ProxyRotationStrategy = Union['ProxyRotationStrategyType']
RoundRobinProxyStrategy = Union['RoundRobinProxyStrategyType']
# Extraction types
ExtractionStrategy = Union['ExtractionStrategyType']
LLMExtractionStrategy = Union['LLMExtractionStrategyType']
CosineStrategy = Union['CosineStrategyType']
JsonCssExtractionStrategy = Union['JsonCssExtractionStrategyType']
JsonXPathExtractionStrategy = Union['JsonXPathExtractionStrategyType']
# Chunking types
ChunkingStrategy = Union['ChunkingStrategyType']
RegexChunking = Union['RegexChunkingType']
# Markdown generation types
DefaultMarkdownGenerator = Union['DefaultMarkdownGeneratorType']
MarkdownGenerationResult = Union['MarkdownGenerationResultType']
# Content filter types
RelevantContentFilter = Union['RelevantContentFilterType']
PruningContentFilter = Union['PruningContentFilterType']
BM25ContentFilter = Union['BM25ContentFilterType']
LLMContentFilter = Union['LLMContentFilterType']
# Dispatcher types
BaseDispatcher = Union['BaseDispatcherType']
MemoryAdaptiveDispatcher = Union['MemoryAdaptiveDispatcherType']
SemaphoreDispatcher = Union['SemaphoreDispatcherType']
RateLimiter = Union['RateLimiterType']
CrawlerMonitor = Union['CrawlerMonitorType']
DisplayMode = Union['DisplayModeType']
RunManyReturn = Union['RunManyReturnType']
# Docker client
Crawl4aiDockerClient = Union['Crawl4aiDockerClientType']
# Deep crawling types
DeepCrawlStrategy = Union['DeepCrawlStrategyType']
BFSDeepCrawlStrategy = Union['BFSDeepCrawlStrategyType']
FilterChain = Union['FilterChainType']
ContentTypeFilter = Union['ContentTypeFilterType']
DomainFilter = Union['DomainFilterType']
URLFilter = Union['URLFilterType']
FilterStats = Union['FilterStatsType']
SEOFilter = Union['SEOFilterType']
KeywordRelevanceScorer = Union['KeywordRelevanceScorerType']
URLScorer = Union['URLScorerType']
CompositeScorer = Union['CompositeScorerType']
DomainAuthorityScorer = Union['DomainAuthorityScorerType']
FreshnessScorer = Union['FreshnessScorerType']
PathDepthScorer = Union['PathDepthScorerType']
BestFirstCrawlingStrategy = Union['BestFirstCrawlingStrategyType']
DFSDeepCrawlStrategy = Union['DFSDeepCrawlStrategyType']
DeepCrawlDecorator = Union['DeepCrawlDecoratorType']
# Only import types during type checking to avoid circular imports
if TYPE_CHECKING:
# Logger imports
from .async_logger import (
AsyncLoggerBase as AsyncLoggerBaseType,
AsyncLogger as AsyncLoggerType,
)
# Crawler core imports
from .async_webcrawler import (
AsyncWebCrawler as AsyncWebCrawlerType,
CacheMode as CacheModeType,
)
from .models import CrawlResult as CrawlResultType
from .hub import CrawlerHub as CrawlerHubType
from .browser_profiler import BrowserProfiler as BrowserProfilerType
# NEW: Import AsyncUrlSeeder for type checking
from .async_url_seeder import AsyncUrlSeeder as AsyncUrlSeederType
# Configuration imports
from .async_configs import (
BrowserConfig as BrowserConfigType,
CrawlerRunConfig as CrawlerRunConfigType,
HTTPCrawlerConfig as HTTPCrawlerConfigType,
LLMConfig as LLMConfigType,
# NEW: Import SeedingConfig for type checking
SeedingConfig as SeedingConfigType,
)
# Content scraping imports
from .content_scraping_strategy import (
ContentScrapingStrategy as ContentScrapingStrategyType,
LXMLWebScrapingStrategy as LXMLWebScrapingStrategyType,
)
# Proxy imports
from .proxy_strategy import (
ProxyRotationStrategy as ProxyRotationStrategyType,
RoundRobinProxyStrategy as RoundRobinProxyStrategyType,
)
# Extraction imports
from .extraction_strategy import (
ExtractionStrategy as ExtractionStrategyType,
LLMExtractionStrategy as LLMExtractionStrategyType,
CosineStrategy as CosineStrategyType,
JsonCssExtractionStrategy as JsonCssExtractionStrategyType,
JsonXPathExtractionStrategy as JsonXPathExtractionStrategyType,
)
# Chunking imports
from .chunking_strategy import (
ChunkingStrategy as ChunkingStrategyType,
RegexChunking as RegexChunkingType,
)
# Markdown generation imports
from .markdown_generation_strategy import (
DefaultMarkdownGenerator as DefaultMarkdownGeneratorType,
)
from .models import MarkdownGenerationResult as MarkdownGenerationResultType
# Content filter imports
from .content_filter_strategy import (
RelevantContentFilter as RelevantContentFilterType,
PruningContentFilter as PruningContentFilterType,
BM25ContentFilter as BM25ContentFilterType,
LLMContentFilter as LLMContentFilterType,
)
# Dispatcher imports
from .async_dispatcher import (
BaseDispatcher as BaseDispatcherType,
MemoryAdaptiveDispatcher as MemoryAdaptiveDispatcherType,
SemaphoreDispatcher as SemaphoreDispatcherType,
RateLimiter as RateLimiterType,
CrawlerMonitor as CrawlerMonitorType,
DisplayMode as DisplayModeType,
RunManyReturn as RunManyReturnType,
)
# Docker client
from .docker_client import Crawl4aiDockerClient as Crawl4aiDockerClientType
# Deep crawling imports
from .deep_crawling import (
DeepCrawlStrategy as DeepCrawlStrategyType,
BFSDeepCrawlStrategy as BFSDeepCrawlStrategyType,
FilterChain as FilterChainType,
ContentTypeFilter as ContentTypeFilterType,
DomainFilter as DomainFilterType,
URLFilter as URLFilterType,
FilterStats as FilterStatsType,
SEOFilter as SEOFilterType,
KeywordRelevanceScorer as KeywordRelevanceScorerType,
URLScorer as URLScorerType,
CompositeScorer as CompositeScorerType,
DomainAuthorityScorer as DomainAuthorityScorerType,
FreshnessScorer as FreshnessScorerType,
PathDepthScorer as PathDepthScorerType,
BestFirstCrawlingStrategy as BestFirstCrawlingStrategyType,
DFSDeepCrawlStrategy as DFSDeepCrawlStrategyType,
DeepCrawlDecorator as DeepCrawlDecoratorType,
)
def create_llm_config(*args, **kwargs) -> 'LLMConfigType':
from .async_configs import LLMConfig
return LLMConfig(*args, **kwargs)
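
These aliases exist so downstream modules can annotate against crawl4ai types without importing the heavy implementation modules at runtime. A small sketch of the intended pattern follows; the consuming function and its body are illustrative, and the import line assumes CrawlResult is re-exported at the package root.

from __future__ import annotations
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # resolved only by static type checkers, so no circular import at runtime
    from crawl4ai import CrawlResult

def describe(result: "CrawlResult") -> str:
    # at runtime the annotation stays a plain string, keeping module import cheap
    return f"crawled {result.url} (success={result.success})"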

Some files were not shown because too many files have changed in this diff