Compare commits


1 Commit

Author | SHA1 | Message | Date
UncleCode | 0f00821df5 | Fix version | 2025-01-26 18:08:24 +08:00
548 changed files with 8004 additions and 194329 deletions

View File

@@ -1,28 +0,0 @@
{
"permissions": {
"allow": [
"Bash(cd:*)",
"Bash(python3:*)",
"Bash(python:*)",
"Bash(grep:*)",
"Bash(mkdir:*)",
"Bash(cp:*)",
"Bash(rm:*)",
"Bash(true)",
"Bash(./package-extension.sh:*)",
"Bash(find:*)",
"Bash(chmod:*)",
"Bash(rg:*)",
"Bash(/Users/unclecode/.npm-global/lib/node_modules/@anthropic-ai/claude-code/vendor/ripgrep/arm64-darwin/rg -A 5 -B 5 \"Script Builder\" docs/md_v2/apps/crawl4ai-assistant/)",
"Bash(/Users/unclecode/.npm-global/lib/node_modules/@anthropic-ai/claude-code/vendor/ripgrep/arm64-darwin/rg -A 30 \"generateCode\\(events, format\\)\" docs/md_v2/apps/crawl4ai-assistant/content/content.js)",
"Bash(/Users/unclecode/.npm-global/lib/node_modules/@anthropic-ai/claude-code/vendor/ripgrep/arm64-darwin/rg \"<style>\" docs/md_v2/apps/crawl4ai-assistant/index.html -A 5)",
"Bash(git checkout:*)",
"Bash(docker logs:*)",
"Bash(curl:*)",
"Bash(docker compose:*)",
"Bash(./test-final-integration.sh:*)",
"Bash(mv:*)"
]
},
"enableAllProjectMcpServers": false
}

View File

@@ -1,59 +0,0 @@
title: "[Feature Request]: "
labels: ["⚙️ New"]
body:
- type: markdown
attributes:
value: |
Thank you for your interest in suggesting a new feature! Before you submit, please take a moment to check if it already exists in
this discussions category to avoid duplicates. 😊
- type: textarea
id: needs_to_be_done
attributes:
label: What needs to be done?
description: Please describe the feature or functionality you'd like to see.
placeholder: "e.g., Return alt text along with images scraped from a webpages in Result"
validations:
required: true
- type: textarea
id: problem_to_solve
attributes:
label: What problem does this solve?
description: Explain the pain point or issue this feature will help address.
placeholder: "e.g., Bypass Captchas added by cloudflare"
validations:
required: true
- type: textarea
id: target_users
attributes:
label: Target users/beneficiaries
description: Who would benefit from this feature? (e.g., specific teams, developers, users, etc.)
placeholder: "e.g., Marketing teams, developers"
validations:
required: false
- type: textarea
id: current_workarounds
attributes:
label: Current alternatives/workarounds
description: Are there any existing solutions or workarounds? How does this feature improve upon them?
placeholder: "e.g., Users manually select the css classes mapped to data fields to extract them"
validations:
required: false
- type: markdown
attributes:
value: |
### 💡 Implementation Ideas
- type: textarea
id: proposed_approach
attributes:
label: Proposed approach
description: Share any ideas you have for how this feature could be implemented. Point out any challenges you foresee
and the success metrics for this feature
placeholder: "e.g., Implement a breadth first traversal algorithm for scraper"
validations:
required: false

7
.github/FUNDING.yml vendored
View File

@@ -1,7 +0,0 @@
# These are supported funding model platforms
# GitHub Sponsors
github: unclecode
# Custom links for enterprise inquiries (uncomment when ready)
# custom: ["https://crawl4ai.com/enterprise"]

View File

@@ -1,127 +0,0 @@
name: Bug Report
description: Report a bug with Crawl4AI.
title: "[Bug]: "
labels: ["🐞 Bug","🩺 Needs Triage"]
body:
- type: input
id: crawl4ai_version
attributes:
label: crawl4ai version
description: Specify the version of crawl4ai you are using.
placeholder: "e.g., 2.0.0"
validations:
required: true
- type: textarea
id: expected_behavior
attributes:
label: Expected Behavior
description: Describe what you expected to happen.
placeholder: "Provide a detailed explanation of the expected outcome."
validations:
required: true
- type: textarea
id: current_behavior
attributes:
label: Current Behavior
description: Describe what is happening instead of the expected behavior.
placeholder: "Describe the actual result or issue you encountered."
validations:
required: true
- type: dropdown
id: reproducible
attributes:
label: Is this reproducible?
description: Indicate whether this bug can be reproduced consistently.
options:
- "Yes"
- "No"
validations:
required: true
- type: textarea
id: inputs
attributes:
label: Inputs Causing the Bug
description: Provide details about the inputs causing the issue.
placeholder: |
- URL(s):
- Settings used:
- Input data (if applicable):
render: bash
- type: textarea
id: steps_to_reproduce
attributes:
label: Steps to Reproduce
description: Provide step-by-step instructions to reproduce the issue.
placeholder: |
1. Go to...
2. Click on...
3. Observe the issue...
render: bash
- type: textarea
id: code_snippets
attributes:
label: Code snippets
description: Provide code snippets (if any). Add comments as necessary.
placeholder: print("Hello world")
render: python
# Header Section with Title
- type: markdown
attributes:
value: |
## Supporting Information
Please provide the following details to help us understand and resolve your issue. This will assist us in reproducing and diagnosing the problem
- type: input
id: os
attributes:
label: OS
description: Please provide the operating system & distro where the issue occurs.
placeholder: "e.g., Windows, macOS, Linux"
validations:
required: true
- type: input
id: python_version
attributes:
label: Python version
description: Specify the Python version being used.
placeholder: "e.g., 3.8.5"
validations:
required: true
# Browser Field
- type: input
id: browser
attributes:
label: Browser
description: Provide the name of the browser you are using.
placeholder: "e.g., Chrome, Firefox, Safari"
validations:
required: false
# Browser Version Field
- type: input
id: browser_version
attributes:
label: Browser version
description: Provide the version of the browser you are using.
placeholder: "e.g., 91.0.4472.124"
validations:
required: false
# Error Logs Field (Text Area)
- type: textarea
id: error_logs
attributes:
label: Error logs & Screenshots (if applicable)
description: If you encountered any errors, please provide the error logs. Attach any relevant screenshots to help us understand the issue.
placeholder: "Paste error logs here and attach your screenshots"
validations:
required: false

View File

@@ -1,8 +0,0 @@
blank_issues_enabled: false
contact_links:
- name: Feature Requests
url: https://github.com/unclecode/crawl4ai/discussions/categories/feature-requests
about: "Suggest new features or enhancements for Crawl4AI"
- name: Forums - Q&A
url: https://github.com/unclecode/crawl4ai/discussions/categories/forums-q-a
about: "Ask questions or engage in general discussions about Crawl4AI"

View File

@@ -1,19 +0,0 @@
## Summary
Please include a summary of the change and/or which issues are fixed.
e.g., `Fixes #123` (tag GitHub issue numbers in this format so the issues are automatically linked with your PR)
## List of files changed and why
e.g., quickstart.py - to update the example as per the new changes
## How Has This Been Tested?
Please describe the tests that you ran to verify your changes.
## Checklist:
- [ ] My code follows the style guidelines of this project
- [ ] I have performed a self-review of my own code
- [ ] I have commented my code, particularly in hard-to-understand areas
- [ ] I have made corresponding changes to the documentation
- [ ] I have added/updated unit tests that prove my fix is effective or that my feature works
- [ ] New and existing unit tests pass locally with my changes

View File

@@ -1,46 +0,0 @@
name: Discord GitHub Notifications
on:
issues:
types: [opened]
issue_comment:
types: [created]
pull_request:
types: [opened]
discussion:
types: [created]
watch:
types: [started]
jobs:
notify-discord:
runs-on: ubuntu-latest
steps:
- name: Send to Google Apps Script (Stars only)
if: github.event_name == 'watch'
run: |
curl -fSs -X POST "${{ secrets.GOOGLE_SCRIPT_ENDPOINT }}" \
-H 'Content-Type: application/json' \
-d '{"url":"${{ github.event.sender.html_url }}"}'
- name: Set webhook based on event type
id: set-webhook
run: |
if [ "${{ github.event_name }}" == "discussion" ]; then
echo "webhook=${{ secrets.DISCORD_DISCUSSIONS_WEBHOOK }}" >> $GITHUB_OUTPUT
elif [ "${{ github.event_name }}" == "watch" ]; then
echo "webhook=${{ secrets.DISCORD_STAR_GAZERS }}" >> $GITHUB_OUTPUT
else
echo "webhook=${{ secrets.DISCORD_WEBHOOK }}" >> $GITHUB_OUTPUT
fi
- name: Discord Notification
uses: Ilshidur/action-discord@master
env:
DISCORD_WEBHOOK: ${{ steps.set-webhook.outputs.webhook }}
with:
args: |
${{ github.event_name == 'issues' && format('📣 New issue created: **{0}** by {1} - {2}', github.event.issue.title, github.event.issue.user.login, github.event.issue.html_url) ||
github.event_name == 'issue_comment' && format('💬 New comment on issue **{0}** by {1} - {2}', github.event.issue.title, github.event.comment.user.login, github.event.comment.html_url) ||
github.event_name == 'pull_request' && format('🔄 New PR opened: **{0}** by {1} - {2}', github.event.pull_request.title, github.event.pull_request.user.login, github.event.pull_request.html_url) ||
github.event_name == 'watch' && format('⭐ {0} starred Crawl4AI 🥳! Check out their profile: {1}', github.event.sender.login, github.event.sender.html_url) ||
format('💬 New discussion started: **{0}** by {1} - {2}', github.event.discussion.title, github.event.discussion.user.login, github.event.discussion.html_url) }}

View File

@@ -1,142 +0,0 @@
name: Release Pipeline
on:
push:
tags:
- 'v*'
- '!test-v*' # Exclude test tags
jobs:
release:
runs-on: ubuntu-latest
permissions:
contents: write # Required for creating releases
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.12'
- name: Extract version from tag
id: get_version
run: |
TAG_VERSION=${GITHUB_REF#refs/tags/v}
echo "VERSION=$TAG_VERSION" >> $GITHUB_OUTPUT
echo "Releasing version: $TAG_VERSION"
- name: Install package dependencies
run: |
pip install -e .
- name: Check version consistency
run: |
TAG_VERSION=${{ steps.get_version.outputs.VERSION }}
PACKAGE_VERSION=$(python -c "from crawl4ai.__version__ import __version__; print(__version__)")
echo "Tag version: $TAG_VERSION"
echo "Package version: $PACKAGE_VERSION"
if [ "$TAG_VERSION" != "$PACKAGE_VERSION" ]; then
echo "❌ Version mismatch! Tag: $TAG_VERSION, Package: $PACKAGE_VERSION"
echo "Please update crawl4ai/__version__.py to match the tag version"
exit 1
fi
echo "✅ Version check passed: $TAG_VERSION"
- name: Install build dependencies
run: |
python -m pip install --upgrade pip
pip install build twine
- name: Build package
run: python -m build
- name: Check package
run: twine check dist/*
- name: Upload to PyPI
env:
TWINE_USERNAME: __token__
TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }}
run: |
echo "📦 Uploading to PyPI..."
twine upload dist/*
echo "✅ Package uploaded to https://pypi.org/project/crawl4ai/"
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Log in to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_TOKEN }}
- name: Extract major and minor versions
id: versions
run: |
VERSION=${{ steps.get_version.outputs.VERSION }}
MAJOR=$(echo $VERSION | cut -d. -f1)
MINOR=$(echo $VERSION | cut -d. -f1-2)
echo "MAJOR=$MAJOR" >> $GITHUB_OUTPUT
echo "MINOR=$MINOR" >> $GITHUB_OUTPUT
- name: Build and push Docker images
uses: docker/build-push-action@v5
with:
context: .
push: true
tags: |
unclecode/crawl4ai:${{ steps.get_version.outputs.VERSION }}
unclecode/crawl4ai:${{ steps.versions.outputs.MINOR }}
unclecode/crawl4ai:${{ steps.versions.outputs.MAJOR }}
unclecode/crawl4ai:latest
platforms: linux/amd64,linux/arm64
- name: Create GitHub Release
uses: softprops/action-gh-release@v2
with:
tag_name: v${{ steps.get_version.outputs.VERSION }}
name: Release v${{ steps.get_version.outputs.VERSION }}
body: |
## 🎉 Crawl4AI v${{ steps.get_version.outputs.VERSION }} Released!
### 📦 Installation
**PyPI:**
```bash
pip install crawl4ai==${{ steps.get_version.outputs.VERSION }}
```
**Docker:**
```bash
docker pull unclecode/crawl4ai:${{ steps.get_version.outputs.VERSION }}
docker pull unclecode/crawl4ai:latest
```
### 📝 What's Changed
See [CHANGELOG.md](https://github.com/${{ github.repository }}/blob/main/CHANGELOG.md) for details.
draft: false
prerelease: false
token: ${{ secrets.GITHUB_TOKEN }}
- name: Summary
run: |
echo "## 🚀 Release Complete!" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### 📦 PyPI Package" >> $GITHUB_STEP_SUMMARY
echo "- Version: ${{ steps.get_version.outputs.VERSION }}" >> $GITHUB_STEP_SUMMARY
echo "- URL: https://pypi.org/project/crawl4ai/" >> $GITHUB_STEP_SUMMARY
echo "- Install: \`pip install crawl4ai==${{ steps.get_version.outputs.VERSION }}\`" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### 🐳 Docker Images" >> $GITHUB_STEP_SUMMARY
echo "- \`unclecode/crawl4ai:${{ steps.get_version.outputs.VERSION }}\`" >> $GITHUB_STEP_SUMMARY
echo "- \`unclecode/crawl4ai:${{ steps.versions.outputs.MINOR }}\`" >> $GITHUB_STEP_SUMMARY
echo "- \`unclecode/crawl4ai:${{ steps.versions.outputs.MAJOR }}\`" >> $GITHUB_STEP_SUMMARY
echo "- \`unclecode/crawl4ai:latest\`" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### 📋 GitHub Release" >> $GITHUB_STEP_SUMMARY
echo "https://github.com/${{ github.repository }}/releases/tag/v${{ steps.get_version.outputs.VERSION }}" >> $GITHUB_STEP_SUMMARY

View File

@@ -1,116 +0,0 @@
name: Test Release Pipeline
on:
push:
tags:
- 'test-v*'
jobs:
test-release:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.12'
- name: Extract version from tag
id: get_version
run: |
TAG_VERSION=${GITHUB_REF#refs/tags/test-v}
echo "VERSION=$TAG_VERSION" >> $GITHUB_OUTPUT
echo "Testing with version: $TAG_VERSION"
- name: Install package dependencies
run: |
pip install -e .
- name: Check version consistency
run: |
TAG_VERSION=${{ steps.get_version.outputs.VERSION }}
PACKAGE_VERSION=$(python -c "from crawl4ai.__version__ import __version__; print(__version__)")
echo "Tag version: $TAG_VERSION"
echo "Package version: $PACKAGE_VERSION"
if [ "$TAG_VERSION" != "$PACKAGE_VERSION" ]; then
echo "❌ Version mismatch! Tag: $TAG_VERSION, Package: $PACKAGE_VERSION"
echo "Please update crawl4ai/__version__.py to match the tag version"
exit 1
fi
echo "✅ Version check passed: $TAG_VERSION"
- name: Install build dependencies
run: |
python -m pip install --upgrade pip
pip install build twine
- name: Build package
run: python -m build
- name: Check package
run: twine check dist/*
- name: Upload to Test PyPI
env:
TWINE_USERNAME: __token__
TWINE_PASSWORD: ${{ secrets.TEST_PYPI_TOKEN }}
run: |
echo "📦 Uploading to Test PyPI..."
twine upload --repository testpypi dist/* || {
if [ $? -eq 1 ]; then
echo "⚠️ Upload failed - likely version already exists on Test PyPI"
echo "Continuing anyway for test purposes..."
else
exit 1
fi
}
echo "✅ Test PyPI step complete"
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Log in to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_TOKEN }}
- name: Build and push Docker test images
uses: docker/build-push-action@v5
with:
context: .
push: true
tags: |
unclecode/crawl4ai:test-${{ steps.get_version.outputs.VERSION }}
unclecode/crawl4ai:test-latest
platforms: linux/amd64,linux/arm64
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Summary
run: |
echo "## 🎉 Test Release Complete!" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### 📦 Test PyPI Package" >> $GITHUB_STEP_SUMMARY
echo "- Version: ${{ steps.get_version.outputs.VERSION }}" >> $GITHUB_STEP_SUMMARY
echo "- URL: https://test.pypi.org/project/crawl4ai/" >> $GITHUB_STEP_SUMMARY
echo "- Install: \`pip install -i https://test.pypi.org/simple/ crawl4ai==${{ steps.get_version.outputs.VERSION }}\`" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### 🐳 Docker Test Images" >> $GITHUB_STEP_SUMMARY
echo "- \`unclecode/crawl4ai:test-${{ steps.get_version.outputs.VERSION }}\`" >> $GITHUB_STEP_SUMMARY
echo "- \`unclecode/crawl4ai:test-latest\`" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### 🧹 Cleanup Commands" >> $GITHUB_STEP_SUMMARY
echo "\`\`\`bash" >> $GITHUB_STEP_SUMMARY
echo "# Remove test tag" >> $GITHUB_STEP_SUMMARY
echo "git tag -d test-v${{ steps.get_version.outputs.VERSION }}" >> $GITHUB_STEP_SUMMARY
echo "git push origin :test-v${{ steps.get_version.outputs.VERSION }}" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "# Remove Docker test images" >> $GITHUB_STEP_SUMMARY
echo "docker rmi unclecode/crawl4ai:test-${{ steps.get_version.outputs.VERSION }}" >> $GITHUB_STEP_SUMMARY
echo "docker rmi unclecode/crawl4ai:test-latest" >> $GITHUB_STEP_SUMMARY
echo "\`\`\`" >> $GITHUB_STEP_SUMMARY

52
.gitignore vendored
View File

@@ -1,11 +1,3 @@
# Scripts folder (private tools)
.scripts/
# Local development CLI (private)
local_dev.py
dev
DEV_CLI_README.md
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
@@ -234,48 +226,8 @@ tree.md
.local
.do
/plans
plans/
# Codeium
.codeiumignore
todo/
# Continue development files
.continue/
.continuerc.json
continue.lock
continue_core.log
contextProviders/
continue_workspace/
.continue-cache/
continue_config.json
# Continue temporary files
.continue-temp/
.continue-logs/
.continue-downloads/
# Continue VS Code specific
.vscode-continue/
.vscode-continue-cache/
.prompts/
.llm.env
.private/
CLAUDE_MONITOR.md
CLAUDE.md
tests/**/test_site
tests/**/reports
tests/**/benchmark_reports
docs/**/data
.codecat/
docs/apps/linkdin/debug*/
docs/apps/linkdin/samples/insights/*
# Production checklist (local, not for version control)
PRODUCTION_CHECKLIST.md
# windsurf rules
.windsurfrules

View File

@@ -5,411 +5,9 @@ All notable changes to Crawl4AI will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [0.7.3] - 2025-08-09
### Added
- **🕵️ Undetected Browser Support**: New browser adapter pattern with stealth capabilities
- `browser_adapter.py` with undetected Chrome integration
- Bypass sophisticated bot detection systems (Cloudflare, Akamai, custom solutions)
- Support for headless stealth mode with anti-detection techniques
- Human-like behavior simulation with random mouse movements and scrolling
- Comprehensive examples for anti-bot strategies and stealth crawling
- Full documentation guide for undetected browser usage
- **🎨 Multi-URL Configuration System**: URL-specific crawler configurations for batch processing
- Different crawling strategies for different URL patterns in a single batch
- Support for string patterns with wildcards (`"*.pdf"`, `"*/blog/*"`)
- Lambda function matchers for complex URL logic
- Mixed matchers combining strings and functions with AND/OR logic
- Fallback configuration support when no patterns match
- First-match-wins configuration selection with optional fallback (see the matching sketch after this list)
- **🧠 Memory Monitoring & Optimization**: Comprehensive memory usage tracking
- New `memory_utils.py` module for memory monitoring and optimization
- Real-time memory usage tracking during crawl sessions
- Memory leak detection and reporting
- Performance optimization recommendations
- Peak memory usage analysis and efficiency metrics
- Automatic cleanup suggestions for memory-intensive operations
- **📊 Enhanced Table Extraction**: Improved table access and DataFrame conversion
- Direct `result.tables` interface replacing generic `result.media` approach
- Instant pandas DataFrame conversion with `pd.DataFrame(table['data'])`
- Enhanced table detection algorithms for better accuracy
- Table metadata including source XPath and headers
- Improved table structure preservation during extraction
- **💰 GitHub Sponsors Integration**: 4-tier sponsorship system
- Supporter ($5/month): Community support + early feature previews
- Professional ($25/month): Priority support + beta access
- Business ($100/month): Direct consultation + custom integrations
- Enterprise ($500/month): Dedicated support + feature development
- Custom arrangement options for larger organizations
- **🐳 Docker LLM Provider Flexibility**: Environment-based LLM configuration
- `LLM_PROVIDER` environment variable support for dynamic provider switching
- `.llm.env` file support for secure configuration management
- Per-request provider override capabilities in API endpoints
- Support for OpenAI, Groq, and other providers without rebuilding images
- Enhanced Docker documentation with deployment examples
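To make the first-match-wins selection concrete, here is a minimal, library-independent sketch of the multi-URL matching logic described above; the `MATCHERS` table, the `select_config` helper, and the use of `fnmatch` are illustrative assumptions, not the crawl4ai API.
```python
from fnmatch import fnmatch

# Illustrative only: each entry pairs a matcher (wildcard string or callable)
# with the configuration to use when it matches.
MATCHERS = [
    ("*.pdf", {"strategy": "pdf"}),                      # wildcard string pattern
    ("*/blog/*", {"strategy": "article"}),               # path-style wildcard
    (lambda url: "login" in url, {"strategy": "skip"}),  # lambda matcher
]
FALLBACK_CONFIG = {"strategy": "default"}  # used when no pattern matches


def select_config(url: str) -> dict:
    """Return the config of the first matcher that accepts the URL, else the fallback."""
    for matcher, config in MATCHERS:
        matched = matcher(url) if callable(matcher) else fnmatch(url, matcher)
        if matched:
            return config
    return FALLBACK_CONFIG


for u in ["https://example.com/blog/post-1",
          "https://example.com/report.pdf",
          "https://example.com/about"]:
    print(u, "->", select_config(u))
```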
### Fixed
- **URL Matcher Fallback**: Resolved edge cases in URL pattern matching logic
- **Memory Management**: Fixed memory leaks in long-running crawl sessions
- **Sitemap Processing**: Improved redirect handling in sitemap fetching
- **Table Extraction**: Enhanced table detection and extraction accuracy
- **Error Handling**: Better error messages and recovery from network failures
### Changed
- **Architecture Refactoring**: Major cleanup and optimization
- Moved 2,450+ lines from main `async_crawler_strategy.py` to backup
- Cleaner separation of concerns in crawler architecture
- Better maintainability and code organization
- Preserved backward compatibility while improving performance
### Documentation
- **Comprehensive Examples**: Added real-world URLs and practical use cases
- **API Documentation**: Complete CrawlResult field documentation with all available fields
- **Migration Guides**: Updated table extraction patterns from `result.media` to `result.tables`
- **Undetected Browser Guide**: Full documentation for stealth mode and anti-bot strategies
- **Multi-Config Examples**: Detailed examples for URL-specific configurations
- **Docker Deployment**: Enhanced Docker documentation with LLM provider configuration
## [0.7.x] - 2025-06-29
### Added
- **Virtual Scroll Support**: New `VirtualScrollConfig` for handling virtualized scrolling on modern websites
- Automatically detects and handles three scrolling scenarios:
- Content unchanged (continue scrolling)
- Content appended (traditional infinite scroll)
- Content replaced (true virtual scroll - Twitter/Instagram style)
- Captures ALL content from pages that replace DOM elements during scroll
- Intelligent deduplication based on normalized text content (a small sketch follows this list)
- Configurable scroll amount, count, and wait times
- Seamless integration with existing extraction strategies
- Comprehensive examples including Twitter timeline, Instagram grid, and mixed content scenarios
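The normalized-text deduplication mentioned above can be illustrated with a short, self-contained sketch; the normalization rules here (whitespace collapsing and lower-casing) are an assumption about one reasonable approach, not the library's exact algorithm.
```python
import re


def normalize(text: str) -> str:
    """Collapse whitespace and lower-case so visually identical chunks compare equal."""
    return re.sub(r"\s+", " ", text).strip().lower()


def dedupe_captured(items: list[str]) -> list[str]:
    """Keep the first occurrence of each normalized text chunk captured across scrolls."""
    seen: set[str] = set()
    unique: list[str] = []
    for item in items:
        key = normalize(item)
        if key and key not in seen:
            seen.add(key)
            unique.append(item)
    return unique


# The same post captured twice while scrolling is kept once.
print(dedupe_captured(["Hello  world", "hello world", "Another post"]))
```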
## [Unreleased]
### Added
- **Flexible LLM Provider Configuration** (Docker):
- Support for `LLM_PROVIDER` environment variable to override default provider
- Per-request provider override via optional `provider` parameter in API endpoints
- Automatic provider validation with clear error messages
- Updated Docker documentation and examples
### Changed
- **WebScrapingStrategy Refactoring**: Simplified content scraping architecture
- `WebScrapingStrategy` is now an alias for `LXMLWebScrapingStrategy` for backward compatibility
- Removed redundant BeautifulSoup-based implementation (~1000 lines of code)
- `LXMLWebScrapingStrategy` now inherits directly from `ContentScrapingStrategy`
- All existing code using `WebScrapingStrategy` continues to work without modification
- Default scraping strategy remains `LXMLWebScrapingStrategy` for optimal performance
### Added
- **AsyncUrlSeeder**: High-performance URL discovery system for intelligent crawling at scale
- Discover URLs from sitemaps and Common Crawl index
- Extract and analyze page metadata without full crawling
- BM25 relevance scoring for query-based URL filtering
- Multi-domain parallel discovery with `many_urls()` method
- Automatic caching with TTL for discovered URLs
- Rate limiting and concurrent request management
- Live URL validation with HEAD requests
- JSON-LD and Open Graph metadata extraction
- **SeedingConfig**: Configuration class for URL seeding operations
- Support for multiple discovery sources (`sitemap`, `cc`, `sitemap+cc`)
- Pattern-based URL filtering with wildcards
- Configurable concurrency and rate limiting
- Query-based relevance scoring with BM25
- Score threshold filtering for quality control
- Comprehensive documentation for URL seeding feature
- Detailed comparison with deep crawling approaches
- Complete API reference with examples
- Integration guide with AsyncWebCrawler
- Performance benchmarks and best practices
- Example scripts demonstrating URL seeding:
- `url_seeder_demo.py`: Interactive Rich-based demonstration
- `url_seeder_quick_demo.py`: Screenshot-friendly examples
- Test suite for URL seeding with BM25 scoring
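A hedged usage sketch of the URL-seeding workflow described above. `AsyncUrlSeeder`, `SeedingConfig`, `many_urls()`, and `arun_many()` are named in this changelog; the parameter names and the shape of the returned entries are assumptions and may differ from the shipped API.
```python
import asyncio

from crawl4ai import AsyncUrlSeeder, SeedingConfig  # exports named in this changelog


async def discover() -> list[str]:
    # Parameter names below are assumptions inferred from the bullets above.
    config = SeedingConfig(
        source="sitemap+cc",             # sitemaps plus the Common Crawl index
        pattern="*/blog/*",              # wildcard URL filtering
        query="web crawling tutorials",  # BM25 relevance scoring
        score_threshold=0.3,             # drop low-scoring URLs
    )
    seeder = AsyncUrlSeeder()
    # many_urls() is named above for multi-domain discovery; the return shape
    # (per-domain lists of entries carrying a "url" field) is an assumption.
    results = await seeder.many_urls(["example.com", "docs.example.com"], config)
    return [entry["url"] for entries in results.values() for entry in entries]


urls = asyncio.run(discover())
print(f"Discovered {len(urls)} candidate URLs")  # hand these to arun_many()
```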
### Changed
- Updated `__init__.py` to export AsyncUrlSeeder and SeedingConfig
- Enhanced documentation with URL seeding integration examples
### Fixed
- Corrected examples to properly extract URLs from seeder results before passing to `arun_many()`
- Fixed logger color compatibility issue (changed `lightblack` to `bright_black`)
## [0.6.2] - 2025-05-02
### Added
- New `RegexExtractionStrategy` for fast pattern-based extraction without requiring LLM (a library-independent sketch of the idea follows this list)
- Built-in patterns for emails, URLs, phone numbers, dates, and more
- Support for custom regex patterns
- `generate_pattern` utility for LLM-assisted pattern creation (one-time use)
- Added `fit_html` as a top-level field in `CrawlResult` for optimized HTML extraction
- Added support for network response body capture in network request tracking
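To show the flavor of pattern-based extraction without an LLM, here is a deliberately library-independent sketch using only the standard `re` module; it illustrates the idea behind the built-in patterns and is not the `RegexExtractionStrategy` API itself.
```python
import re

# A few built-in-style patterns of the kind the strategy ships with (illustrative).
PATTERNS = {
    "email": re.compile(r"[\w.+-]+@[\w-]+\.[\w.-]+"),
    "url": re.compile(r"https?://[^\s\"'<>]+"),
    "phone": re.compile(r"\+?\d[\d\s().-]{7,}\d"),
}


def extract_patterns(text: str) -> dict[str, list[str]]:
    """Run every named pattern over the text and collect all matches."""
    return {name: pattern.findall(text) for name, pattern in PATTERNS.items()}


sample = "Reach team@example.com or +1 (555) 010-0000; docs at https://example.com/docs"
print(extract_patterns(sample))
```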
### Changed
- Updated documentation for no-LLM extraction strategies
- Enhanced API reference to include RegexExtractionStrategy examples and usage
- Improved HTML preprocessing with optimized performance for extraction strategies
## [0.6.1] - 2025-04-24
### Added
- New dedicated `tables` field in `CrawlResult` model for better table extraction handling
- Updated crypto_analysis_example.py to use the new tables field with backward compatibility
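A short sketch of the access pattern implied above: each entry in `result.tables` carries the table data (and, per this changelog, metadata such as headers) and converts to a DataFrame in one call. The stub below stands in for a real `CrawlResult`; the exact key names are assumptions to check against the released docs.
```python
import pandas as pd

# Stub standing in for the `tables` field of a CrawlResult; key names are assumptions.
result_tables = [
    {"headers": ["coin", "price"], "data": [["BTC", 65000], ["ETH", 3200]]},
]

frames = []
for table in result_tables:  # with a real result: for table in result.tables
    df = pd.DataFrame(table["data"], columns=table.get("headers"))
    frames.append(df)

print(frames[0])
```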
### Changed
- Improved playground UI in Docker deployment with better endpoint handling and UI feedback
## [0.6.0] - 2025-04-22
### Added
- Browser pooling with page pre-warming and fine-grained **geolocation, locale, and timezone** controls
- Crawler pool manager (SDK + Docker API) for smarter resource allocation
- Network & console log capture plus MHTML snapshot export
- **Table extractor**: turn HTML `<table>`s into DataFrames or CSV with one flag
- High-volume stress-test framework in `tests/memory` and API load scripts
- MCP protocol endpoints with socket & SSE support; playground UI scaffold
- Docs v2 revamp: TOC, GitHub badge, copy-code buttons, Docker API demo
- “Ask AI” helper button *(work-in-progress, shipping soon)*
- New examples: geolocation usage, network/console capture, Docker API, markdown source selection, crypto analysis
- Expanded automated test suites for browser, Docker, MCP and memory benchmarks
### Changed
- Consolidated and renamed browser strategies; legacy docker strategy modules removed
- `ProxyConfig` moved to `async_configs`
- Server migrated to pool-based crawler management
- FastAPI validators replace custom query validation
- Docker build now uses Chromium base image
- Large-scale repo tidy-up (≈36k insertions, ≈5k deletions)
### Fixed
- Async crawler session leak, duplicate-visit handling, URL normalisation
- Target-element regressions in scraping strategies
- Logged-URL readability, encoded-URL decoding, middle truncation for long URLs
- Closed issues: #701, #733, #756, #774, #804, #822, #839, #841, #842, #843, #867, #902, #911
### Removed
- Obsolete modules under `crawl4ai/browser/*` superseded by the new pooled browser layer
### Deprecated
- Old markdown generator names now alias `DefaultMarkdownGenerator` and emit warnings
---
#### Upgrade notes
1. Update any direct imports from `crawl4ai/browser/*` to the new pooled browser modules
2. If you override `AsyncPlaywrightCrawlerStrategy.get_page`, adopt the new signature
3. Rebuild Docker images to pull the new Chromium layer
4. Switch to `DefaultMarkdownGenerator` (or silence the deprecation warning)
---
`121 files changed, ≈36,223 insertions, ≈4,975 deletions`
### [Feature] 2025-04-21
- Implemented MCP protocol for machine-to-machine communication
- Added WebSocket and SSE transport for MCP server
- Exposed server endpoints via MCP protocol
- Created tests for MCP socket and SSE communication
- Enhanced Docker server with file handling and intelligent search
- Added PDF and screenshot endpoints with file saving capability
- Added JavaScript execution endpoint for page interaction
- Implemented advanced context search with BM25 and code chunking
- Added file path output support for generated assets
- Improved server endpoints and API surface
- Added intelligent context search with query filtering
- Added syntax-aware code function chunking
- Implemented efficient HTML processing pipeline
- Added support for controlling browser geolocation via new GeolocationConfig class
- Added locale and timezone configuration options to CrawlerRunConfig
- Added example script demonstrating geolocation and locale usage
- Added documentation for location-based identity features
### [Refactor] 2025-04-20
- Replaced crawler_manager.py with simpler crawler_pool.py implementation
- Added global page semaphore for hard concurrency cap
- Implemented browser pool with idle cleanup
- Added playground UI for testing and stress testing
- Updated API handlers to use pooled crawlers
- Enhanced logging levels and symbols
- Added memory tests and stress test utilities
### [Added] 2025-04-17
- Added content source selection feature for markdown generation
- New `content_source` parameter allows choosing between `cleaned_html`, `raw_html`, and `fit_html`
- Provides flexibility in how HTML content is processed before markdown conversion
- Added examples and documentation for the new feature
- Includes backward compatibility with default `cleaned_html` behavior
## Version 0.5.0.post5 (2025-03-14)
### Added
- *(crawler)* Add experimental parameters dictionary to CrawlerRunConfig to support beta features
- *(tables)* Add comprehensive table detection and extraction functionality with scoring system
- *(monitor)* Add real-time crawler monitoring system with memory management
- *(content)* Add target_elements parameter for selective content extraction
- *(browser)* Add standalone CDP browser launch capability
- *(schema)* Add preprocess_html_for_schema utility for better HTML cleaning
- *(api)* Add special handling for single URL requests in Docker API
### Changed
- *(filters)* Add reverse option to URLPatternFilter for inverting filter logic
- *(browser)* Make CSP nonce headers optional via experimental config
- *(browser)* Remove default cookie injection from page initialization
- *(crawler)* Optimize response handling for single-URL processing
- *(api)* Refactor crawl request handling to streamline processing
- *(config)* Update default provider to gpt-4o
- *(cache)* Change default cache_mode from aggressive to bypass in examples
### Fixed
- *(browser)* Clean up browser context creation code
- *(api)* Improve code formatting in API handler
### Breaking Changes
- WebScrapingStrategy no longer returns 'scraped_html' in its output dictionary
- Table extraction logic has been modified to better handle thead/tbody structures
- Default cookie injection has been removed from page initialization
## Version 0.5.0 (2025-03-02)
### Added
- *(profiles)* Add BrowserProfiler class for dedicated browser profile management
- *(cli)* Add interactive profile management to CLI with rich UI
- *(profiles)* Add ability to crawl directly from profile management interface
- *(browser)* Support identity-based browsing with persistent profiles
- *(deep-crawling)* Add max_pages parameter to limit the number of pages crawled in all deep crawling strategies
- *(deep-crawling)* Add score_threshold parameter to BFS and DFS strategies to filter URLs by score
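A hedged sketch of how the two new limits might be supplied to a BFS deep-crawl run; `max_pages` and `score_threshold` are the parameters introduced here, while the class name, import path, and the `deep_crawl_strategy` config field are assumptions not confirmed by this changelog.
```python
import asyncio

from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
# Class name and module path are assumptions; only the two parameters are documented above.
from crawl4ai.deep_crawling import BFSDeepCrawlStrategy


async def main():
    strategy = BFSDeepCrawlStrategy(
        max_depth=2,
        max_pages=50,          # stop after 50 pages (new in this release)
        score_threshold=0.4,   # skip links scored below 0.4 (new in this release)
    )
    # The config field name is an assumption.
    config = CrawlerRunConfig(deep_crawl_strategy=strategy)
    async with AsyncWebCrawler() as crawler:
        results = await crawler.arun("https://example.com", config=config)
        # Non-streaming deep crawls are assumed to return a list of results.
        print(f"Crawled {len(results)} pages")


asyncio.run(main())
```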
### Changed
- *(browser)* Refactor profile management from ManagedBrowser to BrowserProfiler class
- *(cli)* Enhance CLI with profile selection and status display for crawling
- *(examples)* Update identity-based browsing example to use BrowserProfiler class
- *(docs)* Update identity-based crawling documentation
- *(docs)* Update deep crawling documentation with max_pages and score_threshold parameters
- *(examples)* Add example demonstrating the use of max_pages and score_threshold parameters
### Fixed
- *(browser)* Fix profile detection and management on different platforms
- *(cli)* Fix CLI command structure for better user experience
- *(deep-crawling)* Improve BFS and DFS strategies to handle page count limits more efficiently
## Version 0.5.0 (2025-02-21)
### Added
- *(crawler)* [**breaking**] Add memory-adaptive dispatcher with rate limiting
- *(scraping)* [**breaking**] Add LXML-based scraping mode for improved performance
- *(content-filter)* Add LLMContentFilter for intelligent markdown generation
- *(dispatcher)* [**breaking**] Add streaming support for URL processing
- *(browser)* [**breaking**] Improve browser context management and add shared data support
- *(config)* [**breaking**] Add streaming support and config cloning
- *(crawler)* Add URL redirection tracking
- *(extraction)* Add LLM-powered schema generation utility
- *(proxy)* Add proxy configuration support to CrawlerRunConfig
- *(robots)* Add robots.txt compliance support
- *(release)* [**breaking**] Prepare v0.4.3 beta release
- *(proxy)* Add proxy rotation support and documentation
- *(browser)* Add CDP URL configuration support
- *(demo)* Uncomment feature demos and add fake-useragent dependency
- *(pdf)* Add PDF processing capabilities
- *(crawler)* [**breaking**] Enhance JavaScript execution and PDF processing
- *(docker)* Add Docker deployment configuration and API server
- *(docker)* Add Docker service integration and config serialization
- *(docker)* [**breaking**] Enhance Docker deployment setup and configuration
- *(api)* Improve cache handling and add API tests
- *(crawler)* [**breaking**] Add deep crawling capabilities with BFS strategy
- *(proxy)* [**breaking**] Add proxy rotation strategy
- *(deep-crawling)* Add DFS strategy and update exports; refactor CLI entry point
- *(cli)* Add command line interface with comprehensive features
- *(config)* Enhance serialization and add deep crawling exports
- *(crawler)* Add HTTP crawler strategy for lightweight web scraping
- *(docker)* [**breaking**] Implement supervisor and secure API endpoints
- *(docker)* [**breaking**] Add JWT authentication and improve server architecture
### Changed
- *(browser)* Update browser channel default to 'chromium' in BrowserConfig.from_args method
- *(crawler)* Optimize response handling and default settings
- *(crawler)* Update hello_world example with proper content filtering
- Update hello_world.py example
- *(docs)* [**breaking**] Reorganize documentation structure and update styles
- *(dispatcher)* [**breaking**] Migrate to modular dispatcher system with enhanced monitoring
- *(scraping)* [**breaking**] Replace ScrapingMode enum with strategy pattern
- *(browser)* Improve browser path management
- *(models)* Rename final_url to redirected_url for consistency
- *(core)* [**breaking**] Improve type hints and remove unused file
- *(docs)* Improve code formatting in features demo
- *(user-agent)* Improve user agent generation system
- *(core)* [**breaking**] Reorganize project structure and remove legacy code
- *(docker)* Clean up import statements in server.py
- *(docker)* Remove unused models and utilities for cleaner codebase
- *(docker)* [**breaking**] Improve server architecture and configuration
- *(deep-crawl)* [**breaking**] Reorganize deep crawling functionality into dedicated module
- *(deep-crawling)* [**breaking**] Reorganize deep crawling strategies and add new implementations
- *(crawling)* [**breaking**] Improve type hints and code cleanup
- *(crawler)* [**breaking**] Improve HTML handling and cleanup codebase
- *(crawler)* [**breaking**] Remove content filter functionality
- *(examples)* Update API usage in features demo
- *(config)* [**breaking**] Enhance serialization and config handling
### Docs
- Add Code of Conduct for the project (#410)
### Documentation
- *(extraction)* Add clarifying comments for CSS selector behavior
- *(readme)* Update personal story and project vision
- *(urls)* [**breaking**] Update documentation URLs to new domain
- *(api)* Add streaming mode documentation and examples
- *(readme)* Update version and feature announcements for v0.4.3b1
- *(examples)* Update demo scripts and fix output formats
- *(examples)* Update v0.4.3 features demo to v0.4.3b2
- *(readme)* Update version references and fix links
- *(multi-url)* [**breaking**] Improve documentation clarity and update examples
- *(examples)* Update proxy rotation demo and disable other demos
- *(api)* Improve formatting and readability of API documentation
- *(examples)* Add SERP API project example
- *(urls)* Update documentation URLs to new domain
- *(readme)* Resolve merge conflict and update version info
### Fixed
- *(browser)* Update default browser channel to chromium and simplify channel selection logic
- *(browser)* [**breaking**] Default to Chromium channel for new headless mode (#387)
- *(browser)* Resolve merge conflicts in browser channel configuration
- Prevent memory leaks by ensuring proper closure of Playwright pages
- Not working long page screenshot (#403)
- *(extraction)* JsonCss selector and crawler improvements
- *(models)* [**breaking**] Make model fields optional with default values
- *(dispatcher)* Adjust memory threshold and fix dispatcher initialization
- *(install)* Ensure proper exit after running doctor command
### Miscellaneous Tasks
- *(cleanup)* Remove unused files and improve type hints
- Add .gitattributes file
## License Update
Crawl4AI v0.5.0 updates the license to Apache 2.0 *with a required attribution clause*. This means you are free to use, modify, and distribute Crawl4AI (even commercially), but you *must* clearly attribute the project in any public use or distribution. See the updated `LICENSE` file for the full legal text and specific requirements.
---
## Version 0.4.3b2 (2025-01-21)
## Version 0.4.3 (2025-01-21)
This release introduces several powerful new features, including robots.txt compliance, dynamic proxy support, LLM-powered schema generation, and improved documentation.
@@ -537,11 +135,9 @@ This release introduces several powerful new features, including robots.txt comp
- **Multiple Element Selection**: Modified `_get_elements` in `JsonCssExtractionStrategy` to return all matching elements instead of just the first one, ensuring comprehensive extraction. ([#extraction_strategy.py](crawl4ai/extraction_strategy.py))
- **Error Handling in Scrolling**: Added robust error handling to ensure scrolling proceeds safely even if a configuration is missing. ([#async_crawler_strategy.py](crawl4ai/async_crawler_strategy.py))
## [0.4.267] - 2025-01-06
#### Other
- **Git Ignore Update**: Added `/plans` to `.gitignore` for better development environment consistency. ([#.gitignore](.gitignore))
### Added
- **Windows Event Loop Configuration**: Introduced a utility function `configure_windows_event_loop` to resolve `NotImplementedError` for asyncio subprocesses on Windows. ([#utils.py](crawl4ai/utils.py), [#tutorials/async-webcrawler-basics.md](docs/md_v3/tutorials/async-webcrawler-basics.md))
- **`page_need_scroll` Method**: Added a method to determine if a page requires scrolling before taking actions in `AsyncPlaywrightCrawlerStrategy`. ([#async_crawler_strategy.py](crawl4ai/async_crawler_strategy.py))
## [0.4.24] - 2024-12-31
@@ -685,6 +281,12 @@ This release introduces several powerful new features, including robots.txt comp
- Fixed potential viewport mismatches by ensuring consistent use of `self.viewport_width` and `self.viewport_height` throughout the code.
- Improved robustness of dynamic content loading to avoid timeouts and failed evaluations.
## [0.3.75] December 1, 2024
### PruningContentFilter

View File

@@ -1,131 +0,0 @@
# Crawl4AI Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, caste, color, religion, or sexual
identity and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the overall
community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or advances of
any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email address,
without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official email address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
unclecode@crawl4ai.com. All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series of
actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or permanent
ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within the
community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.1, available at
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
Community Impact Guidelines were inspired by
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
For answers to common questions about this code of conduct, see the FAQ at
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
[https://www.contributor-covenant.org/translations][translations].
[homepage]: https://www.contributor-covenant.org
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
[Mozilla CoC]: https://github.com/mozilla/diversity
[FAQ]: https://www.contributor-covenant.org/faq
[translations]: https://www.contributor-covenant.org/translations

View File

@@ -24,14 +24,6 @@ We would like to thank the following people for their contributions to Crawl4AI:
- [NanmiCoder](https://github.com/NanmiCoder) - fix: crawler strategy exception handling and fixes [#271](https://github.com/unclecode/crawl4ai/pull/271)
- [paulokuong](https://github.com/paulokuong) - fix: CRAWL4_AI_BASE_DIRECTORY should be Path object instead of string [#298](https://github.com/unclecode/crawl4ai/pull/298)
#### Feb-Alpha-1
- [sufianuddin](https://github.com/sufianuddin) - fix: [Documentation for JsonCssExtractionStrategy](https://github.com/unclecode/crawl4ai/issues/651)
- [tautikAg](https://github.com/tautikAg) - fix: [Markdown output has incorrect spacing](https://github.com/unclecode/crawl4ai/issues/599)
- [cardit1](https://github.com/cardit1) - fix: ['AsyncPlaywrightCrawlerStrategy' object has no attribute 'downloads_path'](https://github.com/unclecode/crawl4ai/issues/585)
- [dmurat](https://github.com/dmurat) - fix: [ Incorrect rendering of inline code inside of links ](https://github.com/unclecode/crawl4ai/issues/583)
- [Sparshsing](https://github.com/Sparshsing) - fix: [Relative Urls in the webpage not extracted properly ](https://github.com/unclecode/crawl4ai/issues/570)
## Other Contributors
@@ -39,11 +31,6 @@ We would like to thank the following people for their contributions to Crawl4AI:
- [Shiv Kumar](https://github.com/shivkumar0757)
- [QIN2DIM](https://github.com/QIN2DIM)
#### Typo fixes
- [ssoydan](https://github.com/ssoydan)
- [Darshan](https://github.com/Darshan2104)
- [tuhinmallick](https://github.com/tuhinmallick)
## Acknowledgements
We also want to thank all the users who have reported bugs, suggested features, or helped in any other way to make Crawl4AI better.

View File

@@ -1,36 +1,32 @@
FROM python:3.12-slim-bookworm AS build
# syntax=docker/dockerfile:1.4
# C4ai version
ARG C4AI_VER=0.7.0-r1
ENV C4AI_VERSION=$C4AI_VER
LABEL c4ai.version=$C4AI_VER
ARG TARGETPLATFORM
ARG BUILDPLATFORM
# Set build arguments
ARG APP_HOME=/app
ARG GITHUB_REPO=https://github.com/unclecode/crawl4ai.git
ARG GITHUB_BRANCH=main
ARG USE_LOCAL=true
# Other build arguments
ARG PYTHON_VERSION=3.10
ENV PYTHONFAULTHANDLER=1 \
PYTHONHASHSEED=random \
PYTHONUNBUFFERED=1 \
PIP_NO_CACHE_DIR=1 \
PYTHONDONTWRITEBYTECODE=1 \
PIP_DISABLE_PIP_VERSION_CHECK=1 \
PIP_DEFAULT_TIMEOUT=100 \
DEBIAN_FRONTEND=noninteractive \
REDIS_HOST=localhost \
REDIS_PORT=6379
# Base stage with system dependencies
FROM python:${PYTHON_VERSION}-slim as base
ARG PYTHON_VERSION=3.12
ARG INSTALL_TYPE=default
# Declare ARG variables again within the build stage
ARG INSTALL_TYPE=all
ARG ENABLE_GPU=false
ARG TARGETARCH
# Platform-specific labels
LABEL maintainer="unclecode"
LABEL description="🔥🕷️ Crawl4AI: Open-source LLM Friendly Web Crawler & scraper"
LABEL version="1.0"
# Environment setup
ENV PYTHONUNBUFFERED=1 \
PYTHONDONTWRITEBYTECODE=1 \
PIP_NO_CACHE_DIR=1 \
PIP_DISABLE_PIP_VERSION_CHECK=1 \
PIP_DEFAULT_TIMEOUT=100 \
DEBIAN_FRONTEND=noninteractive
# Install system dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
build-essential \
curl \
@@ -41,11 +37,10 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
pkg-config \
python3-dev \
libjpeg-dev \
redis-server \
supervisor \
&& apt-get clean \
libpng-dev \
&& rm -rf /var/lib/apt/lists/*
# Playwright system dependencies for Linux
RUN apt-get update && apt-get install -y --no-install-recommends \
libglib2.0-0 \
libnss3 \
@@ -68,66 +63,30 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
libcairo2 \
libasound2 \
libatspi2.0-0 \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
RUN apt-get update && apt-get dist-upgrade -y \
&& rm -rf /var/lib/apt/lists/*
RUN if [ "$ENABLE_GPU" = "true" ] && [ "$TARGETARCH" = "amd64" ] ; then \
# GPU support if enabled and architecture is supported
RUN if [ "$ENABLE_GPU" = "true" ] && [ "$TARGETPLATFORM" = "linux/amd64" ] ; then \
apt-get update && apt-get install -y --no-install-recommends \
nvidia-cuda-toolkit \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* ; \
else \
echo "Skipping NVIDIA CUDA Toolkit installation (unsupported platform or GPU disabled)"; \
fi
RUN if [ "$TARGETARCH" = "arm64" ]; then \
echo "🦾 Installing ARM-specific optimizations"; \
apt-get update && apt-get install -y --no-install-recommends \
libopenblas-dev \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*; \
elif [ "$TARGETARCH" = "amd64" ]; then \
echo "🖥️ Installing AMD64-specific optimizations"; \
apt-get update && apt-get install -y --no-install-recommends \
libomp-dev \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*; \
else \
echo "Skipping platform-specific optimizations (unsupported platform)"; \
fi
# Create and set working directory
WORKDIR /app
# Create a non-root user and group
RUN groupadd -r appuser && useradd --no-log-init -r -g appuser appuser
# Copy the entire project
COPY . .
# Create and set permissions for appuser home directory
RUN mkdir -p /home/appuser && chown -R appuser:appuser /home/appuser
WORKDIR ${APP_HOME}
RUN echo '#!/bin/bash\n\
if [ "$USE_LOCAL" = "true" ]; then\n\
echo "📦 Installing from local source..."\n\
pip install --no-cache-dir /tmp/project/\n\
else\n\
echo "🌐 Installing from GitHub..."\n\
for i in {1..3}; do \n\
git clone --branch ${GITHUB_BRANCH} ${GITHUB_REPO} /tmp/crawl4ai && break || \n\
{ echo "Attempt $i/3 failed! Taking a short break... ☕"; sleep 5; }; \n\
done\n\
pip install --no-cache-dir /tmp/crawl4ai\n\
fi' > /tmp/install.sh && chmod +x /tmp/install.sh
COPY . /tmp/project/
# Copy supervisor config first (might need root later, but okay for now)
COPY deploy/docker/supervisord.conf .
COPY deploy/docker/requirements.txt .
# Install base requirements
RUN pip install --no-cache-dir -r requirements.txt
# Install required library for FastAPI
RUN pip install fastapi uvicorn psutil
# Install ML dependencies first for better layer caching
RUN if [ "$INSTALL_TYPE" = "all" ] ; then \
pip install --no-cache-dir \
torch \
@@ -140,61 +99,38 @@ RUN if [ "$INSTALL_TYPE" = "all" ] ; then \
python -m nltk.downloader punkt stopwords ; \
fi
# Install the package
RUN if [ "$INSTALL_TYPE" = "all" ] ; then \
pip install "/tmp/project/[all]" && \
pip install ".[all]" && \
python -m crawl4ai.model_loader ; \
elif [ "$INSTALL_TYPE" = "torch" ] ; then \
pip install "/tmp/project/[torch]" ; \
pip install ".[torch]" ; \
elif [ "$INSTALL_TYPE" = "transformer" ] ; then \
pip install "/tmp/project/[transformer]" && \
pip install ".[transformer]" && \
python -m crawl4ai.model_loader ; \
else \
pip install "/tmp/project" ; \
pip install "." ; \
fi
RUN pip install --no-cache-dir --upgrade pip && \
/tmp/install.sh && \
python -c "import crawl4ai; print('✅ crawl4ai is ready to rock!')" && \
python -c "from playwright.sync_api import sync_playwright; print('✅ Playwright is feeling dramatic!')"
# Install MkDocs and required plugins
RUN pip install --no-cache-dir \
mkdocs \
mkdocs-material \
mkdocs-terminal \
pymdown-extensions
RUN crawl4ai-setup
# Build MkDocs documentation
RUN mkdocs build
RUN playwright install --with-deps
# Install Playwright and browsers
RUN if [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
playwright install chromium; \
elif [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
playwright install chromium; \
fi
RUN mkdir -p /home/appuser/.cache/ms-playwright \
&& cp -r /root/.cache/ms-playwright/chromium-* /home/appuser/.cache/ms-playwright/ \
&& chown -R appuser:appuser /home/appuser/.cache/ms-playwright
# Expose port
EXPOSE 8000 11235 9222 8080
RUN crawl4ai-doctor
# Copy application code
COPY deploy/docker/* ${APP_HOME}/
# copy the playground + any future static assets
COPY deploy/docker/static ${APP_HOME}/static
# Change ownership of the application directory to the non-root user
RUN chown -R appuser:appuser ${APP_HOME}
# give permissions to redis persistence dirs if used
RUN mkdir -p /var/lib/redis /var/log/redis && chown -R appuser:appuser /var/lib/redis /var/log/redis
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
CMD bash -c '\
MEM=$(free -m | awk "/^Mem:/{print \$2}"); \
if [ $MEM -lt 2048 ]; then \
echo "⚠️ Warning: Less than 2GB RAM available! Your container might need a memory boost! 🚀"; \
exit 1; \
fi && \
redis-cli ping > /dev/null && \
curl -f http://localhost:11235/health || exit 1'
EXPOSE 6379
# Switch to the non-root user before starting the application
USER appuser
# Set environment variables to production
ENV PYTHON_ENV=production
# Start the application using supervisord
CMD ["supervisord", "-c", "supervisord.conf"]
# Start the FastAPI server
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "11235"]

View File

@@ -1,339 +0,0 @@
# Development Journal
This journal tracks significant feature additions, bug fixes, and architectural decisions in the crawl4ai project. It serves as both documentation and a historical record of the project's evolution.
## [2025-04-17] Added Content Source Selection for Markdown Generation
**Feature:** Configurable content source for markdown generation
**Changes Made:**
1. Added `content_source: str = "cleaned_html"` parameter to `MarkdownGenerationStrategy` class
2. Updated `DefaultMarkdownGenerator` to accept and pass the content source parameter
3. Renamed the `cleaned_html` parameter to `input_html` in the `generate_markdown` method
4. Modified `AsyncWebCrawler.aprocess_html` to select the appropriate HTML source based on the generator's config
5. Added `preprocess_html_for_schema` import in `async_webcrawler.py`
**Implementation Details:**
- Added a new `content_source` parameter to specify which HTML input to use for markdown generation
- Options include: "cleaned_html" (default), "raw_html", and "fit_html"
- Used a dictionary dispatch pattern in `aprocess_html` to select the appropriate HTML source
- Added proper error handling with fallback to cleaned_html if content source selection fails
- Ensured backward compatibility by defaulting to "cleaned_html" option
**Files Modified:**
- `crawl4ai/markdown_generation_strategy.py`: Added content_source parameter and updated the method signature
- `crawl4ai/async_webcrawler.py`: Added HTML source selection logic and updated imports
**Examples:**
- Created `docs/examples/content_source_example.py` demonstrating how to use the new parameter
**Challenges:**
- Maintaining backward compatibility while reorganizing the parameter flow
- Ensuring proper error handling for all content source options
- Making the change with minimal code modifications
**Why This Feature:**
The content source selection feature allows users to choose which HTML content to use as input for markdown generation:
1. "cleaned_html" - Uses the post-processed HTML after scraping strategy (original behavior)
2. "raw_html" - Uses the original raw HTML directly from the web page
3. "fit_html" - Uses the preprocessed HTML optimized for schema extraction
This feature provides greater flexibility in how users generate markdown, enabling them to:
- Capture more detailed content from the original HTML when needed
- Use schema-optimized HTML when working with structured data
- Choose the approach that best suits their specific use case
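For reference, a minimal usage sketch of the new parameter (the target URL is a placeholder; see `docs/examples/content_source_example.py` for the full example):
```python
import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator

async def main():
    # Generate markdown from the raw page HTML instead of the post-scraping cleaned HTML
    md_generator = DefaultMarkdownGenerator(content_source="raw_html")
    config = CrawlerRunConfig(markdown_generator=md_generator)

    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(url="https://example.com", config=config)
        print(result.markdown.raw_markdown[:300])

if __name__ == "__main__":
    asyncio.run(main())
```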
## [2025-04-17] Implemented High Volume Stress Testing Solution for SDK
**Feature:** Comprehensive stress testing framework using `arun_many` and the dispatcher system to evaluate performance, concurrency handling, and identify potential issues under high-volume crawling scenarios.
**Changes Made:**
1. Created a dedicated stress testing framework in the `benchmarking/` (or similar) directory.
2. Implemented local test site generation (`SiteGenerator`) with configurable heavy HTML pages.
3. Added basic memory usage tracking (`SimpleMemoryTracker`) using platform-specific commands (avoiding `psutil` dependency for this specific test).
4. Utilized `CrawlerMonitor` from `crawl4ai` for rich terminal UI and real-time monitoring of test progress and dispatcher activity.
5. Implemented detailed result summary saving (JSON) and memory sample logging (CSV).
6. Developed `run_benchmark.py` to orchestrate tests with predefined configurations.
7. Created `run_all.sh` as a simple wrapper for `run_benchmark.py`.
**Implementation Details:**
- Generates a local test site with configurable pages containing heavy text and image content.
- Uses Python's built-in `http.server` for local serving, minimizing network variance.
- Leverages `crawl4ai`'s `arun_many` method for processing URLs.
- Utilizes `MemoryAdaptiveDispatcher` to manage concurrency via the `max_sessions` parameter (note: memory adaptation features require `psutil`, not used by `SimpleMemoryTracker`).
- Tracks memory usage via `SimpleMemoryTracker`, recording samples throughout test execution to a CSV file.
- Uses `CrawlerMonitor` (which uses the `rich` library) for clear terminal visualization and progress reporting directly from the dispatcher.
- Stores detailed final metrics in a JSON summary file.
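A minimal sketch of the `arun_many` + dispatcher pattern the stress test exercises (the local URL list is a placeholder for the generated test site, and the dispatcher import path and defaults may differ slightly between versions):
```python
import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, CacheMode
from crawl4ai.async_dispatcher import MemoryAdaptiveDispatcher

async def main():
    urls = [f"http://localhost:8000/page_{i}.html" for i in range(100)]  # local test site
    dispatcher = MemoryAdaptiveDispatcher(max_session_permit=16)         # cap concurrent sessions
    run_config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS)

    async with AsyncWebCrawler() as crawler:
        results = await crawler.arun_many(urls, config=run_config, dispatcher=dispatcher)
        ok = sum(1 for r in results if r.success)
        print(f"{ok}/{len(urls)} pages crawled successfully")

if __name__ == "__main__":
    asyncio.run(main())
```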
**Files Created/Updated:**
- `stress_test_sdk.py`: Main stress testing implementation using `arun_many`.
- `benchmark_report.py`: (Assumed) Report generator for comparing test results.
- `run_benchmark.py`: Test runner script with predefined configurations.
- `run_all.sh`: Simple bash script wrapper for `run_benchmark.py`.
- `USAGE.md`: Comprehensive documentation on usage and interpretation (updated).
**Testing Approach:**
- Creates a controlled, reproducible test environment with a local HTTP server.
- Processes URLs using `arun_many`, allowing the dispatcher to manage concurrency up to `max_sessions`.
- Optionally logs per-batch summaries (when not in streaming mode) after processing chunks.
- Supports different test sizes via `run_benchmark.py` configurations.
- Records memory samples via platform commands for basic trend analysis.
- Includes cleanup functionality for the test environment.
**Challenges:**
- Ensuring proper cleanup of HTTP server processes.
- Getting reliable memory tracking across platforms without adding heavy dependencies (`psutil`) to this specific test script.
- Designing `run_benchmark.py` to correctly pass arguments to `stress_test_sdk.py`.
**Why This Feature:**
The high volume stress testing solution addresses critical needs for ensuring Crawl4AI's `arun_many` reliability:
1. Provides a reproducible way to evaluate performance under concurrent load.
2. Allows testing the dispatcher's concurrency control (`max_session_permit`) and queue management.
3. Enables performance tuning by observing throughput (`URLs/sec`) under different `max_sessions` settings.
4. Creates a controlled environment for testing `arun_many` behavior.
5. Supports continuous integration by providing deterministic test conditions for `arun_many`.
**Design Decisions:**
- Chose local site generation for reproducibility and isolation from network issues.
- Utilized the built-in `CrawlerMonitor` for real-time feedback, leveraging its `rich` integration.
- Implemented optional per-batch logging in `stress_test_sdk.py` (when not streaming) to provide chunk-level summaries alongside the continuous monitor.
- Adopted `arun_many` with a `MemoryAdaptiveDispatcher` as the core mechanism for parallel execution, reflecting the intended SDK usage.
- Created `run_benchmark.py` to simplify running standard test configurations.
- Used `SimpleMemoryTracker` to provide basic memory insights without requiring `psutil` for this particular test runner.
**Future Enhancements to Consider:**
- Create a separate test variant that *does* use `psutil` to specifically stress the memory-adaptive features of the dispatcher.
- Add support for generated JavaScript content.
- Add support for Docker-based testing with explicit memory limits.
- Enhance `benchmark_report.py` to provide more sophisticated analysis of performance and memory trends from the generated JSON/CSV files.
---
## [2025-04-17] Refined Stress Testing System Parameters and Execution
**Changes Made:**
1. Corrected `run_benchmark.py` and `stress_test_sdk.py` to use `--max-sessions` instead of the incorrect `--workers` parameter, accurately reflecting dispatcher configuration.
2. Updated `run_benchmark.py` argument handling to correctly pass all relevant custom parameters (including `--stream`, `--monitor-mode`, etc.) to `stress_test_sdk.py`.
3. (Assuming changes in `benchmark_report.py`) Applied dark theme to benchmark reports for better readability.
4. (Assuming changes in `benchmark_report.py`) Improved visualization code to eliminate matplotlib warnings.
5. Updated `run_benchmark.py` to provide clickable `file://` links to generated reports in the terminal output.
6. Updated `USAGE.md` with comprehensive parameter descriptions reflecting the final script arguments.
7. Updated `run_all.sh` wrapper to correctly invoke `run_benchmark.py` with flexible arguments.
**Details of Changes:**
1. **Parameter Correction (`--max-sessions`)**:
* Identified the fundamental misunderstanding where `--workers` was used incorrectly.
* Refactored `stress_test_sdk.py` to accept `--max-sessions` and configure the `MemoryAdaptiveDispatcher`'s `max_session_permit` accordingly.
* Updated `run_benchmark.py` argument parsing and command construction to use `--max-sessions`.
* Updated `TEST_CONFIGS` in `run_benchmark.py` to use `max_sessions`.
2. **Argument Handling (`run_benchmark.py`)**:
* Improved logic to collect all command-line arguments provided to `run_benchmark.py`.
* Ensured all relevant arguments (like `--stream`, `--monitor-mode`, `--port`, `--use-rate-limiter`, etc.) are correctly forwarded when calling `stress_test_sdk.py` as a subprocess.
3. **Dark Theme & Visualization Fixes (Assumed in `benchmark_report.py`)**:
* (Describes changes assumed to be made in the separate reporting script).
4. **Clickable Links (`run_benchmark.py`)**:
* Added logic to find the latest HTML report and PNG chart in the `benchmark_reports` directory after `benchmark_report.py` runs.
* Used `pathlib` to generate correct `file://` URLs for terminal output (a minimal sketch follows this list).
5. **Documentation Improvements (`USAGE.md`)**:
* Rewrote sections to explain `arun_many`, dispatchers, and `--max-sessions`.
* Updated parameter tables for all scripts (`stress_test_sdk.py`, `run_benchmark.py`).
* Clarified the difference between batch and streaming modes and their effect on logging.
* Updated examples to use correct arguments.
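The clickable-link logic from item 4 above boils down to something like the following sketch, assuming reports are written to a `benchmark_reports/` directory (directory name and glob pattern are illustrative):
```python
from pathlib import Path

# Pick the most recently generated HTML report and print a clickable file:// link
reports = sorted(Path("benchmark_reports").glob("*.html"), key=lambda p: p.stat().st_mtime)
if reports:
    print(f"Report: {reports[-1].resolve().as_uri()}")
```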
**Files Modified:**
- `stress_test_sdk.py`: Changed `--workers` to `--max-sessions`, added new arguments, used `arun_many`.
- `run_benchmark.py`: Changed argument handling, updated configs, calls `stress_test_sdk.py`.
- `run_all.sh`: Updated to call `run_benchmark.py` correctly.
- `USAGE.md`: Updated documentation extensively.
- `benchmark_report.py`: (Assumed modifications for dark theme and viz fixes).
**Testing:**
- Verified that `--max-sessions` correctly limits concurrency via the `CrawlerMonitor` output.
- Confirmed that custom arguments passed to `run_benchmark.py` are forwarded to `stress_test_sdk.py`.
- Validated clickable links work in supporting terminals.
- Ensured documentation matches the final script parameters and behavior.
**Why These Changes:**
These refinements correct the fundamental approach of the stress test to align with `crawl4ai`'s actual architecture and intended usage:
1. Ensures the test evaluates the correct components (`arun_many`, `MemoryAdaptiveDispatcher`).
2. Makes test configurations more accurate and flexible.
3. Improves the usability of the testing framework through better argument handling and documentation.
**Future Enhancements to Consider:**
- Add support for generated JavaScript content to test JS rendering performance
- Implement more sophisticated memory analysis like generational garbage collection tracking
- Add support for Docker-based testing with memory limits to force OOM conditions
- Create visualization tools for analyzing memory usage patterns across test runs
- Add benchmark comparisons between different crawler versions or configurations
## [2025-04-17] Fixed Issues in Stress Testing System
**Changes Made:**
1. Fixed custom parameter handling in run_benchmark.py
2. Applied dark theme to benchmark reports for better readability
3. Improved visualization code to eliminate matplotlib warnings
4. Added clickable links to generated reports in terminal output
5. Enhanced documentation with comprehensive parameter descriptions
**Details of Changes:**
1. **Custom Parameter Handling Fix**
- Identified bug where custom URL count was being ignored in run_benchmark.py
- Rewrote argument handling to use a custom args dictionary
- Properly passed parameters to the test_simple_stress.py command
- Added better UI indication of custom parameters in use
2. **Dark Theme Implementation**
- Added complete dark theme to HTML benchmark reports
- Applied dark styling to all visualization components
- Used Nord-inspired color palette for charts and graphs
- Improved contrast and readability for data visualization
- Updated text colors and backgrounds for better eye comfort
3. **Matplotlib Warning Fixes**
- Resolved warnings related to improper use of set_xticklabels()
- Implemented correct x-axis positioning for bar charts
- Ensured proper alignment of bar labels and data points
- Updated plotting code to use modern matplotlib practices
4. **Documentation Improvements**
- Created comprehensive USAGE.md with detailed instructions
- Added parameter documentation for all scripts
- Included examples for all common use cases
- Provided detailed explanations for interpreting results
- Added troubleshooting guide for common issues
**Files Modified:**
- `tests/memory/run_benchmark.py`: Fixed custom parameter handling
- `tests/memory/benchmark_report.py`: Added dark theme and fixed visualization warnings
- `tests/memory/run_all.sh`: Added clickable links to reports
- `tests/memory/USAGE.md`: Created comprehensive documentation
**Testing:**
- Verified that custom URL counts are now correctly used
- Confirmed dark theme is properly applied to all report elements
- Checked that matplotlib warnings are no longer appearing
- Validated clickable links to reports work in terminals that support them
**Why These Changes:**
These improvements address several usability issues with the stress testing system:
1. Better parameter handling ensures test configurations work as expected
2. Dark theme reduces eye strain during extended test review sessions
3. Fixing visualization warnings improves code quality and output clarity
4. Enhanced documentation makes the system more accessible for future use
**Future Enhancements:**
- Add additional visualization options for different types of analysis
- Implement theme toggle to support both light and dark preferences
- Add export options for embedding reports in other documentation
- Create dedicated CI/CD integration templates for automated testing
## [2025-04-09] Added MHTML Capture Feature
**Feature:** MHTML snapshot capture of crawled pages
**Changes Made:**
1. Added `capture_mhtml: bool = False` parameter to `CrawlerRunConfig` class
2. Added `mhtml: Optional[str] = None` field to `CrawlResult` model
3. Added `mhtml_data: Optional[str] = None` field to `AsyncCrawlResponse` class
4. Implemented `capture_mhtml()` method in `AsyncPlaywrightCrawlerStrategy` class to capture MHTML via CDP
5. Modified the crawler to capture MHTML when enabled and pass it to the result
**Implementation Details:**
- MHTML capture uses Chrome DevTools Protocol (CDP) via Playwright's CDP session API
- The implementation waits for page to fully load before capturing MHTML content
- Enhanced waiting for JavaScript content with requestAnimationFrame for better JS content capture
- We ensure all browser resources are properly cleaned up after capture
**Files Modified:**
- `crawl4ai/models.py`: Added the mhtml field to CrawlResult
- `crawl4ai/async_configs.py`: Added capture_mhtml parameter to CrawlerRunConfig
- `crawl4ai/async_crawler_strategy.py`: Implemented MHTML capture logic
- `crawl4ai/async_webcrawler.py`: Added mapping from AsyncCrawlResponse.mhtml_data to CrawlResult.mhtml
**Testing:**
- Created comprehensive tests in `tests/20241401/test_mhtml.py` covering:
- Capturing MHTML when enabled
- Ensuring mhtml is None when disabled explicitly
- Ensuring mhtml is None by default
- Capturing MHTML on JavaScript-enabled pages
**Challenges:**
- Had to improve page loading detection to ensure JavaScript content was fully rendered
- Tests needed to be run independently due to Playwright browser instance management
- Modified test expected content to match actual MHTML output
**Why This Feature:**
The MHTML capture feature allows users to capture complete web pages including all resources (CSS, images, etc.) in a single file. This is valuable for:
1. Offline viewing of captured pages
2. Creating permanent snapshots of web content for archival
3. Ensuring consistent content for later analysis, even if the original site changes
**Future Enhancements to Consider:**
- Add option to save MHTML to file
- Support for filtering what resources get included in MHTML
- Add support for specifying MHTML capture options
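A short usage sketch of the feature as described above (URL and output filename are placeholders):
```python
import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig

async def main():
    config = CrawlerRunConfig(capture_mhtml=True)
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(url="https://example.com", config=config)
        if result.mhtml:
            # Save the single-file snapshot for offline viewing or archival
            with open("snapshot.mhtml", "w", encoding="utf-8") as f:
                f.write(result.mhtml)

if __name__ == "__main__":
    asyncio.run(main())
```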
## [2025-04-10] Added Network Request and Console Message Capturing
**Feature:** Comprehensive capturing of network requests/responses and browser console messages during crawling
**Changes Made:**
1. Added `capture_network_requests: bool = False` and `capture_console_messages: bool = False` parameters to `CrawlerRunConfig` class
2. Added `network_requests: Optional[List[Dict[str, Any]]] = None` and `console_messages: Optional[List[Dict[str, Any]]] = None` fields to both `AsyncCrawlResponse` and `CrawlResult` models
3. Implemented event listeners in `AsyncPlaywrightCrawlerStrategy._crawl_web()` to capture browser network events and console messages
4. Added proper event listener cleanup in the finally block to prevent resource leaks
5. Modified the crawler flow to pass captured data from AsyncCrawlResponse to CrawlResult
**Implementation Details:**
- Network capture uses Playwright event listeners (`request`, `response`, and `requestfailed`) to record all network activity
- Console capture uses Playwright event listeners (`console` and `pageerror`) to record console messages and errors
- Each network event includes metadata like URL, headers, status, and timing information
- Each console message includes type, text content, and source location when available
- All captured events include timestamps for chronological analysis
- Error handling ensures even failed capture attempts won't crash the main crawling process
**Files Modified:**
- `crawl4ai/models.py`: Added new fields to AsyncCrawlResponse and CrawlResult
- `crawl4ai/async_configs.py`: Added new configuration parameters to CrawlerRunConfig
- `crawl4ai/async_crawler_strategy.py`: Implemented capture logic using event listeners
- `crawl4ai/async_webcrawler.py`: Added data transfer from AsyncCrawlResponse to CrawlResult
**Documentation:**
- Created detailed documentation in `docs/md_v2/advanced/network-console-capture.md`
- Added feature to site navigation in `mkdocs.yml`
- Updated CrawlResult documentation in `docs/md_v2/api/crawl-result.md`
- Created comprehensive example in `docs/examples/network_console_capture_example.py`
**Testing:**
- Created `tests/general/test_network_console_capture.py` with tests for:
- Verifying capture is disabled by default
- Testing network request capturing
- Testing console message capturing
- Ensuring both capture types can be enabled simultaneously
- Checking correct content is captured in expected formats
**Challenges:**
- Initial implementation had synchronous/asynchronous mismatches in event handlers
- Needed to fix type of property access vs. method calls in handlers
- Required careful cleanup of event listeners to prevent memory leaks
**Why This Feature:**
The network and console capture feature provides deep visibility into web page activity, enabling:
1. Debugging complex web applications by seeing all network requests and errors
2. Security analysis to detect unexpected third-party requests and data flows
3. Performance profiling to identify slow-loading resources
4. API discovery in single-page applications
5. Comprehensive analysis of web application behavior
**Future Enhancements to Consider:**
- Option to filter captured events by type, domain, or content
- Support for capturing response bodies (with size limits)
- Aggregate statistics calculation for performance metrics
- Integration with visualization tools for network waterfall analysis
- Exporting captures in HAR format for use with external tools
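A minimal sketch of how the two capture flags are used together (URL is a placeholder):
```python
import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig

async def main():
    config = CrawlerRunConfig(
        capture_network_requests=True,
        capture_console_messages=True,
    )
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(url="https://example.com", config=config)
        print(f"network events: {len(result.network_requests or [])}")
        print(f"console messages: {len(result.console_messages or [])}")

if __name__ == "__main__":
    asyncio.run(main())
```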

20
LICENSE
View File

@@ -48,22 +48,4 @@ You may add Your own copyright statement to Your modifications and may provide a
9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
---
Attribution Requirement
All distributions, publications, or public uses of this software, or derivative works based on this software, must include the following attribution:
"This product includes software developed by UncleCode (https://x.com/unclecode) as part of the Crawl4AI project (https://github.com/unclecode/crawl4ai)."
This attribution must be displayed in a prominent and easily accessible location, such as:
- For software distributions: In a NOTICE file, README file, or equivalent documentation.
- For publications (research papers, articles, blog posts): In the acknowledgments section or a footnote.
- For websites/web applications: In an "About" or "Credits" section.
- For command-line tools: In the help/usage output.
This requirement ensures proper credit is given for the use of Crawl4AI and helps promote the project.
---
END OF TERMS AND CONDITIONS

View File

@@ -1,320 +0,0 @@
# Progressive Web Crawling with Adaptive Information Foraging
## Abstract
This paper presents a novel approach to web crawling that adaptively determines when sufficient information has been gathered to answer a given query. Unlike traditional exhaustive crawling methods, our Progressive Information Sufficiency (PIS) framework uses statistical measures to balance information completeness against crawling efficiency. We introduce a multi-strategy architecture supporting pure statistical, embedding-enhanced, and LLM-assisted approaches, with theoretical guarantees on convergence and practical evaluation methods using synthetic datasets.
## 1. Introduction
Traditional web crawling approaches follow predetermined patterns (breadth-first, depth-first) without consideration for information sufficiency. This work addresses the fundamental question: *"When do we have enough information to answer a query and similar queries in its domain?"*
We formalize this as an optimal stopping problem in information foraging, introducing metrics for coverage, consistency, and saturation that enable crawlers to make intelligent decisions about when to stop crawling and which links to follow.
## 2. Problem Formulation
### 2.1 Definitions
Let:
- **K** = {d₁, d₂, ..., dₙ} be the current knowledge base (crawled documents)
- **Q** be the user query
- **L** = {l₁, l₂, ..., lₘ} be available links with preview metadata
- **θ** be the confidence threshold for information sufficiency
### 2.2 Objectives
1. **Minimize** |K| (number of crawled pages)
2. **Maximize** P(answers(Q) | K) (probability of answering Q given K)
3. **Ensure** coverage of Q's domain (similar queries)
## 3. Mathematical Framework
### 3.1 Information Sufficiency Metric
We define Information Sufficiency as:
```
IS(K, Q) = min(Coverage(K, Q), Consistency(K, Q), 1 - Redundancy(K)) × DomainCoverage(K, Q)
```
### 3.2 Coverage Score
Coverage measures how well current knowledge covers query terms and related concepts:
```
Coverage(K, Q) = Σ(t ∈ Q) log(df(t, K) + 1) × idf(t) / |Q|
```
Where:
- df(t, K) = document frequency of term t in knowledge base K
- idf(t) = inverse document frequency weight
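For concreteness, a simplified illustration of this score (tokenization and idf estimation are placeholders, not part of the formal definition):
```python
from math import log

def coverage(query_terms, documents, idf):
    # Coverage(K, Q): mean over query terms of log(df(t, K) + 1) * idf(t)
    scores = []
    for t in query_terms:
        df = sum(1 for d in documents if t in d)   # document frequency of t in K
        scores.append(log(df + 1) * idf.get(t, 1.0))
    return sum(scores) / len(query_terms)
```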
### 3.3 Consistency Score
Consistency measures information coherence across documents:
```
Consistency(K, Q) = 1 - Var(answers from random subsets of K)
```
This captures the principle that sufficient knowledge should provide stable answers regardless of document subset.
### 3.4 Saturation Score
Saturation detects diminishing returns:
```
Saturation(K) = 1 - (ΔInfo(Kₙ) / ΔInfo(K₁))
```
Where ΔInfo represents marginal information gain from the nth crawl.
### 3.5 Link Value Prediction
Expected information gain from uncrawled links:
```
ExpectedGain(l) = Relevance(l, Q) × Novelty(l, K) × Authority(l)
```
Components:
- **Relevance**: BM25(preview_text, Q)
- **Novelty**: 1 - max_similarity(preview, K)
- **Authority**: f(url_structure, domain_metrics)
## 4. Algorithmic Approach
### 4.1 Progressive Crawling Algorithm
```
Algorithm: ProgressiveCrawl(start_url, query, θ)
    K ← ∅
    crawled ← {start_url}
    pending ← extract_links(crawl(start_url))

    while IS(K, Q) < θ and |crawled| < max_pages:
        candidates ← rank_by_expected_gain(pending, Q, K)
        if max(ExpectedGain(candidates)) < min_gain:
            break   // Diminishing returns
        to_crawl ← top_k(candidates)
        new_docs ← parallel_crawl(to_crawl)
        K ← K ∪ new_docs
        crawled ← crawled ∪ to_crawl
        pending ← extract_new_links(new_docs) - crawled

    return K
```
### 4.2 Stopping Criteria
Crawling terminates when:
1. IS(K, Q) ≥ θ (sufficient information)
2. d(IS)/d(crawls) < ε (plateau reached)
3. |crawled| ≥ max_pages (resource limit)
4. max(ExpectedGain) < min_gain (no promising links)
## 5. Multi-Strategy Architecture
### 5.1 Strategy Pattern Design
```
AbstractStrategy
├── StatisticalStrategy (no LLM, no embeddings)
├── EmbeddingStrategy (with semantic similarity)
└── LLMStrategy (with language model assistance)
```
### 5.2 Statistical Strategy
Pure statistical approach using:
- BM25 for relevance scoring
- Term frequency analysis for coverage
- Graph structure for authority
- No external models required
**Advantages**: Fast, no API costs, works offline
**Best for**: Technical documentation, specific terminology
### 5.3 Embedding Strategy (Implemented)
Semantic understanding through embeddings:
- Query expansion into semantic variations
- Coverage mapping in embedding space
- Gap-driven link selection
- Validation-based stopping criteria
**Mathematical Framework**:
```
Coverage(K, Q) = mean(max_similarity(q, K) for q in Q_expanded)
Gap(q) = 1 - max_similarity(q, K)
LinkScore(l) = Σ(Gap(q) × relevance(l, q)) × (1 - redundancy(l, K))
```
**Key Parameters**:
- `embedding_k_exp`: Exponential decay factor for distance-to-score mapping
- `embedding_coverage_radius`: Distance threshold for query coverage
- `embedding_min_confidence_threshold`: Minimum relevance threshold
**Advantages**: Semantic understanding, handles ambiguity, detects irrelevance
**Best for**: Research queries, conceptual topics, diverse content
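A minimal sketch of the coverage/gap computation above, assuming pre-computed, L2-normalized embeddings for the expanded queries and the knowledge-base chunks:
```python
import numpy as np

def embedding_coverage(query_vecs, kb_vecs):
    # query_vecs: (n_queries, d), kb_vecs: (n_chunks, d), both L2-normalized
    sims = query_vecs @ kb_vecs.T          # cosine similarities
    best = sims.max(axis=1)                # best match in K for each expanded query
    coverage = float(best.mean())          # Coverage(K, Q)
    gaps = 1.0 - best                      # Gap(q) for each query variation
    return coverage, gaps
```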
### 5.4 Progressive Enhancement Path
1. **Level 0**: Statistical only (implemented)
2. **Level 1**: + Embeddings for semantic similarity (implemented)
3. **Level 2**: + LLM for query understanding (future)
## 6. Evaluation Methodology
### 6.1 Synthetic Dataset Generation
Using LLM to create evaluation data:
```python
def generate_synthetic_dataset(domain_url):
    # 1. Fully crawl domain
    full_knowledge = exhaustive_crawl(domain_url)
    # 2. Generate answerable queries
    queries = llm_generate_queries(full_knowledge)
    # 3. Create query variations
    for q in queries:
        variations = generate_variations(q)  # synonyms, sub/super queries
    return queries, variations, full_knowledge
```
### 6.2 Evaluation Metrics
1. **Efficiency**: Information gained / Pages crawled
2. **Completeness**: Answerable queries / Total queries
3. **Redundancy**: 1 - (Unique information / Total information)
4. **Convergence Rate**: Pages to 95% completeness
### 6.3 Ablation Studies
- Impact of each score component (coverage, consistency, saturation)
- Sensitivity to threshold parameters
- Performance across different domain types
## 7. Theoretical Properties
### 7.1 Convergence Guarantee
**Theorem**: For finite websites, ProgressiveCrawl converges to IS(K, Q) ≥ θ or exhausts all reachable pages.
**Proof sketch**: IS(K, Q) is monotonically non-decreasing with each crawl, bounded above by 1.
### 7.2 Optimality
Under certain assumptions about link preview accuracy:
- Expected crawls ≤ 2 × optimal_crawls
- Approximation ratio improves with preview quality
## 8. Implementation Design
### 8.1 Core Components
1. **CrawlState**: Maintains crawl history and metrics
2. **AdaptiveConfig**: Configuration parameters
3. **CrawlStrategy**: Pluggable strategy interface
4. **AdaptiveCrawler**: Main orchestrator
### 8.2 Integration with Crawl4AI
- Wraps existing AsyncWebCrawler
- Leverages link preview functionality
- Maintains backward compatibility
### 8.3 Persistence
Knowledge base serialization for:
- Resumable crawls
- Knowledge sharing
- Offline analysis
## 9. Future Directions
### 9.1 Advanced Scoring
- Temporal information value
- Multi-query optimization
- Active learning from user feedback
### 9.2 Distributed Crawling
- Collaborative knowledge building
- Federated information sufficiency
### 9.3 Domain Adaptation
- Transfer learning across domains
- Meta-learning for threshold selection
## 10. Conclusion
Progressive crawling with adaptive information foraging provides a principled approach to efficient web information extraction. By combining coverage, consistency, and saturation metrics, we can determine information sufficiency without ground truth labels. The multi-strategy architecture allows graceful enhancement from pure statistical to LLM-assisted approaches based on requirements and resources.
## References
1. Manning, C. D., Raghavan, P., & Schütze, H. (2008). Introduction to Information Retrieval. Cambridge University Press.
2. Robertson, S., & Zaragoza, H. (2009). The Probabilistic Relevance Framework: BM25 and Beyond. Foundations and Trends in Information Retrieval.
3. Pirolli, P., & Card, S. (1999). Information Foraging. Psychological Review, 106(4), 643-675.
4. Dasgupta, S. (2005). Analysis of a greedy active learning strategy. Advances in Neural Information Processing Systems.
## Appendix A: Implementation Pseudocode
```python
from math import log
from statistics import mean

class StatisticalStrategy:
    def calculate_confidence(self, state):
        coverage = self.calculate_coverage(state)
        consistency = self.calculate_consistency(state)
        saturation = self.calculate_saturation(state)
        return min(coverage, consistency, saturation)

    def calculate_coverage(self, state):
        # BM25-based term coverage
        term_scores = []
        for term in state.query.split():
            df = state.document_frequencies.get(term, 0)
            idf = self.idf_cache.get(term, 1.0)
            term_scores.append(log(df + 1) * idf)
        # max_possible_score: per-corpus normalization constant (defined elsewhere)
        return mean(term_scores) / max_possible_score

    def rank_links(self, state):
        scored_links = []
        for link in state.pending_links:
            relevance = self.bm25_score(link.preview_text, state.query)
            novelty = self.calculate_novelty(link, state.knowledge_base)
            authority = self.url_authority(link.href)
            score = relevance * novelty * authority
            scored_links.append((link, score))
        return sorted(scored_links, key=lambda x: x[1], reverse=True)
```
## Appendix B: Evaluation Protocol
1. **Dataset Creation**:
- Select diverse domains (documentation, blogs, e-commerce)
- Generate 100 queries per domain using LLM
- Create query variations (5-10 per query)
2. **Baseline Comparisons**:
- BFS crawler (depth-limited)
- DFS crawler (depth-limited)
- Random crawler
- Oracle (knows relevant pages)
3. **Metrics Collection**:
- Pages crawled vs query answerability
- Time to sufficient confidence
- False positive/negative rates
4. **Statistical Analysis**:
- ANOVA for strategy comparison
- Regression for parameter sensitivity
- Bootstrap for confidence intervals

View File

@@ -1,809 +0,0 @@
# 🚀🤖 Crawl4AI: Open-source LLM Friendly Web Crawler & Scraper.
<div align="center">
<a href="https://trendshift.io/repositories/11716" target="_blank"><img src="https://trendshift.io/api/badge/repositories/11716" alt="unclecode%2Fcrawl4ai | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
[![GitHub Stars](https://img.shields.io/github/stars/unclecode/crawl4ai?style=social)](https://github.com/unclecode/crawl4ai/stargazers)
[![GitHub Forks](https://img.shields.io/github/forks/unclecode/crawl4ai?style=social)](https://github.com/unclecode/crawl4ai/network/members)
[![PyPI version](https://badge.fury.io/py/crawl4ai.svg)](https://badge.fury.io/py/crawl4ai)
[![Python Version](https://img.shields.io/pypi/pyversions/crawl4ai)](https://pypi.org/project/crawl4ai/)
[![Downloads](https://static.pepy.tech/badge/crawl4ai/month)](https://pepy.tech/project/crawl4ai)
[![GitHub Sponsors](https://img.shields.io/github/sponsors/unclecode?style=flat&logo=GitHub-Sponsors&label=Sponsors&color=pink)](https://github.com/sponsors/unclecode)
<p align="center">
<a href="https://x.com/crawl4ai">
<img src="https://img.shields.io/badge/Follow%20on%20X-000000?style=for-the-badge&logo=x&logoColor=white" alt="Follow on X" />
</a>
<a href="https://www.linkedin.com/company/crawl4ai">
<img src="https://img.shields.io/badge/Follow%20on%20LinkedIn-0077B5?style=for-the-badge&logo=linkedin&logoColor=white" alt="Follow on LinkedIn" />
</a>
<a href="https://discord.gg/jP8KfhDhyN">
<img src="https://img.shields.io/badge/Join%20our%20Discord-5865F2?style=for-the-badge&logo=discord&logoColor=white" alt="Join our Discord" />
</a>
</p>
</div>
Crawl4AI is the #1 trending GitHub repository, actively maintained by a vibrant community. It delivers blazing-fast, AI-ready web crawling tailored for LLMs, AI agents, and data pipelines. Open source, flexible, and built for real-time performance, Crawl4AI empowers developers with unmatched speed, precision, and deployment ease.
[✨ Check out latest update v0.7.0](#-recent-updates)
🎉 **Version 0.7.0 is now available!** The Adaptive Intelligence Update introduces groundbreaking features: Adaptive Crawling that learns website patterns, Virtual Scroll support for infinite pages, intelligent Link Preview with 3-layer scoring, Async URL Seeder for massive discovery, and significant performance improvements. [Read the release notes →](https://github.com/unclecode/crawl4ai/blob/main/docs/blog/release-v0.7.0.md)
<details>
<summary>🤓 <strong>My Personal Story</strong></summary>
My journey with computers started in childhood when my dad, a computer scientist, introduced me to an Amstrad computer. Those early days sparked a fascination with technology, leading me to pursue computer science and specialize in NLP during my postgraduate studies. It was during this time that I first delved into web crawling, building tools to help researchers organize papers and extract information from publications, a challenging yet rewarding experience that honed my skills in data extraction.
Fast forward to 2023, I was working on a tool for a project and needed a crawler to convert a webpage into markdown. While exploring solutions, I found one that claimed to be open-source but required creating an account and generating an API token. Worse, it turned out to be a SaaS model charging $16, and its quality didn't meet my standards. Frustrated, I realized this was a deeper problem. That frustration turned into turbo anger mode, and I decided to build my own solution. In just a few days, I created Crawl4AI. To my surprise, it went viral, earning thousands of GitHub stars and resonating with a global community.
I made Crawl4AI open-source for two reasons. First, it's my way of giving back to the open-source community that has supported me throughout my career. Second, I believe data should be accessible to everyone, not locked behind paywalls or monopolized by a few. Open access to data lays the foundation for the democratization of AI, a vision where individuals can train their own models and take ownership of their information. This library is the first step in a larger journey to create the best open-source data extraction and generation tool the world has ever seen, built collaboratively by a passionate community.
Thank you to everyone who has supported this project, used it, and shared feedback. Your encouragement motivates me to dream even bigger. Join us, file issues, submit PRs, or spread the word. Together, we can build a tool that truly empowers people to access their own data and reshape the future of AI.
</details>
## 🧐 Why Crawl4AI?
1. **Built for LLMs**: Creates smart, concise Markdown optimized for RAG and fine-tuning applications.
2. **Lightning Fast**: Delivers results faster with real-time, cost-efficient performance.
3. **Flexible Browser Control**: Offers session management, proxies, and custom hooks for seamless data access.
4. **Heuristic Intelligence**: Uses advanced algorithms for efficient extraction, reducing reliance on costly models.
5. **Open Source & Deployable**: Fully open-source with no API keys—ready for Docker and cloud integration.
6. **Thriving Community**: Actively maintained by a vibrant community and the #1 trending GitHub repository.
## 🚀 Quick Start
1. Install Crawl4AI:
```bash
# Install the package
pip install -U crawl4ai
# For pre release versions
pip install crawl4ai --pre
# Run post-installation setup
crawl4ai-setup
# Verify your installation
crawl4ai-doctor
```
If you encounter any browser-related issues, you can install them manually:
```bash
python -m playwright install --with-deps chromium
```
2. Run a simple web crawl with Python:
```python
import asyncio
from crawl4ai import *

async def main():
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(
            url="https://www.nbcnews.com/business",
        )
        print(result.markdown)

if __name__ == "__main__":
    asyncio.run(main())
```
3. Or use the new command-line interface:
```bash
# Basic crawl with markdown output
crwl https://www.nbcnews.com/business -o markdown
# Deep crawl with BFS strategy, max 10 pages
crwl https://docs.crawl4ai.com --deep-crawl bfs --max-pages 10
# Use LLM extraction with a specific question
crwl https://www.example.com/products -q "Extract all product prices"
```
## ✨ Features
<details>
<summary>📝 <strong>Markdown Generation</strong></summary>
- 🧹 **Clean Markdown**: Generates clean, structured Markdown with accurate formatting.
- 🎯 **Fit Markdown**: Heuristic-based filtering to remove noise and irrelevant parts for AI-friendly processing.
- 🔗 **Citations and References**: Converts page links into a numbered reference list with clean citations.
- 🛠️ **Custom Strategies**: Users can create their own Markdown generation strategies tailored to specific needs.
- 📚 **BM25 Algorithm**: Employs BM25-based filtering for extracting core information and removing irrelevant content.
</details>
<details>
<summary>📊 <strong>Structured Data Extraction</strong></summary>
- 🤖 **LLM-Driven Extraction**: Supports all LLMs (open-source and proprietary) for structured data extraction.
- 🧱 **Chunking Strategies**: Implements chunking (topic-based, regex, sentence-level) for targeted content processing.
- 🌌 **Cosine Similarity**: Find relevant content chunks based on user queries for semantic extraction.
- 🔎 **CSS-Based Extraction**: Fast schema-based data extraction using XPath and CSS selectors.
- 🔧 **Schema Definition**: Define custom schemas for extracting structured JSON from repetitive patterns.
</details>
<details>
<summary>🌐 <strong>Browser Integration</strong></summary>
- 🖥️ **Managed Browser**: Use user-owned browsers with full control, avoiding bot detection.
- 🔄 **Remote Browser Control**: Connect to Chrome Developer Tools Protocol for remote, large-scale data extraction.
- 👤 **Browser Profiler**: Create and manage persistent profiles with saved authentication states, cookies, and settings.
- 🔒 **Session Management**: Preserve browser states and reuse them for multi-step crawling.
- 🧩 **Proxy Support**: Seamlessly connect to proxies with authentication for secure access.
- ⚙️ **Full Browser Control**: Modify headers, cookies, user agents, and more for tailored crawling setups.
- 🌍 **Multi-Browser Support**: Compatible with Chromium, Firefox, and WebKit.
- 📐 **Dynamic Viewport Adjustment**: Automatically adjusts the browser viewport to match page content, ensuring complete rendering and capturing of all elements.
</details>
<details>
<summary>🔎 <strong>Crawling & Scraping</strong></summary>
- 🖼️ **Media Support**: Extract images, audio, videos, and responsive image formats like `srcset` and `picture`.
- 🚀 **Dynamic Crawling**: Execute JavaScript and wait on synchronous or asynchronous conditions for dynamic content extraction.
- 📸 **Screenshots**: Capture page screenshots during crawling for debugging or analysis.
- 📂 **Raw Data Crawling**: Directly process raw HTML (`raw:`) or local files (`file://`).
- 🔗 **Comprehensive Link Extraction**: Extracts internal, external links, and embedded iframe content.
- 🛠️ **Customizable Hooks**: Define hooks at every step to customize crawling behavior.
- 💾 **Caching**: Cache data for improved speed and to avoid redundant fetches.
- 📄 **Metadata Extraction**: Retrieve structured metadata from web pages.
- 📡 **IFrame Content Extraction**: Seamless extraction from embedded iframe content.
- 🕵️ **Lazy Load Handling**: Waits for images to fully load, ensuring no content is missed due to lazy loading.
- 🔄 **Full-Page Scanning**: Simulates scrolling to load and capture all dynamic content, perfect for infinite scroll pages.
</details>
<details>
<summary>🚀 <strong>Deployment</strong></summary>
- 🐳 **Dockerized Setup**: Optimized Docker image with FastAPI server for easy deployment.
- 🔑 **Secure Authentication**: Built-in JWT token authentication for API security.
- 🔄 **API Gateway**: One-click deployment with secure token authentication for API-based workflows.
- 🌐 **Scalable Architecture**: Designed for mass-scale production and optimized server performance.
- ☁️ **Cloud Deployment**: Ready-to-deploy configurations for major cloud platforms.
</details>
<details>
<summary>🎯 <strong>Additional Features</strong></summary>
- 🕶️ **Stealth Mode**: Avoid bot detection by mimicking real users.
- 🏷️ **Tag-Based Content Extraction**: Refine crawling based on custom tags, headers, or metadata.
- 🔗 **Link Analysis**: Extract and analyze all links for detailed data exploration.
- 🛡️ **Error Handling**: Robust error management for seamless execution.
- 🔐 **CORS & Static Serving**: Supports filesystem-based caching and cross-origin requests.
- 📖 **Clear Documentation**: Simplified and updated guides for onboarding and advanced usage.
- 🙌 **Community Recognition**: Acknowledges contributors and pull requests for transparency.
</details>
## Try it Now!
✨ Play around with this [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1SgRPrByQLzjRfwoRNq1wSGE9nYY_EE8C?usp=sharing)
✨ Visit our [Documentation Website](https://docs.crawl4ai.com/)
## Installation 🛠️
Crawl4AI offers flexible installation options to suit various use cases. You can install it as a Python package or use Docker.
<details>
<summary>🐍 <strong>Using pip</strong></summary>
Choose the installation option that best fits your needs:
### Basic Installation
For basic web crawling and scraping tasks:
```bash
pip install crawl4ai
crawl4ai-setup # Setup the browser
```
By default, this will install the asynchronous version of Crawl4AI, using Playwright for web crawling.
👉 **Note**: When you install Crawl4AI, the `crawl4ai-setup` should automatically install and set up Playwright. However, if you encounter any Playwright-related errors, you can manually install it using one of these methods:
1. Through the command line:
```bash
playwright install
```
2. If the above doesn't work, try this more specific command:
```bash
python -m playwright install chromium
```
This second method has proven to be more reliable in some cases.
---
### Installation with Synchronous Version
The sync version is deprecated and will be removed in future versions. If you need the synchronous version using Selenium:
```bash
pip install crawl4ai[sync]
```
---
### Development Installation
For contributors who plan to modify the source code:
```bash
git clone https://github.com/unclecode/crawl4ai.git
cd crawl4ai
pip install -e . # Basic installation in editable mode
```
Install optional features:
```bash
pip install -e ".[torch]" # With PyTorch features
pip install -e ".[transformer]" # With Transformer features
pip install -e ".[cosine]" # With cosine similarity features
pip install -e ".[sync]" # With synchronous crawling (Selenium)
pip install -e ".[all]" # Install all optional features
```
</details>
<details>
<summary>🐳 <strong>Docker Deployment</strong></summary>
> 🚀 **Now Available!** Our completely redesigned Docker implementation is here! This new solution makes deployment more efficient and seamless than ever.
### New Docker Features
The new Docker implementation includes:
- **Browser pooling** with page pre-warming for faster response times
- **Interactive playground** to test and generate request code
- **MCP integration** for direct connection to AI tools like Claude Code
- **Comprehensive API endpoints** including HTML extraction, screenshots, PDF generation, and JavaScript execution
- **Multi-architecture support** with automatic detection (AMD64/ARM64)
- **Optimized resources** with improved memory management
### Getting Started
```bash
# Pull and run the latest release candidate
docker pull unclecode/crawl4ai:0.7.0
docker run -d -p 11235:11235 --name crawl4ai --shm-size=1g unclecode/crawl4ai:0.7.0
# Visit the playground at http://localhost:11235/playground
```
For complete documentation, see our [Docker Deployment Guide](https://docs.crawl4ai.com/core/docker-deployment/).
</details>
---
### Quick Test
Run a quick test (works for both Docker options):
```python
import requests

# Submit a crawl job
response = requests.post(
    "http://localhost:11235/crawl",
    json={"urls": ["https://example.com"], "priority": 10}
)
if response.status_code == 200:
    print("Crawl job submitted successfully.")

if "results" in response.json():
    results = response.json()["results"]
    print("Crawl job completed. Results:")
    for result in results:
        print(result)
else:
    task_id = response.json()["task_id"]
    print(f"Crawl job submitted. Task ID: {task_id}")
    result = requests.get(f"http://localhost:11235/task/{task_id}")
```
For more examples, see our [Docker Examples](https://github.com/unclecode/crawl4ai/blob/main/docs/examples/docker_example.py). For advanced configuration, environment variables, and usage examples, see our [Docker Deployment Guide](https://docs.crawl4ai.com/basic/docker-deployment/).
## 🔬 Advanced Usage Examples 🔬
You can check the project structure in the directory [docs/examples](https://github.com/unclecode/crawl4ai/tree/main/docs/examples). Over there, you can find a variety of examples; here, some popular examples are shared.
<details>
<summary>📝 <strong>Heuristic Markdown Generation with Clean and Fit Markdown</strong></summary>
```python
import asyncio
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
from crawl4ai.content_filter_strategy import PruningContentFilter, BM25ContentFilter
from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator

async def main():
    browser_config = BrowserConfig(
        headless=True,
        verbose=True,
    )
    run_config = CrawlerRunConfig(
        cache_mode=CacheMode.ENABLED,
        markdown_generator=DefaultMarkdownGenerator(
            content_filter=PruningContentFilter(threshold=0.48, threshold_type="fixed", min_word_threshold=0)
        ),
        # markdown_generator=DefaultMarkdownGenerator(
        #     content_filter=BM25ContentFilter(user_query="WHEN_WE_FOCUS_BASED_ON_A_USER_QUERY", bm25_threshold=1.0)
        # ),
    )

    async with AsyncWebCrawler(config=browser_config) as crawler:
        result = await crawler.arun(
            url="https://docs.micronaut.io/4.7.6/guide/",
            config=run_config
        )
        print(len(result.markdown.raw_markdown))
        print(len(result.markdown.fit_markdown))

if __name__ == "__main__":
    asyncio.run(main())
```
</details>
<details>
<summary>🖥️ <strong>Executing JavaScript & Extract Structured Data without LLMs</strong></summary>
```python
import asyncio
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
from crawl4ai import JsonCssExtractionStrategy
import json

async def main():
    schema = {
        "name": "KidoCode Courses",
        "baseSelector": "section.charge-methodology .w-tab-content > div",
        "fields": [
            {
                "name": "section_title",
                "selector": "h3.heading-50",
                "type": "text",
            },
            {
                "name": "section_description",
                "selector": ".charge-content",
                "type": "text",
            },
            {
                "name": "course_name",
                "selector": ".text-block-93",
                "type": "text",
            },
            {
                "name": "course_description",
                "selector": ".course-content-text",
                "type": "text",
            },
            {
                "name": "course_icon",
                "selector": ".image-92",
                "type": "attribute",
                "attribute": "src"
            }
        ]
    }

    extraction_strategy = JsonCssExtractionStrategy(schema, verbose=True)

    browser_config = BrowserConfig(
        headless=False,
        verbose=True
    )
    run_config = CrawlerRunConfig(
        extraction_strategy=extraction_strategy,
        js_code=["""(async () => {const tabs = document.querySelectorAll("section.charge-methodology .tabs-menu-3 > div");for(let tab of tabs) {tab.scrollIntoView();tab.click();await new Promise(r => setTimeout(r, 500));}})();"""],
        cache_mode=CacheMode.BYPASS
    )

    async with AsyncWebCrawler(config=browser_config) as crawler:
        result = await crawler.arun(
            url="https://www.kidocode.com/degrees/technology",
            config=run_config
        )

        companies = json.loads(result.extracted_content)
        print(f"Successfully extracted {len(companies)} companies")
        print(json.dumps(companies[0], indent=2))

if __name__ == "__main__":
    asyncio.run(main())
```
</details>
<details>
<summary>📚 <strong>Extracting Structured Data with LLMs</strong></summary>
```python
import os
import asyncio
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode, LLMConfig
from crawl4ai import LLMExtractionStrategy
from pydantic import BaseModel, Field

class OpenAIModelFee(BaseModel):
    model_name: str = Field(..., description="Name of the OpenAI model.")
    input_fee: str = Field(..., description="Fee for input token for the OpenAI model.")
    output_fee: str = Field(..., description="Fee for output token for the OpenAI model.")

async def main():
    browser_config = BrowserConfig(verbose=True)
    run_config = CrawlerRunConfig(
        word_count_threshold=1,
        extraction_strategy=LLMExtractionStrategy(
            # Here you can use any provider that Litellm library supports, for instance: ollama/qwen2
            # provider="ollama/qwen2", api_token="no-token",
            llm_config=LLMConfig(provider="openai/gpt-4o", api_token=os.getenv('OPENAI_API_KEY')),
            schema=OpenAIModelFee.schema(),
            extraction_type="schema",
            instruction="""From the crawled content, extract all mentioned model names along with their fees for input and output tokens.
            Do not miss any models in the entire content. One extracted model JSON format should look like this:
            {"model_name": "GPT-4", "input_fee": "US$10.00 / 1M tokens", "output_fee": "US$30.00 / 1M tokens"}."""
        ),
        cache_mode=CacheMode.BYPASS,
    )

    async with AsyncWebCrawler(config=browser_config) as crawler:
        result = await crawler.arun(
            url='https://openai.com/api/pricing/',
            config=run_config
        )
        print(result.extracted_content)

if __name__ == "__main__":
    asyncio.run(main())
```
</details>
<details>
<summary>🤖 <strong>Using Your own Browser with Custom User Profile</strong></summary>
```python
import os, sys
from pathlib import Path
import asyncio, time
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode

async def test_news_crawl():
    # Create a persistent user data directory
    user_data_dir = os.path.join(Path.home(), ".crawl4ai", "browser_profile")
    os.makedirs(user_data_dir, exist_ok=True)

    browser_config = BrowserConfig(
        verbose=True,
        headless=True,
        user_data_dir=user_data_dir,
        use_persistent_context=True,
    )
    run_config = CrawlerRunConfig(
        cache_mode=CacheMode.BYPASS
    )

    async with AsyncWebCrawler(config=browser_config) as crawler:
        url = "ADDRESS_OF_A_CHALLENGING_WEBSITE"

        result = await crawler.arun(
            url,
            config=run_config,
            magic=True,
        )

        print(f"Successfully crawled {url}")
        print(f"Content length: {len(result.markdown)}")
</details>
## ✨ Recent Updates
### Version 0.7.0 Release Highlights - The Adaptive Intelligence Update
- **🧠 Adaptive Crawling**: Your crawler now learns and adapts to website patterns automatically:
```python
config = AdaptiveConfig(
    confidence_threshold=0.7,    # Min confidence to stop crawling
    max_depth=5,                 # Maximum crawl depth
    max_pages=20,                # Maximum number of pages to crawl
    strategy="statistical"
)

async with AsyncWebCrawler() as crawler:
    adaptive_crawler = AdaptiveCrawler(crawler, config)
    state = await adaptive_crawler.digest(
        start_url="https://news.example.com",
        query="latest news content"
    )
    # Crawler learns patterns and improves extraction over time
```
- **🌊 Virtual Scroll Support**: Complete content extraction from infinite scroll pages:
```python
scroll_config = VirtualScrollConfig(
    container_selector="[data-testid='feed']",
    scroll_count=20,
    scroll_by="container_height",
    wait_after_scroll=1.0
)

result = await crawler.arun(url, config=CrawlerRunConfig(
    virtual_scroll_config=scroll_config
))
```
- **🔗 Intelligent Link Analysis**: 3-layer scoring system for smart link prioritization:
```python
link_config = LinkPreviewConfig(
    query="machine learning tutorials",
    score_threshold=0.3,
    concurrent_requests=10
)

result = await crawler.arun(url, config=CrawlerRunConfig(
    link_preview_config=link_config,
    score_links=True
))
# Links ranked by relevance and quality
```
- **🎣 Async URL Seeder**: Discover thousands of URLs in seconds:
```python
seeder = AsyncUrlSeeder(SeedingConfig(
    source="sitemap+cc",
    pattern="*/blog/*",
    query="python tutorials",
    score_threshold=0.4
))

urls = await seeder.discover("https://example.com")
```
- **⚡ Performance Boost**: Up to 3x faster with optimized resource handling and memory efficiency
Read the full details in our [0.7.0 Release Notes](https://docs.crawl4ai.com/blog/release-v0.7.0) or check the [CHANGELOG](https://github.com/unclecode/crawl4ai/blob/main/CHANGELOG.md).
## Version Numbering in Crawl4AI
Crawl4AI follows standard Python version numbering conventions (PEP 440) to help users understand the stability and features of each release.
### Version Numbers Explained
Our version numbers follow this pattern: `MAJOR.MINOR.PATCH` (e.g., 0.4.3)
#### Pre-release Versions
We use different suffixes to indicate development stages:
- `dev` (0.4.3dev1): Development versions, unstable
- `a` (0.4.3a1): Alpha releases, experimental features
- `b` (0.4.3b1): Beta releases, feature complete but needs testing
- `rc` (0.4.3rc1): Release candidates, potential final version
#### Installation
- Regular installation (stable version):
```bash
pip install -U crawl4ai
```
- Install pre-release versions:
```bash
pip install crawl4ai --pre
```
- Install specific version:
```bash
pip install crawl4ai==0.4.3b1
```
#### Why Pre-releases?
We use pre-releases to:
- Test new features in real-world scenarios
- Gather feedback before final releases
- Ensure stability for production users
- Allow early adopters to try new features
For production environments, we recommend using the stable version. For testing new features, you can opt-in to pre-releases using the `--pre` flag.
## 📖 Documentation & Roadmap
> 🚨 **Documentation Update Alert**: We're undertaking a major documentation overhaul next week to reflect recent updates and improvements. Stay tuned for a more comprehensive and up-to-date guide!
For current documentation, including installation instructions, advanced features, and API reference, visit our [Documentation Website](https://docs.crawl4ai.com/).
To check our development plans and upcoming features, visit our [Roadmap](https://github.com/unclecode/crawl4ai/blob/main/ROADMAP.md).
<details>
<summary>📈 <strong>Development TODOs</strong></summary>
- [x] 0. Graph Crawler: Smart website traversal using graph search algorithms for comprehensive nested page extraction
- [ ] 1. Question-Based Crawler: Natural language driven web discovery and content extraction
- [ ] 2. Knowledge-Optimal Crawler: Smart crawling that maximizes knowledge while minimizing data extraction
- [ ] 3. Agentic Crawler: Autonomous system for complex multi-step crawling operations
- [ ] 4. Automated Schema Generator: Convert natural language to extraction schemas
- [ ] 5. Domain-Specific Scrapers: Pre-configured extractors for common platforms (academic, e-commerce)
- [ ] 6. Web Embedding Index: Semantic search infrastructure for crawled content
- [ ] 7. Interactive Playground: Web UI for testing, comparing strategies with AI assistance
- [ ] 8. Performance Monitor: Real-time insights into crawler operations
- [ ] 9. Cloud Integration: One-click deployment solutions across cloud providers
- [ ] 10. Sponsorship Program: Structured support system with tiered benefits
- [ ] 11. Educational Content: "How to Crawl" video series and interactive tutorials
</details>
## 🤝 Contributing
We welcome contributions from the open-source community. Check out our [contribution guidelines](https://github.com/unclecode/crawl4ai/blob/main/CONTRIBUTORS.md) for more information.
## 📄 License & Attribution
This project is licensed under the Apache License 2.0; attribution is recommended via the badges below. See the [Apache 2.0 License](https://github.com/unclecode/crawl4ai/blob/main/LICENSE) file for details.
### Attribution Requirements
When using Crawl4AI, you must include one of the following attribution methods:
#### 1. Badge Attribution (Recommended)
Add one of these badges to your README, documentation, or website:
| Theme | Badge |
|-------|-------|
| **Disco Theme (Animated)** | <a href="https://github.com/unclecode/crawl4ai"><img src="./docs/assets/powered-by-disco.svg" alt="Powered by Crawl4AI" width="200"/></a> |
| **Night Theme (Dark with Neon)** | <a href="https://github.com/unclecode/crawl4ai"><img src="./docs/assets/powered-by-night.svg" alt="Powered by Crawl4AI" width="200"/></a> |
| **Dark Theme (Classic)** | <a href="https://github.com/unclecode/crawl4ai"><img src="./docs/assets/powered-by-dark.svg" alt="Powered by Crawl4AI" width="200"/></a> |
| **Light Theme (Classic)** | <a href="https://github.com/unclecode/crawl4ai"><img src="./docs/assets/powered-by-light.svg" alt="Powered by Crawl4AI" width="200"/></a> |
HTML code for adding the badges:
```html
<!-- Disco Theme (Animated) -->
<a href="https://github.com/unclecode/crawl4ai">
<img src="https://raw.githubusercontent.com/unclecode/crawl4ai/main/docs/assets/powered-by-disco.svg" alt="Powered by Crawl4AI" width="200"/>
</a>
<!-- Night Theme (Dark with Neon) -->
<a href="https://github.com/unclecode/crawl4ai">
<img src="https://raw.githubusercontent.com/unclecode/crawl4ai/main/docs/assets/powered-by-night.svg" alt="Powered by Crawl4AI" width="200"/>
</a>
<!-- Dark Theme (Classic) -->
<a href="https://github.com/unclecode/crawl4ai">
<img src="https://raw.githubusercontent.com/unclecode/crawl4ai/main/docs/assets/powered-by-dark.svg" alt="Powered by Crawl4AI" width="200"/>
</a>
<!-- Light Theme (Classic) -->
<a href="https://github.com/unclecode/crawl4ai">
<img src="https://raw.githubusercontent.com/unclecode/crawl4ai/main/docs/assets/powered-by-light.svg" alt="Powered by Crawl4AI" width="200"/>
</a>
<!-- Simple Shield Badge -->
<a href="https://github.com/unclecode/crawl4ai">
<img src="https://img.shields.io/badge/Powered%20by-Crawl4AI-blue?style=flat-square" alt="Powered by Crawl4AI"/>
</a>
```
#### 2. Text Attribution
Add this line to your documentation:
```
This project uses Crawl4AI (https://github.com/unclecode/crawl4ai) for web data extraction.
```
## 📚 Citation
If you use Crawl4AI in your research or project, please cite:
```bibtex
@software{crawl4ai2024,
author = {UncleCode},
title = {Crawl4AI: Open-source LLM Friendly Web Crawler & Scraper},
year = {2024},
publisher = {GitHub},
journal = {GitHub Repository},
howpublished = {\url{https://github.com/unclecode/crawl4ai}},
commit = {Please use the commit hash you're working with}
}
```
Text citation format:
```
UncleCode. (2024). Crawl4AI: Open-source LLM Friendly Web Crawler & Scraper [Computer software].
GitHub. https://github.com/unclecode/crawl4ai
```
## 📧 Contact
For questions, suggestions, or feedback, feel free to reach out:
- GitHub: [unclecode](https://github.com/unclecode)
- Twitter: [@unclecode](https://twitter.com/unclecode)
- Website: [crawl4ai.com](https://crawl4ai.com)
Happy Crawling! 🕸️🚀
## 💖 Support Crawl4AI
> 🎉 **Sponsorship Program Just Launched!** Be among the first 50 **Founding Sponsors** and get permanent recognition in our Hall of Fame!
Crawl4AI is the #1 trending open-source web crawler with 51K+ stars. Your support ensures we stay independent, innovative, and free forever.
<div align="center">
[![Become a Sponsor](https://img.shields.io/badge/Become%20a%20Sponsor-pink?style=for-the-badge&logo=github-sponsors&logoColor=white)](https://github.com/sponsors/unclecode)
[![Current Sponsors](https://img.shields.io/github/sponsors/unclecode?style=for-the-badge&logo=github&label=Current%20Sponsors&color=green)](https://github.com/sponsors/unclecode)
</div>
### 🤝 Sponsorship Tiers
- **🌱 Believer ($5/mo)**: Join the movement for data democratization
- **🚀 Builder ($50/mo)**: Get priority support and early feature access
- **💼 Growing Team ($500/mo)**: Bi-weekly syncs and optimization help
- **🏢 Data Infrastructure Partner ($2000/mo)**: Full partnership with dedicated support
**Why sponsor?** Every tier includes real benefits. No more rate-limited APIs. Own your data pipeline. Build data sovereignty together.
[View All Tiers & Benefits →](https://github.com/sponsors/unclecode)
### 🏆 Our Sponsors
#### 👑 Founding Sponsors (First 50)
*Be part of history - [Become a Founding Sponsor](https://github.com/sponsors/unclecode)*
<!-- Founding sponsors will be permanently recognized here -->
#### Current Sponsors
Thank you to all our sponsors who make this project possible!
<!-- Sponsors will be automatically added here -->
## 🗾 Mission
Our mission is to unlock the value of personal and enterprise data by transforming digital footprints into structured, tradeable assets. Crawl4AI empowers individuals and organizations with open-source tools to extract and structure data, fostering a shared data economy.
We envision a future where AI is powered by real human knowledge, ensuring data creators directly benefit from their contributions. By democratizing data and enabling ethical sharing, we are laying the foundation for authentic AI advancement.
<details>
<summary>🔑 <strong>Key Opportunities</strong></summary>
- **Data Capitalization**: Transform digital footprints into measurable, valuable assets.
- **Authentic AI Data**: Provide AI systems with real human insights.
- **Shared Economy**: Create a fair data marketplace that benefits data creators.
</details>
<details>
<summary>🚀 <strong>Development Pathway</strong></summary>
1. **Open-Source Tools**: Community-driven platforms for transparent data extraction.
2. **Digital Asset Structuring**: Tools to organize and value digital knowledge.
3. **Ethical Data Marketplace**: A secure, fair platform for exchanging structured data.
For more details, see our [full mission statement](./MISSION.md).
</details>
## Star History
[![Star History Chart](https://api.star-history.com/svg?repos=unclecode/crawl4ai&type=Date)](https://star-history.com/#unclecode/crawl4ai&Date)
435
README.md
View File
@@ -10,48 +10,40 @@
[![PyPI version](https://badge.fury.io/py/crawl4ai.svg)](https://badge.fury.io/py/crawl4ai)
[![Python Version](https://img.shields.io/pypi/pyversions/crawl4ai)](https://pypi.org/project/crawl4ai/)
[![Downloads](https://static.pepy.tech/badge/crawl4ai/month)](https://pepy.tech/project/crawl4ai)
[![GitHub Sponsors](https://img.shields.io/github/sponsors/unclecode?style=flat&logo=GitHub-Sponsors&label=Sponsors&color=pink)](https://github.com/sponsors/unclecode)
<p align="center">
<a href="https://x.com/crawl4ai">
<img src="https://img.shields.io/badge/Follow%20on%20X-000000?style=for-the-badge&logo=x&logoColor=white" alt="Follow on X" />
</a>
<a href="https://www.linkedin.com/company/crawl4ai">
<img src="https://img.shields.io/badge/Follow%20on%20LinkedIn-0077B5?style=for-the-badge&logo=linkedin&logoColor=white" alt="Follow on LinkedIn" />
</a>
<a href="https://discord.gg/jP8KfhDhyN">
<img src="https://img.shields.io/badge/Join%20our%20Discord-5865F2?style=for-the-badge&logo=discord&logoColor=white" alt="Join our Discord" />
</a>
</p>
<!-- [![Documentation Status](https://readthedocs.org/projects/crawl4ai/badge/?version=latest)](https://crawl4ai.readthedocs.io/) -->
[![License](https://img.shields.io/github/license/unclecode/crawl4ai)](https://github.com/unclecode/crawl4ai/blob/main/LICENSE)
[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
[![Security: bandit](https://img.shields.io/badge/security-bandit-yellow.svg)](https://github.com/PyCQA/bandit)
</div>
Crawl4AI turns the web into clean, LLM ready Markdown for RAG, agents, and data pipelines. Fast, controllable, battle tested by a 50k+ star community.
Crawl4AI is the #1 trending GitHub repository, actively maintained by a vibrant community. It delivers blazing-fast, AI-ready web crawling tailored for LLMs, AI agents, and data pipelines. Open source, flexible, and built for real-time performance, Crawl4AI empowers developers with unmatched speed, precision, and deployment ease.
[✨ Check out latest update v0.7.3](#-recent-updates)
[✨ Check out latest update v0.4.3bx](#-recent-updates)
✨ New in v0.7.3: Undetected Browser Support, Multi-URL Configurations, Memory Monitoring, Enhanced Table Extraction, GitHub Sponsors. [Release notes →](https://github.com/unclecode/crawl4ai/blob/main/docs/blog/release-v0.7.3.md)
🎉 **Version 0.4.3bx is out!** This release brings exciting new features like a Memory Dispatcher System, Streaming Support, LLM-Powered Markdown Generation, Schema Generation, and Robots.txt Compliance! [Read the release notes →](https://docs.crawl4ai.com/blog)
<details>
<summary>🤓 <strong>My Personal Story</strong></summary>
<summary>🤓 <strong>My Personal Story</strong></summary>
I grew up on an Amstrad, thanks to my dad, and never stopped building. In grad school I specialized in NLP and built crawlers for research. That's where I learned how much extraction matters.
My journey with computers started in childhood when my dad, a computer scientist, introduced me to an Amstrad computer. Those early days sparked a fascination with technology, leading me to pursue computer science and specialize in NLP during my postgraduate studies. It was during this time that I first delved into web crawling, building tools to help researchers organize papers and extract information from publications, a challenging yet rewarding experience that honed my skills in data extraction.
In 2023, I needed web-to-Markdown. The “open source” option wanted an account, API token, and $16, and still under-delivered. I went turbo anger mode, built Crawl4AI in days, and it went viral. Now it's the most-starred crawler on GitHub.
Fast forward to 2023: I was working on a tool for a project and needed a crawler to convert a webpage into markdown. While exploring solutions, I found one that claimed to be open-source but required creating an account and generating an API token. Worse, it turned out to be a SaaS model charging $16, and its quality didn't meet my standards. Frustrated, I realized this was a deeper problem. That frustration turned into turbo anger mode, and I decided to build my own solution. In just a few days, I created Crawl4AI. To my surprise, it went viral, earning thousands of GitHub stars and resonating with a global community.
I made it open source for **availability**: anyone can use it without a gate. Now I'm building the platform for **affordability**: anyone can run serious crawls without breaking the bank. If that resonates, join in, send feedback, or just crawl something amazing.
I made Crawl4AI open-source for two reasons. First, its my way of giving back to the open-source community that has supported me throughout my career. Second, I believe data should be accessible to everyone, not locked behind paywalls or monopolized by a few. Open access to data lays the foundation for the democratization of AI, a vision where individuals can train their own models and take ownership of their information. This library is the first step in a larger journey to create the best open-source data extraction and generation tool the world has ever seen, built collaboratively by a passionate community.
Thank you to everyone who has supported this project, used it, and shared feedback. Your encouragement motivates me to dream even bigger. Join us, file issues, submit PRs, or spread the word. Together, we can build a tool that truly empowers people to access their own data and reshape the future of AI.
</details>
## 🧐 Why Crawl4AI?
<details>
<summary>Why developers pick Crawl4AI</summary>
- **LLM ready output**, smart Markdown with headings, tables, code, citation hints
- **Fast in practice**, async browser pool, caching, minimal hops
- **Full control**, sessions, proxies, cookies, user scripts, hooks
- **Adaptive intelligence**, learns site patterns, explores only what matters
- **Deploy anywhere**, zero keys, CLI and Docker, cloud friendly
</details>
1. **Built for LLMs**: Creates smart, concise Markdown optimized for RAG and fine-tuning applications.
2. **Lightning Fast**: Delivers results 6x faster with real-time, cost-efficient performance.
3. **Flexible Browser Control**: Offers session management, proxies, and custom hooks for seamless data access.
4. **Heuristic Intelligence**: Uses advanced algorithms for efficient extraction, reducing reliance on costly models.
5. **Open Source & Deployable**: Fully open-source with no API keys—ready for Docker and cloud integration.
6. **Thriving Community**: Actively maintained by a vibrant community and the #1 trending GitHub repository.
## 🚀 Quick Start
@@ -75,7 +67,7 @@ If you encounter any browser-related issues, you can install them manually:
python -m playwright install --with-deps chromium
```
2. Run a simple web crawl with Python:
2. Run a simple web crawl:
```python
import asyncio
from crawl4ai import *
@@ -91,45 +83,6 @@ if __name__ == "__main__":
asyncio.run(main())
```
3. Or use the new command-line interface:
```bash
# Basic crawl with markdown output
crwl https://www.nbcnews.com/business -o markdown
# Deep crawl with BFS strategy, max 10 pages
crwl https://docs.crawl4ai.com --deep-crawl bfs --max-pages 10
# Use LLM extraction with a specific question
crwl https://www.example.com/products -q "Extract all product prices"
```
## 💖 Support Crawl4AI
> 🎉 **Sponsorship Program Now Open!** After powering 51K+ developers and 1 year of growth, Crawl4AI is launching dedicated support for **startups** and **enterprises**. Be among the first 50 **Founding Sponsors** for permanent recognition in our Hall of Fame.
Crawl4AI is the #1 trending open-source web crawler on GitHub. Your support keeps it independent, innovative, and free for the community — while giving you direct access to premium benefits.
<div align="">
[![Become a Sponsor](https://img.shields.io/badge/Become%20a%20Sponsor-pink?style=for-the-badge&logo=github-sponsors&logoColor=white)](https://github.com/sponsors/unclecode)
[![Current Sponsors](https://img.shields.io/github/sponsors/unclecode?style=for-the-badge&logo=github&label=Current%20Sponsors&color=green)](https://github.com/sponsors/unclecode)
</div>
### 🤝 Sponsorship Tiers
- **🌱 Believer ($5/mo)** — Join the movement for data democratization
- **🚀 Builder ($50/mo)** — Priority support & early access to features
- **💼 Growing Team ($500/mo)** — Bi-weekly syncs & optimization help
- **🏢 Data Infrastructure Partner ($2000/mo)** — Full partnership with dedicated support
*Custom arrangements available - see [SPONSORS.md](SPONSORS.md) for details & contact*
**Why sponsor?**
No rate-limited APIs. No lock-in. Build and own your data pipeline with direct guidance from the creator of Crawl4AI.
[See All Tiers & Benefits →](https://github.com/sponsors/unclecode)
## ✨ Features
<details>
@@ -158,7 +111,6 @@ No rate-limited APIs. No lock-in. Build and own your data pipeline with direct g
- 🖥️ **Managed Browser**: Use user-owned browsers with full control, avoiding bot detection.
- 🔄 **Remote Browser Control**: Connect to Chrome Developer Tools Protocol for remote, large-scale data extraction.
- 👤 **Browser Profiler**: Create and manage persistent profiles with saved authentication states, cookies, and settings.
- 🔒 **Session Management**: Preserve browser states and reuse them for multi-step crawling.
- 🧩 **Proxy Support**: Seamlessly connect to proxies with authentication for secure access.
- ⚙️ **Full Browser Control**: Modify headers, cookies, user agents, and more for tailored crawling setups.
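The bullets above cover a lot of surface area; here is a minimal sketch of how session reuse, an authenticated proxy, and a custom user agent typically combine. The URLs and proxy credentials are placeholders, and parameter names follow the current documentation, so check them against your installed version:

```python
import asyncio
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig

async def main():
    browser_config = BrowserConfig(
        headless=True,
        user_agent="Mozilla/5.0 (compatible; MyCrawler/1.0)",  # custom UA (placeholder)
        proxy_config={                                          # authenticated proxy (placeholder values)
            "server": "http://proxy.example.com:8080",
            "username": "proxy_user",
            "password": "proxy_pass",
        },
    )
    # Reusing the same session_id keeps cookies and page state between calls
    run_config = CrawlerRunConfig(session_id="login_flow")

    async with AsyncWebCrawler(config=browser_config) as crawler:
        first = await crawler.arun("https://example.com/login", config=run_config)
        second = await crawler.arun("https://example.com/account", config=run_config)
        print(first.success, second.success)

if __name__ == "__main__":
    asyncio.run(main())
```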
@@ -187,11 +139,10 @@ No rate-limited APIs. No lock-in. Build and own your data pipeline with direct g
<details>
<summary>🚀 <strong>Deployment</strong></summary>
- 🐳 **Dockerized Setup**: Optimized Docker image with FastAPI server for easy deployment.
- 🔑 **Secure Authentication**: Built-in JWT token authentication for API security.
- 🐳 **Dockerized Setup**: Optimized Docker image with API server for easy deployment.
- 🔄 **API Gateway**: One-click deployment with secure token authentication for API-based workflows.
- 🌐 **Scalable Architecture**: Designed for mass-scale production and optimized server performance.
- **Cloud Deployment**: Ready-to-deploy configurations for major cloud platforms.
- **DigitalOcean Deployment**: Ready-to-deploy configurations for DigitalOcean and similar platforms.
</details>
@@ -287,27 +238,28 @@ pip install -e ".[all]" # Install all optional features
<details>
<summary>🐳 <strong>Docker Deployment</strong></summary>
> 🚀 **Now Available!** Our completely redesigned Docker implementation is here! This new solution makes deployment more efficient and seamless than ever.
> 🚀 **Major Changes Coming!** We're developing a completely new Docker implementation that will make deployment even more efficient and seamless. The current Docker setup is being deprecated in favor of this new solution.
### New Docker Features
### Current Docker Support
The new Docker implementation includes:
- **Browser pooling** with page pre-warming for faster response times
- **Interactive playground** to test and generate request code
- **MCP integration** for direct connection to AI tools like Claude Code
- **Comprehensive API endpoints** including HTML extraction, screenshots, PDF generation, and JavaScript execution
- **Multi-architecture support** with automatic detection (AMD64/ARM64)
- **Optimized resources** with improved memory management
The existing Docker implementation is being deprecated and will be replaced soon. If you still need to use Docker with the current version:
### Getting Started
- 📚 [Deprecated Docker Setup](./docs/deprecated/docker-deployment.md) - Instructions for the current Docker implementation
- ⚠️ Note: This setup will be replaced in the next major release
```bash
# Pull and run the latest release candidate
docker pull unclecode/crawl4ai:0.7.0
docker run -d -p 11235:11235 --name crawl4ai --shm-size=1g unclecode/crawl4ai:0.7.0
# Visit the playground at http://localhost:11235/playground
```

### What's Coming Next?
Our new Docker implementation will bring:
- Improved performance and resource efficiency
- Streamlined deployment process
- Better integration with Crawl4AI features
- Enhanced scalability options
Stay connected with our [GitHub repository](https://github.com/unclecode/crawl4ai) for updates!
</details>
---
### Quick Test
@@ -319,31 +271,22 @@ import requests
# Submit a crawl job
response = requests.post(
"http://localhost:11235/crawl",
json={"urls": ["https://example.com"], "priority": 10}
json={"urls": "https://example.com", "priority": 10}
)
if response.status_code == 200:
print("Crawl job submitted successfully.")
if "results" in response.json():
results = response.json()["results"]
print("Crawl job completed. Results:")
for result in results:
print(result)
else:
task_id = response.json()["task_id"]
print(f"Crawl job submitted. Task ID:: {task_id}")
result = requests.get(f"http://localhost:11235/task/{task_id}")
task_id = response.json()["task_id"]
# Continue polling until the task is complete (status="completed")
result = requests.get(f"http://localhost:11235/task/{task_id}")
```
For more examples, see our [Docker Examples](https://github.com/unclecode/crawl4ai/blob/main/docs/examples/docker_example.py). For advanced configuration, environment variables, and usage examples, see our [Docker Deployment Guide](https://docs.crawl4ai.com/basic/docker-deployment/).
</details>
---
## 🔬 Advanced Usage Examples 🔬
You can check the project structure in the directory [docs/examples](https://github.com/unclecode/crawl4ai/tree/main/docs/examples). Over there, you can find a variety of examples; here, some popular examples are shared.
You can check the project structure in the directory [https://github.com/unclecode/crawl4ai/docs/examples](docs/examples). Over there, you can find a variety of examples; here, some popular examples are shared.
<details>
<summary>📝 <strong>Heuristic Markdown Generation with Clean and Fit Markdown</strong></summary>
@@ -374,8 +317,9 @@ async def main():
url="https://docs.micronaut.io/4.7.6/guide/",
config=run_config
)
print(len(result.markdown.raw_markdown))
print(len(result.markdown.fit_markdown))
print(len(result.markdown))
print(len(result.fit_markdown))
print(len(result.markdown_v2.fit_markdown))
if __name__ == "__main__":
asyncio.run(main())
@@ -389,7 +333,7 @@ if __name__ == "__main__":
```python
import asyncio
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
from crawl4ai import JsonCssExtractionStrategy
from crawl4ai.extraction_strategy import JsonCssExtractionStrategy
import json
async def main():
@@ -462,8 +406,8 @@ if __name__ == "__main__":
```python
import os
import asyncio
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode, LLMConfig
from crawl4ai import LLMExtractionStrategy
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
from crawl4ai.extraction_strategy import LLMExtractionStrategy
from pydantic import BaseModel, Field
class OpenAIModelFee(BaseModel):
@@ -478,7 +422,7 @@ async def main():
extraction_strategy=LLMExtractionStrategy(
# Here you can use any provider that Litellm library supports, for instance: ollama/qwen2
# provider="ollama/qwen2", api_token="no-token",
llm_config = LLMConfig(provider="openai/gpt-4o", api_token=os.getenv('OPENAI_API_KEY')),
provider="openai/gpt-4o", api_token=os.getenv('OPENAI_API_KEY'),
schema=OpenAIModelFee.schema(),
extraction_type="schema",
instruction="""From the crawled content, extract all mentioned model names along with their fees for input and output tokens.
@@ -502,7 +446,7 @@ if __name__ == "__main__":
</details>
<details>
<summary>🤖 <strong>Using Your own Browser with Custom User Profile</strong></summary>
<summary>🤖 <strong>Using You own Browswer with Custome User Profile</strong></summary>
```python
import os, sys
@@ -542,161 +486,27 @@ async def test_news_crawl():
## ✨ Recent Updates
<details>
<summary><strong>Version 0.7.3 Release Highlights - The Multi-Config Intelligence Update</strong></summary>
- **🚀 New Dispatcher System**: Scale to thousands of URLs with intelligent **memory monitoring**, **concurrency control**, and optional **rate limiting**. (See `MemoryAdaptiveDispatcher`, `SemaphoreDispatcher`, `RateLimiter`, `CrawlerMonitor`; a combined usage sketch appears at the end of this section.)
- **⚡ Streaming Mode**: Process results **as they arrive** instead of waiting for an entire batch to complete. (Set `stream=True` in `CrawlerRunConfig`)
- **🤖 Enhanced LLM Integration**:
- **Automatic schema generation**: Create extraction rules from HTML using OpenAI or Ollama, no manual CSS/XPath needed.
- **LLM-powered Markdown filtering**: Refine your markdown output with a new `LLMContentFilter` that understands content relevance.
- **Ollama Support**: Use open-source or self-hosted models for private or cost-effective extraction.
- **🏎️ Faster Scraping Option**: New `LXMLWebScrapingStrategy` offers **10-20x speedup** for large, complex pages (experimental).
- **🤖 robots.txt Compliance**: Respect website rules with `check_robots_txt=True` and efficient local caching.
- **🔄 Proxy Rotation**: Built-in support for dynamic proxy switching and IP verification, with support for authenticated proxies and session persistence.
- **➡️ URL Redirection Tracking**: The `redirected_url` field now captures the final destination after any redirects.
- **🪞 Improved Mirroring**: The `LXMLWebScrapingStrategy` now has much greater fidelity, allowing for almost pixel-perfect mirroring of websites.
- **📈 Enhanced Monitoring**: Track memory, CPU, and individual crawler status with `CrawlerMonitor`.
- **📝 Improved Documentation**: More examples, clearer explanations, and updated tutorials.
- **🕵️ Undetected Browser Support**: Bypass sophisticated bot detection systems:
```python
from crawl4ai import AsyncWebCrawler, BrowserConfig
browser_config = BrowserConfig(
browser_type="undetected", # Use undetected Chrome
headless=True, # Can run headless with stealth
extra_args=[
"--disable-blink-features=AutomationControlled",
"--disable-web-security"
]
)
async with AsyncWebCrawler(config=browser_config) as crawler:
result = await crawler.arun("https://protected-site.com")
# Successfully bypass Cloudflare, Akamai, and custom bot detection
```
- **🎨 Multi-URL Configuration**: Different strategies for different URL patterns in one batch:
```python
from crawl4ai import CrawlerRunConfig, MatchMode
configs = [
# Documentation sites - aggressive caching
CrawlerRunConfig(
url_matcher=["*docs*", "*documentation*"],
cache_mode="write",
markdown_generator_options={"include_links": True}
),
# News/blog sites - fresh content
CrawlerRunConfig(
url_matcher=lambda url: 'blog' in url or 'news' in url,
cache_mode="bypass"
),
# Fallback for everything else
CrawlerRunConfig()
]
results = await crawler.arun_many(urls, config=configs)
# Each URL gets the perfect configuration automatically
```
- **🧠 Memory Monitoring**: Track and optimize memory usage during crawling:
```python
from crawl4ai.memory_utils import MemoryMonitor
monitor = MemoryMonitor()
monitor.start_monitoring()
results = await crawler.arun_many(large_url_list)
report = monitor.get_report()
print(f"Peak memory: {report['peak_mb']:.1f} MB")
print(f"Efficiency: {report['efficiency']:.1f}%")
# Get optimization recommendations
```
- **📊 Enhanced Table Extraction**: Direct DataFrame conversion from web tables:
```python
result = await crawler.arun("https://site-with-tables.com")
# New way - direct table access
if result.tables:
import pandas as pd
for table in result.tables:
df = pd.DataFrame(table['data'])
print(f"Table: {df.shape[0]} rows × {df.shape[1]} columns")
```
- **💰 GitHub Sponsors**: 4-tier sponsorship system for project sustainability
- **🐳 Docker LLM Flexibility**: Configure providers via environment variables
[Full v0.7.3 Release Notes →](https://github.com/unclecode/crawl4ai/blob/main/docs/blog/release-v0.7.3.md)
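Putting a few of the items above together, here is a minimal sketch of streaming a batch through the memory-adaptive dispatcher with robots.txt checking enabled. It assumes the constructor arguments named in this list (thresholds, session permits, `RateLimiter` delays); exact signatures can differ between releases, so treat it as a starting point rather than a reference:

```python
import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, MemoryAdaptiveDispatcher, RateLimiter

async def main():
    run_config = CrawlerRunConfig(
        stream=True,             # yield results as they finish instead of one big list
        check_robots_txt=True,   # respect robots.txt, with local caching
    )
    dispatcher = MemoryAdaptiveDispatcher(
        memory_threshold_percent=85.0,                    # back off when system memory gets tight
        max_session_permit=10,                            # cap concurrent browser sessions
        rate_limiter=RateLimiter(base_delay=(0.5, 1.5)),  # polite per-domain delays (assumed args)
    )
    urls = ["https://example.com", "https://example.org", "https://example.net"]

    async with AsyncWebCrawler() as crawler:
        async for result in await crawler.arun_many(urls, config=run_config, dispatcher=dispatcher):
            print(result.url, "ok" if result.success else result.error_message)

if __name__ == "__main__":
    asyncio.run(main())
```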
</details>
<details>
<summary><strong>Version 0.7.0 Release Highlights - The Adaptive Intelligence Update</strong></summary>
- **🧠 Adaptive Crawling**: Your crawler now learns and adapts to website patterns automatically:
```python
config = AdaptiveConfig(
confidence_threshold=0.7, # Min confidence to stop crawling
max_depth=5, # Maximum crawl depth
max_pages=20, # Maximum number of pages to crawl
strategy="statistical"
)
async with AsyncWebCrawler() as crawler:
adaptive_crawler = AdaptiveCrawler(crawler, config)
state = await adaptive_crawler.digest(
start_url="https://news.example.com",
query="latest news content"
)
# Crawler learns patterns and improves extraction over time
```
- **🌊 Virtual Scroll Support**: Complete content extraction from infinite scroll pages:
```python
scroll_config = VirtualScrollConfig(
container_selector="[data-testid='feed']",
scroll_count=20,
scroll_by="container_height",
wait_after_scroll=1.0
)
result = await crawler.arun(url, config=CrawlerRunConfig(
virtual_scroll_config=scroll_config
))
```
- **🔗 Intelligent Link Analysis**: 3-layer scoring system for smart link prioritization:
```python
link_config = LinkPreviewConfig(
query="machine learning tutorials",
score_threshold=0.3,
concurrent_requests=10
)
result = await crawler.arun(url, config=CrawlerRunConfig(
link_preview_config=link_config,
score_links=True
))
# Links ranked by relevance and quality
```
- **🎣 Async URL Seeder**: Discover thousands of URLs in seconds:
```python
seeder = AsyncUrlSeeder(SeedingConfig(
source="sitemap+cc",
pattern="*/blog/*",
query="python tutorials",
score_threshold=0.4
))
urls = await seeder.discover("https://example.com")
```
- **⚡ Performance Boost**: Up to 3x faster with optimized resource handling and memory efficiency
Read the full details in our [0.7.0 Release Notes](https://docs.crawl4ai.com/blog/release-v0.7.0) or check the [CHANGELOG](https://github.com/unclecode/crawl4ai/blob/main/CHANGELOG.md).
</details>
Read the full details in our [0.4.3bx Release Notes](https://github.com/unclecode/crawl4ai/blob/main/CHANGELOG.md).
## Version Numbering in Crawl4AI
Crawl4AI follows standard Python version numbering conventions (PEP 440) to help users understand the stability and features of each release.
<details>
<summary>📈 <strong>Version Numbers Explained</strong></summary>
### Version Numbers Explained
Our version numbers follow this pattern: `MAJOR.MINOR.PATCH` (e.g., 0.4.3)
@@ -706,7 +516,7 @@ We use different suffixes to indicate development stages:
- `dev` (0.4.3dev1): Development versions, unstable
- `a` (0.4.3a1): Alpha releases, experimental features
- `b` (0.4.3b1): Beta releases, feature complete but needs testing
- `rc` (0.4.3): Release candidates, potential final version
- `rc` (0.4.3rc1): Release candidates, potential final version
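These suffixes sort in the order listed (dev, then alpha, beta, release candidate, then the final release). If you want to sanity-check that ordering yourself, the `packaging` library, which implements PEP 440 and is not a Crawl4AI dependency, can compare the version strings directly:

```python
from packaging.version import Version

# PEP 440 ordering: dev < alpha < beta < release candidate < final
assert Version("0.4.3dev1") < Version("0.4.3a1") < Version("0.4.3b1") < Version("0.4.3rc1") < Version("0.4.3")
print("dev < a < b < rc < final")
```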
#### Installation
- Regular installation (stable version):
@@ -733,8 +543,6 @@ We use pre-releases to:
For production environments, we recommend using the stable version. For testing new features, you can opt-in to pre-releases using the `--pre` flag.
</details>
## 📖 Documentation & Roadmap
> 🚨 **Documentation Update Alert**: We're undertaking a major documentation overhaul next week to reflect recent updates and improvements. Stay tuned for a more comprehensive and up-to-date guide!
@@ -747,16 +555,16 @@ To check our development plans and upcoming features, visit our [Roadmap](https:
<summary>📈 <strong>Development TODOs</strong></summary>
- [x] 0. Graph Crawler: Smart website traversal using graph search algorithms for comprehensive nested page extraction
- [x] 1. Question-Based Crawler: Natural language driven web discovery and content extraction
- [x] 2. Knowledge-Optimal Crawler: Smart crawling that maximizes knowledge while minimizing data extraction
- [x] 3. Agentic Crawler: Autonomous system for complex multi-step crawling operations
- [x] 4. Automated Schema Generator: Convert natural language to extraction schemas
- [x] 5. Domain-Specific Scrapers: Pre-configured extractors for common platforms (academic, e-commerce)
- [x] 6. Web Embedding Index: Semantic search infrastructure for crawled content
- [x] 7. Interactive Playground: Web UI for testing, comparing strategies with AI assistance
- [x] 8. Performance Monitor: Real-time insights into crawler operations
- [ ] 1. Question-Based Crawler: Natural language driven web discovery and content extraction
- [ ] 2. Knowledge-Optimal Crawler: Smart crawling that maximizes knowledge while minimizing data extraction
- [ ] 3. Agentic Crawler: Autonomous system for complex multi-step crawling operations
- [ ] 4. Automated Schema Generator: Convert natural language to extraction schemas
- [ ] 5. Domain-Specific Scrapers: Pre-configured extractors for common platforms (academic, e-commerce)
- [ ] 6. Web Embedding Index: Semantic search infrastructure for crawled content
- [ ] 7. Interactive Playground: Web UI for testing, comparing strategies with AI assistance
- [ ] 8. Performance Monitor: Real-time insights into crawler operations
- [ ] 9. Cloud Integration: One-click deployment solutions across cloud providers
- [x] 10. Sponsorship Program: Structured support system with tiered benefits
- [ ] 10. Sponsorship Program: Structured support system with tiered benefits
- [ ] 11. Educational Content: "How to Crawl" video series and interactive tutorials
</details>
@@ -765,88 +573,9 @@ To check our development plans and upcoming features, visit our [Roadmap](https:
We welcome contributions from the open-source community. Check out our [contribution guidelines](https://github.com/unclecode/crawl4ai/blob/main/CONTRIBUTORS.md) for more information.
## 📄 License
## 📄 License & Attribution
This project is licensed under the Apache License 2.0; attribution via the badges below is recommended. See the [Apache 2.0 License](https://github.com/unclecode/crawl4ai/blob/main/LICENSE) file for details.
### Attribution Requirements
When using Crawl4AI, you must include one of the following attribution methods:
<details>
<summary>📈 <strong>1. Badge Attribution (Recommended)</strong></summary>
Add one of these badges to your README, documentation, or website:
| Theme | Badge |
|-------|-------|
| **Disco Theme (Animated)** | <a href="https://github.com/unclecode/crawl4ai"><img src="./docs/assets/powered-by-disco.svg" alt="Powered by Crawl4AI" width="200"/></a> |
| **Night Theme (Dark with Neon)** | <a href="https://github.com/unclecode/crawl4ai"><img src="./docs/assets/powered-by-night.svg" alt="Powered by Crawl4AI" width="200"/></a> |
| **Dark Theme (Classic)** | <a href="https://github.com/unclecode/crawl4ai"><img src="./docs/assets/powered-by-dark.svg" alt="Powered by Crawl4AI" width="200"/></a> |
| **Light Theme (Classic)** | <a href="https://github.com/unclecode/crawl4ai"><img src="./docs/assets/powered-by-light.svg" alt="Powered by Crawl4AI" width="200"/></a> |
HTML code for adding the badges:
```html
<!-- Disco Theme (Animated) -->
<a href="https://github.com/unclecode/crawl4ai">
<img src="https://raw.githubusercontent.com/unclecode/crawl4ai/main/docs/assets/powered-by-disco.svg" alt="Powered by Crawl4AI" width="200"/>
</a>
<!-- Night Theme (Dark with Neon) -->
<a href="https://github.com/unclecode/crawl4ai">
<img src="https://raw.githubusercontent.com/unclecode/crawl4ai/main/docs/assets/powered-by-night.svg" alt="Powered by Crawl4AI" width="200"/>
</a>
<!-- Dark Theme (Classic) -->
<a href="https://github.com/unclecode/crawl4ai">
<img src="https://raw.githubusercontent.com/unclecode/crawl4ai/main/docs/assets/powered-by-dark.svg" alt="Powered by Crawl4AI" width="200"/>
</a>
<!-- Light Theme (Classic) -->
<a href="https://github.com/unclecode/crawl4ai">
<img src="https://raw.githubusercontent.com/unclecode/crawl4ai/main/docs/assets/powered-by-light.svg" alt="Powered by Crawl4AI" width="200"/>
</a>
<!-- Simple Shield Badge -->
<a href="https://github.com/unclecode/crawl4ai">
<img src="https://img.shields.io/badge/Powered%20by-Crawl4AI-blue?style=flat-square" alt="Powered by Crawl4AI"/>
</a>
```
</details>
<details>
<summary>📖 <strong>2. Text Attribution</strong></summary>
Add this line to your documentation:
```
This project uses Crawl4AI (https://github.com/unclecode/crawl4ai) for web data extraction.
```
</details>
## 📚 Citation
If you use Crawl4AI in your research or project, please cite:
```bibtex
@software{crawl4ai2024,
author = {UncleCode},
title = {Crawl4AI: Open-source LLM Friendly Web Crawler & Scraper},
year = {2024},
publisher = {GitHub},
journal = {GitHub Repository},
howpublished = {\url{https://github.com/unclecode/crawl4ai}},
commit = {Please use the commit hash you're working with}
}
```
Text citation format:
```
UncleCode. (2024). Crawl4AI: Open-source LLM Friendly Web Crawler & Scraper [Computer software].
GitHub. https://github.com/unclecode/crawl4ai
```
Crawl4AI is released under the [Apache 2.0 License](https://github.com/unclecode/crawl4ai/blob/main/LICENSE).
## 📧 Contact
View File
@@ -1,65 +0,0 @@
# 💖 Sponsors & Supporters
Thank you to everyone supporting Crawl4AI! Your sponsorship helps keep this project open-source and actively maintained.
## 👑 Founding Sponsors
*The first 50 sponsors who believed in our vision - permanently recognized*
<!-- Founding sponsors will be listed here with special recognition -->
🎉 **Become a Founding Sponsor!** Only [X/50] spots remaining! [Join now →](https://github.com/sponsors/unclecode)
---
## 🏢 Data Infrastructure Partners ($2000/month)
*These organizations are building their data sovereignty with Crawl4AI at the core*
<!-- Data Infrastructure Partners will be listed here -->
*Be the first Data Infrastructure Partner! [Join us →](https://github.com/sponsors/unclecode)*
---
## 💼 Growing Teams ($500/month)
*Teams scaling their data extraction with Crawl4AI*
<!-- Growing Teams will be listed here -->
*Your team could be here! [Become a sponsor →](https://github.com/sponsors/unclecode)*
---
## 🚀 Builders ($50/month)
*Developers and entrepreneurs building with Crawl4AI*
<!-- Builders will be listed here -->
*Join the builders! [Start sponsoring →](https://github.com/sponsors/unclecode)*
---
## 🌱 Believers ($5/month)
*The community supporting data democratization*
<!-- Believers will be listed here -->
*Thank you to all our community believers!*
---
## 🤝 Want to Sponsor?
Crawl4AI is the #1 trending open-source web crawler. We're building the future of data extraction - where organizations own their data pipelines instead of relying on rate-limited APIs.
### Available Sponsorship Tiers:
- **🌱 Believer** ($5/mo) - Support the movement
- **🚀 Builder** ($50/mo) - Priority support & early access
- **💼 Growing Team** ($500/mo) - Bi-weekly syncs & optimization
- **🏢 Data Infrastructure Partner** ($2000/mo) - Full partnership & dedicated support
[View all tiers and benefits →](https://github.com/sponsors/unclecode)
### Enterprise & Custom Partnerships
Building data extraction at scale? Need dedicated support or infrastructure? Let's talk about a custom partnership.
📧 Contact: [hello@crawl4ai.com](mailto:hello@crawl4ai.com) | 📅 [Schedule a call](https://calendar.app.google/rEpvi2UBgUQjWHfJ9)
---
*This list is updated regularly. Sponsors at $50+ tiers can submit their logos via [hello@crawl4ai.com](mailto:hello@crawl4ai.com)*
View File
@@ -1,24 +0,0 @@
[changelog]
# Template format
header = """
# Changelog\n
All notable changes to this project will be documented in this file.\n
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).\n
"""
# Organize commits by type
[git]
conventional_commits = true
filter_unconventional = true
commit_parsers = [
{ message = "^feat", group = "Added"},
{ message = "^fix", group = "Fixed"},
{ message = "^doc", group = "Documentation"},
{ message = "^perf", group = "Performance"},
{ message = "^refactor", group = "Changed"},
{ message = "^style", group = "Changed"},
{ message = "^test", group = "Testing"},
{ message = "^chore\\(release\\): prepare for", skip = true},
{ message = "^chore", group = "Miscellaneous Tasks"},
]
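For illustration only, here is how those `commit_parsers` patterns would bucket a few conventional-commit subjects. The real grouping is done by the changelog generator, not this snippet; it simply re-applies the same regexes in Python:

```python
import re

# Mirror of the commit_parsers table above; None means the commit is skipped
parsers = [
    (r"^feat", "Added"),
    (r"^fix", "Fixed"),
    (r"^doc", "Documentation"),
    (r"^perf", "Performance"),
    (r"^refactor", "Changed"),
    (r"^style", "Changed"),
    (r"^test", "Testing"),
    (r"^chore\(release\): prepare for", None),
    (r"^chore", "Miscellaneous Tasks"),
]

def group_for(subject: str):
    for pattern, group in parsers:
        if re.match(pattern, subject):
            return group
    return None  # unconventional commits are filtered out

print(group_for("feat: add memory adaptive dispatcher"))   # -> Added
print(group_for("fix(docker): correct default shm size"))  # -> Fixed
print(group_for("chore(release): prepare for 0.4.3"))      # -> None (skipped)
```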
View File
@@ -1,158 +1,46 @@
# __init__.py
import warnings
from .async_webcrawler import AsyncWebCrawler, CacheMode
# MODIFIED: Add SeedingConfig and VirtualScrollConfig here
from .async_configs import BrowserConfig, CrawlerRunConfig, HTTPCrawlerConfig, LLMConfig, ProxyConfig, GeolocationConfig, SeedingConfig, VirtualScrollConfig, LinkPreviewConfig, MatchMode
from .async_configs import BrowserConfig, CrawlerRunConfig
from .content_scraping_strategy import (
ContentScrapingStrategy,
WebScrapingStrategy,
LXMLWebScrapingStrategy,
WebScrapingStrategy, # Backward compatibility alias
)
from .async_logger import (
AsyncLoggerBase,
AsyncLogger,
)
from .proxy_strategy import (
ProxyRotationStrategy,
RoundRobinProxyStrategy,
)
from .extraction_strategy import (
ExtractionStrategy,
LLMExtractionStrategy,
CosineStrategy,
JsonCssExtractionStrategy,
JsonXPathExtractionStrategy,
JsonLxmlExtractionStrategy,
RegexExtractionStrategy
JsonXPathExtractionStrategy
)
from .chunking_strategy import ChunkingStrategy, RegexChunking
from .markdown_generation_strategy import DefaultMarkdownGenerator
from .content_filter_strategy import (
PruningContentFilter,
BM25ContentFilter,
LLMContentFilter,
RelevantContentFilter,
)
from .models import CrawlResult, MarkdownGenerationResult, DisplayMode
from .components.crawler_monitor import CrawlerMonitor
from .link_preview import LinkPreview
from .content_filter_strategy import PruningContentFilter, BM25ContentFilter, LLMContentFilter, RelevantContentFilter
from .models import CrawlResult, MarkdownGenerationResult
from .async_dispatcher import (
MemoryAdaptiveDispatcher,
SemaphoreDispatcher,
RateLimiter,
BaseDispatcher,
)
from .docker_client import Crawl4aiDockerClient
from .hub import CrawlerHub
from .browser_profiler import BrowserProfiler
from .deep_crawling import (
DeepCrawlStrategy,
BFSDeepCrawlStrategy,
FilterChain,
URLPatternFilter,
DomainFilter,
ContentTypeFilter,
URLFilter,
FilterStats,
SEOFilter,
KeywordRelevanceScorer,
URLScorer,
CompositeScorer,
DomainAuthorityScorer,
FreshnessScorer,
PathDepthScorer,
BestFirstCrawlingStrategy,
DFSDeepCrawlStrategy,
DeepCrawlDecorator,
)
# NEW: Import AsyncUrlSeeder
from .async_url_seeder import AsyncUrlSeeder
# Adaptive Crawler
from .adaptive_crawler import (
AdaptiveCrawler,
AdaptiveConfig,
CrawlState,
CrawlStrategy,
StatisticalStrategy
)
# C4A Script Language Support
from .script import (
compile as c4a_compile,
validate as c4a_validate,
compile_file as c4a_compile_file,
CompilationResult,
ValidationResult,
ErrorDetail
)
# Browser Adapters
from .browser_adapter import (
BrowserAdapter,
PlaywrightAdapter,
UndetectedAdapter
)
from .utils import (
start_colab_display_server,
setup_colab_environment
CrawlerMonitor,
DisplayMode,
BaseDispatcher
)
__all__ = [
"AsyncLoggerBase",
"AsyncLogger",
"AsyncWebCrawler",
"BrowserProfiler",
"LLMConfig",
"GeolocationConfig",
# NEW: Add SeedingConfig and VirtualScrollConfig
"SeedingConfig",
"VirtualScrollConfig",
# NEW: Add AsyncUrlSeeder
"AsyncUrlSeeder",
# Adaptive Crawler
"AdaptiveCrawler",
"AdaptiveConfig",
"CrawlState",
"CrawlStrategy",
"StatisticalStrategy",
"DeepCrawlStrategy",
"BFSDeepCrawlStrategy",
"BestFirstCrawlingStrategy",
"DFSDeepCrawlStrategy",
"FilterChain",
"URLPatternFilter",
"ContentTypeFilter",
"DomainFilter",
"FilterStats",
"URLFilter",
"SEOFilter",
"KeywordRelevanceScorer",
"URLScorer",
"CompositeScorer",
"DomainAuthorityScorer",
"FreshnessScorer",
"PathDepthScorer",
"DeepCrawlDecorator",
"CrawlResult",
"CrawlerHub",
"CacheMode",
"MatchMode",
"ContentScrapingStrategy",
"WebScrapingStrategy",
"LXMLWebScrapingStrategy",
"BrowserConfig",
"CrawlerRunConfig",
"HTTPCrawlerConfig",
"ExtractionStrategy",
"LLMExtractionStrategy",
"CosineStrategy",
"JsonCssExtractionStrategy",
"JsonXPathExtractionStrategy",
"JsonLxmlExtractionStrategy",
"RegexExtractionStrategy",
"ChunkingStrategy",
"RegexChunking",
"DefaultMarkdownGenerator",
@@ -165,52 +53,36 @@ __all__ = [
"SemaphoreDispatcher",
"RateLimiter",
"CrawlerMonitor",
"LinkPreview",
"DisplayMode",
"MarkdownGenerationResult",
"Crawl4aiDockerClient",
"ProxyRotationStrategy",
"RoundRobinProxyStrategy",
"ProxyConfig",
"start_colab_display_server",
"setup_colab_environment",
# C4A Script additions
"c4a_compile",
"c4a_validate",
"c4a_compile_file",
"CompilationResult",
"ValidationResult",
"ErrorDetail",
# Browser Adapters
"BrowserAdapter",
"PlaywrightAdapter",
"UndetectedAdapter",
"LinkPreviewConfig"
]
# def is_sync_version_installed():
# try:
# import selenium # noqa
def is_sync_version_installed():
try:
import selenium
# return True
# except ImportError:
# return False
return True
except ImportError:
return False
# if is_sync_version_installed():
# try:
# from .web_crawler import WebCrawler
if is_sync_version_installed():
try:
from .web_crawler import WebCrawler
# __all__.append("WebCrawler")
# except ImportError:
# print(
# "Warning: Failed to import WebCrawler even though selenium is installed. This might be due to other missing dependencies."
# )
# else:
# WebCrawler = None
# # import warnings
# # print("Warning: Synchronous WebCrawler is not available. Install crawl4ai[sync] for synchronous support. However, please note that the synchronous version will be deprecated soon.")
__all__.append("WebCrawler")
except ImportError:
print(
"Warning: Failed to import WebCrawler even though selenium is installed. This might be due to other missing dependencies."
)
else:
WebCrawler = None
# import warnings
# print("Warning: Synchronous WebCrawler is not available. Install crawl4ai[sync] for synchronous support. However, please note that the synchronous version will be deprecated soon.")
import warnings
from pydantic import warnings as pydantic_warnings
# Disable all Pydantic warnings
warnings.filterwarnings("ignore", module="pydantic")
View File
@@ -1,8 +1,3 @@
# crawl4ai/__version__.py
# This is the version that will be used for stable releases
__version__ = "0.7.3"
# For nightly builds, this gets set during build process
__nightly_version__ = None
# crawl4ai/_version.py
# __version__ = "0.4.3b3"
__version__ = "0.4.248b3"
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
View File

@@ -4,15 +4,20 @@ import aiosqlite
import asyncio
from typing import Optional, Dict
from contextlib import asynccontextmanager
import json
from .models import CrawlResult, MarkdownGenerationResult, StringCompatibleMarkdown
import aiofiles
from .async_logger import AsyncLogger
import logging
import json # Added for serialization/deserialization
from .utils import ensure_content_dirs, generate_content_hash
from .utils import VersionManager
from .models import CrawlResult, MarkdownGenerationResult
import aiofiles
from .version_manager import VersionManager
from .async_logger import AsyncLogger
from .utils import get_error_context, create_box_message
# Set up logging
# logging.basicConfig(level=logging.INFO)
# logger = logging.getLogger(__name__)
# logger.setLevel(logging.INFO)
base_directory = DB_PATH = os.path.join(
os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".crawl4ai"
)
@@ -171,10 +176,7 @@ class AsyncDatabaseManager:
f"Code context:\n{error_context['code_context']}"
)
self.logger.error(
message="{error}",
tag="ERROR",
params={"error": str(error_message)},
boxes=["error"],
message=create_box_message(error_message, type="error"),
)
raise
@@ -192,10 +194,7 @@ class AsyncDatabaseManager:
f"Code context:\n{error_context['code_context']}"
)
self.logger.error(
message="{error}",
tag="ERROR",
params={"error": str(error_message)},
boxes=["error"],
message=create_box_message(error_message, type="error"),
)
raise
finally:
@@ -337,17 +336,12 @@ class AsyncDatabaseManager:
except json.JSONDecodeError:
# Very UGLY, never mention it to me please
if field == "markdown" and isinstance(row_dict[field], str):
row_dict[field] = MarkdownGenerationResult(
raw_markdown=row_dict[field] or "",
markdown_with_citations="",
references_markdown="",
fit_markdown="",
fit_html="",
)
row_dict[field] = row_dict[field]
else:
row_dict[field] = {}
if isinstance(row_dict["markdown"], Dict):
row_dict["markdown_v2"] = row_dict["markdown"]
if row_dict["markdown"].get("raw_markdown"):
row_dict["markdown"] = row_dict["markdown"]["raw_markdown"]
@@ -364,7 +358,7 @@ class AsyncDatabaseManager:
# Remove any fields not in CrawlResult model
valid_fields = CrawlResult.__annotations__.keys()
filtered_dict = {k: v for k, v in row_dict.items() if k in valid_fields}
filtered_dict["markdown"] = row_dict["markdown"]
return CrawlResult(**filtered_dict)
try:
@@ -390,16 +384,16 @@ class AsyncDatabaseManager:
}
try:
if isinstance(result.markdown, StringCompatibleMarkdown):
content_map["markdown"] = (
result.markdown,
"markdown",
)
elif isinstance(result.markdown, MarkdownGenerationResult):
if isinstance(result.markdown, MarkdownGenerationResult):
content_map["markdown"] = (
result.markdown.model_dump_json(),
"markdown",
)
elif hasattr(result, "markdown_v2"):
content_map["markdown"] = (
result.markdown_v2.model_dump_json(),
"markdown",
)
elif isinstance(result.markdown, str):
markdown_result = MarkdownGenerationResult(raw_markdown=result.markdown)
content_map["markdown"] = (
View File
@@ -1,18 +1,20 @@
from typing import Dict, Optional, List, Tuple, Union
from typing import Dict, Optional, List, Tuple
from .async_configs import CrawlerRunConfig
from .models import (
CrawlResult,
CrawlerTaskResult,
CrawlStatus,
DisplayMode,
CrawlStats,
DomainState,
)
from .components.crawler_monitor import CrawlerMonitor
from .types import AsyncWebCrawler
from rich.live import Live
from rich.table import Table
from rich.console import Console
from rich import box
from datetime import datetime, timedelta
from collections.abc import AsyncGenerator
import time
import psutil
import asyncio
@@ -22,7 +24,6 @@ from urllib.parse import urlparse
import random
from abc import ABC, abstractmethod
from .memory_utils import get_true_memory_usage_percent
class RateLimiter:
@@ -85,6 +86,201 @@ class RateLimiter:
return True
class CrawlerMonitor:
def __init__(
self,
max_visible_rows: int = 15,
display_mode: DisplayMode = DisplayMode.DETAILED,
):
self.console = Console()
self.max_visible_rows = max_visible_rows
self.display_mode = display_mode
self.stats: Dict[str, CrawlStats] = {}
self.process = psutil.Process()
self.start_time = datetime.now()
self.live = Live(self._create_table(), refresh_per_second=2)
def start(self):
self.live.start()
def stop(self):
self.live.stop()
def add_task(self, task_id: str, url: str):
self.stats[task_id] = CrawlStats(
task_id=task_id, url=url, status=CrawlStatus.QUEUED
)
self.live.update(self._create_table())
def update_task(self, task_id: str, **kwargs):
if task_id in self.stats:
for key, value in kwargs.items():
setattr(self.stats[task_id], key, value)
self.live.update(self._create_table())
def _create_aggregated_table(self) -> Table:
"""Creates a compact table showing only aggregated statistics"""
table = Table(
box=box.ROUNDED,
title="Crawler Status Overview",
title_style="bold magenta",
header_style="bold blue",
show_lines=True,
)
# Calculate statistics
total_tasks = len(self.stats)
queued = sum(
1 for stat in self.stats.values() if stat.status == CrawlStatus.QUEUED
)
in_progress = sum(
1 for stat in self.stats.values() if stat.status == CrawlStatus.IN_PROGRESS
)
completed = sum(
1 for stat in self.stats.values() if stat.status == CrawlStatus.COMPLETED
)
failed = sum(
1 for stat in self.stats.values() if stat.status == CrawlStatus.FAILED
)
# Memory statistics
current_memory = self.process.memory_info().rss / (1024 * 1024)
total_task_memory = sum(stat.memory_usage for stat in self.stats.values())
peak_memory = max(
(stat.peak_memory for stat in self.stats.values()), default=0.0
)
# Duration
duration = datetime.now() - self.start_time
# Create status row
table.add_column("Status", style="bold cyan")
table.add_column("Count", justify="right")
table.add_column("Percentage", justify="right")
table.add_row("Total Tasks", str(total_tasks), "100%")
table.add_row(
"[yellow]In Queue[/yellow]",
str(queued),
f"{(queued/total_tasks*100):.1f}%" if total_tasks > 0 else "0%",
)
table.add_row(
"[blue]In Progress[/blue]",
str(in_progress),
f"{(in_progress/total_tasks*100):.1f}%" if total_tasks > 0 else "0%",
)
table.add_row(
"[green]Completed[/green]",
str(completed),
f"{(completed/total_tasks*100):.1f}%" if total_tasks > 0 else "0%",
)
table.add_row(
"[red]Failed[/red]",
str(failed),
f"{(failed/total_tasks*100):.1f}%" if total_tasks > 0 else "0%",
)
# Add memory information
table.add_section()
table.add_row(
"[magenta]Current Memory[/magenta]", f"{current_memory:.1f} MB", ""
)
table.add_row(
"[magenta]Total Task Memory[/magenta]", f"{total_task_memory:.1f} MB", ""
)
table.add_row(
"[magenta]Peak Task Memory[/magenta]", f"{peak_memory:.1f} MB", ""
)
table.add_row(
"[yellow]Runtime[/yellow]",
str(timedelta(seconds=int(duration.total_seconds()))),
"",
)
return table
def _create_detailed_table(self) -> Table:
table = Table(
box=box.ROUNDED,
title="Crawler Performance Monitor",
title_style="bold magenta",
header_style="bold blue",
)
# Add columns
table.add_column("Task ID", style="cyan", no_wrap=True)
table.add_column("URL", style="cyan", no_wrap=True)
table.add_column("Status", style="bold")
table.add_column("Memory (MB)", justify="right")
table.add_column("Peak (MB)", justify="right")
table.add_column("Duration", justify="right")
table.add_column("Info", style="italic")
# Add summary row
total_memory = sum(stat.memory_usage for stat in self.stats.values())
active_count = sum(
1 for stat in self.stats.values() if stat.status == CrawlStatus.IN_PROGRESS
)
completed_count = sum(
1 for stat in self.stats.values() if stat.status == CrawlStatus.COMPLETED
)
failed_count = sum(
1 for stat in self.stats.values() if stat.status == CrawlStatus.FAILED
)
table.add_row(
"[bold yellow]SUMMARY",
f"Total: {len(self.stats)}",
f"Active: {active_count}",
f"{total_memory:.1f}",
f"{self.process.memory_info().rss / (1024 * 1024):.1f}",
str(
timedelta(
seconds=int((datetime.now() - self.start_time).total_seconds())
)
),
f"{completed_count}{failed_count}",
style="bold",
)
table.add_section()
# Add rows for each task
visible_stats = sorted(
self.stats.values(),
key=lambda x: (
x.status != CrawlStatus.IN_PROGRESS,
x.status != CrawlStatus.QUEUED,
x.end_time or datetime.max,
),
)[: self.max_visible_rows]
for stat in visible_stats:
status_style = {
CrawlStatus.QUEUED: "white",
CrawlStatus.IN_PROGRESS: "yellow",
CrawlStatus.COMPLETED: "green",
CrawlStatus.FAILED: "red",
}[stat.status]
table.add_row(
stat.task_id[:8], # Show first 8 chars of task ID
stat.url[:40] + "..." if len(stat.url) > 40 else stat.url,
f"[{status_style}]{stat.status.value}[/{status_style}]",
f"{stat.memory_usage:.1f}",
f"{stat.peak_memory:.1f}",
stat.duration,
stat.error_message[:40] if stat.error_message else "",
)
return table
def _create_table(self) -> Table:
"""Creates the appropriate table based on display mode"""
if self.display_mode == DisplayMode.AGGREGATED:
return self._create_aggregated_table()
return self._create_detailed_table()
class BaseDispatcher(ABC):
def __init__(
@@ -98,37 +294,11 @@ class BaseDispatcher(ABC):
self.rate_limiter = rate_limiter
self.monitor = monitor
def select_config(self, url: str, configs: Union[CrawlerRunConfig, List[CrawlerRunConfig]]) -> Optional[CrawlerRunConfig]:
"""Select the appropriate config for a given URL.
Args:
url: The URL to match against
configs: Single config or list of configs to choose from
Returns:
The matching config, or None if no match found
"""
# Single config - return as is
if isinstance(configs, CrawlerRunConfig):
return configs
# Empty list - return None
if not configs:
return None
# Find first matching config
for config in configs:
if config.is_match(url):
return config
# No match found - return None to indicate URL should be skipped
return None
@abstractmethod
async def crawl_url(
self,
url: str,
config: Union[CrawlerRunConfig, List[CrawlerRunConfig]],
config: CrawlerRunConfig,
task_id: str,
monitor: Optional[CrawlerMonitor] = None,
) -> CrawlerTaskResult:
@@ -138,8 +308,8 @@ class BaseDispatcher(ABC):
async def run_urls(
self,
urls: List[str],
crawler: AsyncWebCrawler, # noqa: F821
config: Union[CrawlerRunConfig, List[CrawlerRunConfig]],
crawler: "AsyncWebCrawler", # noqa: F821
config: CrawlerRunConfig,
monitor: Optional[CrawlerMonitor] = None,
) -> List[CrawlerTaskResult]:
pass
@@ -149,195 +319,71 @@ class MemoryAdaptiveDispatcher(BaseDispatcher):
def __init__(
self,
memory_threshold_percent: float = 90.0,
critical_threshold_percent: float = 95.0, # New critical threshold
recovery_threshold_percent: float = 85.0, # New recovery threshold
check_interval: float = 1.0,
max_session_permit: int = 20,
fairness_timeout: float = 600.0, # 10 minutes before prioritizing long-waiting URLs
memory_wait_timeout: Optional[float] = 600.0,
memory_wait_timeout: float = 300.0, # 5 minutes default timeout
rate_limiter: Optional[RateLimiter] = None,
monitor: Optional[CrawlerMonitor] = None,
):
super().__init__(rate_limiter, monitor)
self.memory_threshold_percent = memory_threshold_percent
self.critical_threshold_percent = critical_threshold_percent
self.recovery_threshold_percent = recovery_threshold_percent
self.check_interval = check_interval
self.max_session_permit = max_session_permit
self.fairness_timeout = fairness_timeout
self.memory_wait_timeout = memory_wait_timeout
self.result_queue = asyncio.Queue()
self.task_queue = asyncio.PriorityQueue() # Priority queue for better management
self.memory_pressure_mode = False # Flag to indicate when we're in memory pressure mode
self.current_memory_percent = 0.0 # Track current memory usage
self._high_memory_start_time: Optional[float] = None
async def _memory_monitor_task(self):
"""Background task to continuously monitor memory usage and update state"""
while True:
self.current_memory_percent = get_true_memory_usage_percent()
self.result_queue = asyncio.Queue() # Queue for storing results
# Enter memory pressure mode if we cross the threshold
if self.current_memory_percent >= self.memory_threshold_percent:
if not self.memory_pressure_mode:
self.memory_pressure_mode = True
self._high_memory_start_time = time.time()
if self.monitor:
self.monitor.update_memory_status("PRESSURE")
else:
if self._high_memory_start_time is None:
self._high_memory_start_time = time.time()
if (
self.memory_wait_timeout is not None
and self._high_memory_start_time is not None
and time.time() - self._high_memory_start_time >= self.memory_wait_timeout
):
raise MemoryError(
"Memory usage exceeded threshold for"
f" {self.memory_wait_timeout} seconds"
)
# Exit memory pressure mode if we go below recovery threshold
elif self.memory_pressure_mode and self.current_memory_percent <= self.recovery_threshold_percent:
self.memory_pressure_mode = False
self._high_memory_start_time = None
if self.monitor:
self.monitor.update_memory_status("NORMAL")
elif self.current_memory_percent < self.memory_threshold_percent:
self._high_memory_start_time = None
# In critical mode, we might need to take more drastic action
if self.current_memory_percent >= self.critical_threshold_percent:
if self.monitor:
self.monitor.update_memory_status("CRITICAL")
# We could implement additional memory-saving measures here
await asyncio.sleep(self.check_interval)
def _get_priority_score(self, wait_time: float, retry_count: int) -> float:
"""Calculate priority score (lower is higher priority)
- URLs waiting longer than fairness_timeout get higher priority
- More retry attempts decreases priority
"""
if wait_time > self.fairness_timeout:
# High priority for long-waiting URLs
return -wait_time
# Standard priority based on retries
return retry_count
async def crawl_url(
self,
url: str,
config: Union[CrawlerRunConfig, List[CrawlerRunConfig]],
config: CrawlerRunConfig,
task_id: str,
retry_count: int = 0,
) -> CrawlerTaskResult:
start_time = time.time()
start_time = datetime.now()
error_message = ""
memory_usage = peak_memory = 0.0
# Select appropriate config for this URL
selected_config = self.select_config(url, config)
# If no config matches, return failed result
if selected_config is None:
error_message = f"No matching configuration found for URL: {url}"
if self.monitor:
self.monitor.update_task(
task_id,
status=CrawlStatus.FAILED,
error_message=error_message
)
return CrawlerTaskResult(
task_id=task_id,
url=url,
result=CrawlResult(
url=url,
html="",
metadata={"status": "no_config_match"},
success=False,
error_message=error_message
),
memory_usage=0,
peak_memory=0,
start_time=start_time,
end_time=time.time(),
error_message=error_message,
retry_count=retry_count
)
# Get starting memory for accurate measurement
process = psutil.Process()
start_memory = process.memory_info().rss / (1024 * 1024)
try:
if self.monitor:
self.monitor.update_task(
task_id,
status=CrawlStatus.IN_PROGRESS,
start_time=start_time,
retry_count=retry_count
task_id, status=CrawlStatus.IN_PROGRESS, start_time=start_time
)
self.concurrent_sessions += 1
if self.rate_limiter:
await self.rate_limiter.wait_if_needed(url)
# Check if we're in critical memory state
if self.current_memory_percent >= self.critical_threshold_percent:
# Requeue this task with increased priority and retry count
enqueue_time = time.time()
priority = self._get_priority_score(enqueue_time - start_time, retry_count + 1)
await self.task_queue.put((priority, (url, task_id, retry_count + 1, enqueue_time)))
# Update monitoring
if self.monitor:
self.monitor.update_task(
task_id,
status=CrawlStatus.QUEUED,
error_message="Requeued due to critical memory pressure"
)
# Return placeholder result with requeued status
return CrawlerTaskResult(
task_id=task_id,
url=url,
result=CrawlResult(
url=url, html="", metadata={"status": "requeued"},
success=False, error_message="Requeued due to critical memory pressure"
),
memory_usage=0,
peak_memory=0,
start_time=start_time,
end_time=time.time(),
error_message="Requeued due to critical memory pressure",
retry_count=retry_count + 1
)
# Execute the crawl with selected config
result = await self.crawler.arun(url, config=selected_config, session_id=task_id)
# Measure memory usage
process = psutil.Process()
start_memory = process.memory_info().rss / (1024 * 1024)
result = await self.crawler.arun(url, config=config, session_id=task_id)
end_memory = process.memory_info().rss / (1024 * 1024)
memory_usage = peak_memory = end_memory - start_memory
# Handle rate limiting
if self.rate_limiter and result.status_code:
if not self.rate_limiter.update_delay(url, result.status_code):
error_message = f"Rate limit retry count exceeded for domain {urlparse(url).netloc}"
if self.monitor:
self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
# Update status based on result
result = CrawlerTaskResult(
task_id=task_id,
url=url,
result=result,
memory_usage=memory_usage,
peak_memory=peak_memory,
start_time=start_time,
end_time=datetime.now(),
error_message=error_message,
)
await self.result_queue.put(result)
return result
if not result.success:
error_message = result.error_message
if self.monitor:
self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
elif self.monitor:
self.monitor.update_task(task_id, status=CrawlStatus.COMPLETED)
except Exception as e:
error_message = str(e)
if self.monitor:
@@ -345,9 +391,9 @@ class MemoryAdaptiveDispatcher(BaseDispatcher):
result = CrawlResult(
url=url, html="", metadata={}, success=False, error_message=str(e)
)
finally:
end_time = time.time()
end_time = datetime.now()
if self.monitor:
self.monitor.update_task(
task_id,
@@ -355,10 +401,9 @@ class MemoryAdaptiveDispatcher(BaseDispatcher):
memory_usage=memory_usage,
peak_memory=peak_memory,
error_message=error_message,
retry_count=retry_count
)
self.concurrent_sessions -= 1
return CrawlerTaskResult(
task_id=task_id,
url=url,
@@ -368,257 +413,117 @@ class MemoryAdaptiveDispatcher(BaseDispatcher):
start_time=start_time,
end_time=end_time,
error_message=error_message,
retry_count=retry_count
)
async def run_urls(
self,
urls: List[str],
crawler: AsyncWebCrawler,
config: Union[CrawlerRunConfig, List[CrawlerRunConfig]],
) -> List[CrawlerTaskResult]:
self.crawler = crawler
# Start the memory monitor task
memory_monitor = asyncio.create_task(self._memory_monitor_task())
if self.monitor:
self.monitor.start()
results = []
crawler: "AsyncWebCrawler", # noqa: F821
config: CrawlerRunConfig,
) -> List[CrawlerTaskResult]:
self.crawler = crawler
try:
# Initialize task queue
for url in urls:
task_id = str(uuid.uuid4())
if self.monitor:
self.monitor.add_task(task_id, url)
# Add to queue with initial priority 0, retry count 0, and current time
await self.task_queue.put((0, (url, task_id, 0, time.time())))
if self.monitor:
self.monitor.start()
active_tasks = []
try:
pending_tasks = []
active_tasks = []
task_queue = []
# Process until both queues are empty
while not self.task_queue.empty() or active_tasks:
if memory_monitor.done():
exc = memory_monitor.exception()
if exc:
for t in active_tasks:
t.cancel()
raise exc
for url in urls:
task_id = str(uuid.uuid4())
if self.monitor:
self.monitor.add_task(task_id, url)
task_queue.append((url, task_id))
# If memory pressure is low, greedily fill all available slots
if not self.memory_pressure_mode:
slots = self.max_session_permit - len(active_tasks)
while slots > 0:
try:
# Use get_nowait() to immediately get tasks without blocking
priority, (url, task_id, retry_count, enqueue_time) = self.task_queue.get_nowait()
# Create and start the task
task = asyncio.create_task(
self.crawl_url(url, config, task_id, retry_count)
)
active_tasks.append(task)
# Update waiting time in monitor
if self.monitor:
wait_time = time.time() - enqueue_time
self.monitor.update_task(
task_id,
wait_time=wait_time,
status=CrawlStatus.IN_PROGRESS
while task_queue or active_tasks:
wait_start_time = time.time()
while len(active_tasks) < self.max_session_permit and task_queue:
if psutil.virtual_memory().percent >= self.memory_threshold_percent:
# Check if we've exceeded the timeout
if time.time() - wait_start_time > self.memory_wait_timeout:
raise MemoryError(
f"Memory usage above threshold ({self.memory_threshold_percent}%) for more than {self.memory_wait_timeout} seconds"
)
slots -= 1
except asyncio.QueueEmpty:
# No more tasks in queue, exit the loop
break
# Wait for completion even if queue is starved
if active_tasks:
done, pending = await asyncio.wait(
active_tasks, timeout=0.1, return_when=asyncio.FIRST_COMPLETED
)
# Process completed tasks
for completed_task in done:
result = await completed_task
results.append(result)
# Update active tasks list
active_tasks = list(pending)
else:
# If no active tasks but still waiting, sleep briefly
await asyncio.sleep(self.check_interval / 2)
# Update priorities for waiting tasks if needed
await self._update_queue_priorities()
return results
await asyncio.sleep(self.check_interval)
continue
except Exception as e:
if self.monitor:
self.monitor.update_memory_status(f"QUEUE_ERROR: {str(e)}")
finally:
# Clean up
memory_monitor.cancel()
if self.monitor:
self.monitor.stop()
async def _update_queue_priorities(self):
"""Periodically update priorities of items in the queue to prevent starvation"""
# Skip if queue is empty
if self.task_queue.empty():
return
# Use a drain-and-refill approach to update all priorities
temp_items = []
# Drain the queue (with a safety timeout to prevent blocking)
try:
drain_start = time.time()
while not self.task_queue.empty() and time.time() - drain_start < 5.0: # 5 second safety timeout
try:
# Get item from queue with timeout
priority, (url, task_id, retry_count, enqueue_time) = await asyncio.wait_for(
self.task_queue.get(), timeout=0.1
url, task_id = task_queue.pop(0)
task = asyncio.create_task(self.crawl_url(url, config, task_id))
active_tasks.append(task)
if not active_tasks:
await asyncio.sleep(self.check_interval)
continue
done, pending = await asyncio.wait(
active_tasks, return_when=asyncio.FIRST_COMPLETED
)
# Calculate new priority based on current wait time
current_time = time.time()
wait_time = current_time - enqueue_time
new_priority = self._get_priority_score(wait_time, retry_count)
# Store with updated priority
temp_items.append((new_priority, (url, task_id, retry_count, enqueue_time)))
# Update monitoring stats for this task
if self.monitor and task_id in self.monitor.stats:
self.monitor.update_task(task_id, wait_time=wait_time)
except asyncio.TimeoutError:
# Queue might be empty or very slow
break
except Exception as e:
# If anything goes wrong, make sure we refill the queue with what we've got
self.monitor.update_memory_status(f"QUEUE_ERROR: {str(e)}")
# Calculate queue statistics
if temp_items and self.monitor:
total_queued = len(temp_items)
wait_times = [item[1][3] for item in temp_items]
highest_wait_time = time.time() - min(wait_times) if wait_times else 0
avg_wait_time = sum(time.time() - t for t in wait_times) / len(wait_times) if wait_times else 0
# Update queue statistics in monitor
self.monitor.update_queue_statistics(
total_queued=total_queued,
highest_wait_time=highest_wait_time,
avg_wait_time=avg_wait_time
)
# Sort by priority (lowest number = highest priority)
temp_items.sort(key=lambda x: x[0])
# Refill the queue with updated priorities
for item in temp_items:
await self.task_queue.put(item)
pending_tasks.extend(done)
active_tasks = list(pending)
return await asyncio.gather(*pending_tasks)
finally:
if self.monitor:
self.monitor.stop()
async def run_urls_stream(
self,
urls: List[str],
crawler: AsyncWebCrawler,
config: Union[CrawlerRunConfig, List[CrawlerRunConfig]],
crawler: "AsyncWebCrawler",
config: CrawlerRunConfig,
) -> AsyncGenerator[CrawlerTaskResult, None]:
self.crawler = crawler
# Start the memory monitor task
memory_monitor = asyncio.create_task(self._memory_monitor_task())
if self.monitor:
self.monitor.start()
try:
active_tasks = []
task_queue = []
completed_count = 0
total_urls = len(urls)
# Initialize task queue
for url in urls:
task_id = str(uuid.uuid4())
if self.monitor:
self.monitor.add_task(task_id, url)
# Add to queue with initial priority 0, retry count 0, and current time
await self.task_queue.put((0, (url, task_id, 0, time.time())))
active_tasks = []
completed_count = 0
total_urls = len(urls)
task_queue.append((url, task_id))
while completed_count < total_urls:
if memory_monitor.done():
exc = memory_monitor.exception()
if exc:
for t in active_tasks:
t.cancel()
raise exc
# If memory pressure is low, greedily fill all available slots
if not self.memory_pressure_mode:
slots = self.max_session_permit - len(active_tasks)
while slots > 0:
try:
# Use get_nowait() to immediately get tasks without blocking
priority, (url, task_id, retry_count, enqueue_time) = self.task_queue.get_nowait()
# Create and start the task
task = asyncio.create_task(
self.crawl_url(url, config, task_id, retry_count)
)
active_tasks.append(task)
# Update waiting time in monitor
if self.monitor:
wait_time = time.time() - enqueue_time
self.monitor.update_task(
task_id,
wait_time=wait_time,
status=CrawlStatus.IN_PROGRESS
)
slots -= 1
except asyncio.QueueEmpty:
# No more tasks in queue, exit the loop
break
# Process completed tasks and yield results
# Start new tasks if memory permits
while len(active_tasks) < self.max_session_permit and task_queue:
if psutil.virtual_memory().percent >= self.memory_threshold_percent:
await asyncio.sleep(self.check_interval)
continue
url, task_id = task_queue.pop(0)
task = asyncio.create_task(self.crawl_url(url, config, task_id))
active_tasks.append(task)
if not active_tasks and not task_queue:
break
# Wait for any task to complete and yield results
if active_tasks:
done, pending = await asyncio.wait(
active_tasks, timeout=0.1, return_when=asyncio.FIRST_COMPLETED
active_tasks,
timeout=0.1,
return_when=asyncio.FIRST_COMPLETED
)
for completed_task in done:
result = await completed_task
# Only count as completed if it wasn't requeued
if "requeued" not in result.error_message:
completed_count += 1
yield result
# Update active tasks list
completed_count += 1
yield result
active_tasks = list(pending)
else:
# If no active tasks but still waiting, sleep briefly
await asyncio.sleep(self.check_interval / 2)
# Update priorities for waiting tasks if needed
await self._update_queue_priorities()
await asyncio.sleep(self.check_interval)
finally:
# Clean up
memory_monitor.cancel()
if self.monitor:
self.monitor.stop()
class SemaphoreDispatcher(BaseDispatcher):
def __init__(
@@ -635,44 +540,14 @@ class SemaphoreDispatcher(BaseDispatcher):
async def crawl_url(
self,
url: str,
config: Union[CrawlerRunConfig, List[CrawlerRunConfig]],
config: CrawlerRunConfig,
task_id: str,
semaphore: asyncio.Semaphore = None,
) -> CrawlerTaskResult:
start_time = time.time()
start_time = datetime.now()
error_message = ""
memory_usage = peak_memory = 0.0
# Select appropriate config for this URL
selected_config = self.select_config(url, config)
# If no config matches, return failed result
if selected_config is None:
error_message = f"No matching configuration found for URL: {url}"
if self.monitor:
self.monitor.update_task(
task_id,
status=CrawlStatus.FAILED,
error_message=error_message
)
return CrawlerTaskResult(
task_id=task_id,
url=url,
result=CrawlResult(
url=url,
html="",
metadata={"status": "no_config_match"},
success=False,
error_message=error_message
),
memory_usage=0,
peak_memory=0,
start_time=start_time,
end_time=time.time(),
error_message=error_message
)
try:
if self.monitor:
self.monitor.update_task(
@@ -685,7 +560,7 @@ class SemaphoreDispatcher(BaseDispatcher):
async with semaphore:
process = psutil.Process()
start_memory = process.memory_info().rss / (1024 * 1024)
result = await self.crawler.arun(url, config=selected_config, session_id=task_id)
result = await self.crawler.arun(url, config=config, session_id=task_id)
end_memory = process.memory_info().rss / (1024 * 1024)
memory_usage = peak_memory = end_memory - start_memory
@@ -702,7 +577,7 @@ class SemaphoreDispatcher(BaseDispatcher):
memory_usage=memory_usage,
peak_memory=peak_memory,
start_time=start_time,
end_time=time.time(),
end_time=datetime.now(),
error_message=error_message,
)
@@ -722,7 +597,7 @@ class SemaphoreDispatcher(BaseDispatcher):
)
finally:
end_time = time.time()
end_time = datetime.now()
if self.monitor:
self.monitor.update_task(
task_id,
@@ -745,9 +620,9 @@ class SemaphoreDispatcher(BaseDispatcher):
async def run_urls(
self,
crawler: AsyncWebCrawler, # noqa: F821
crawler: "AsyncWebCrawler", # noqa: F821
urls: List[str],
config: Union[CrawlerRunConfig, List[CrawlerRunConfig]],
config: CrawlerRunConfig,
) -> List[CrawlerTaskResult]:
self.crawler = crawler
if self.monitor:
@@ -769,4 +644,4 @@ class SemaphoreDispatcher(BaseDispatcher):
return await asyncio.gather(*tasks, return_exceptions=True)
finally:
if self.monitor:
self.monitor.stop()
self.monitor.stop()

View File

@@ -0,0 +1,588 @@
from typing import Dict, Optional, List, Tuple
from .async_configs import CrawlerRunConfig
from .models import (
CrawlResult,
CrawlerTaskResult,
CrawlStatus,
DisplayMode,
CrawlStats,
DomainState,
)
from rich.live import Live
from rich.table import Table
from rich.console import Console
from rich import box
from datetime import datetime, timedelta
import time
import psutil
import asyncio
import uuid
from urllib.parse import urlparse
import random
from abc import ABC, abstractmethod
class RateLimiter:
def __init__(
self,
base_delay: Tuple[float, float] = (1.0, 3.0),
max_delay: float = 60.0,
max_retries: int = 3,
rate_limit_codes: List[int] = None,
):
self.base_delay = base_delay
self.max_delay = max_delay
self.max_retries = max_retries
self.rate_limit_codes = rate_limit_codes or [429, 503]
self.domains: Dict[str, DomainState] = {}
def get_domain(self, url: str) -> str:
return urlparse(url).netloc
async def wait_if_needed(self, url: str) -> None:
domain = self.get_domain(url)
state = self.domains.get(domain)
if not state:
self.domains[domain] = DomainState()
state = self.domains[domain]
now = time.time()
if state.last_request_time:
wait_time = max(0, state.current_delay - (now - state.last_request_time))
if wait_time > 0:
await asyncio.sleep(wait_time)
# Random delay within base range if no current delay
if state.current_delay == 0:
state.current_delay = random.uniform(*self.base_delay)
state.last_request_time = time.time()
def update_delay(self, url: str, status_code: int) -> bool:
domain = self.get_domain(url)
state = self.domains[domain]
if status_code in self.rate_limit_codes:
state.fail_count += 1
if state.fail_count > self.max_retries:
return False
# Exponential backoff with random jitter
state.current_delay = min(
state.current_delay * 2 * random.uniform(0.75, 1.25), self.max_delay
)
else:
# Gradually reduce delay on success
state.current_delay = max(
random.uniform(*self.base_delay), state.current_delay * 0.75
)
state.fail_count = 0
return True
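# Illustrative sketch: exercising the RateLimiter above on its own. The URL and
# status codes are made up purely to show how update_delay() backs off on 429s
# and relaxes again on success; only methods defined above are used.
async def _rate_limiter_demo():
    limiter = RateLimiter(base_delay=(0.1, 0.3), max_delay=5.0, max_retries=2)
    url = "https://example.com/page"
    for status in (200, 429, 429, 200):
        await limiter.wait_if_needed(url)            # sleeps for the current per-domain delay
        allowed = limiter.update_delay(url, status)  # False once max_retries is exceeded
        state = limiter.domains[limiter.get_domain(url)]
        print(status, allowed, round(state.current_delay, 2))
# asyncio.run(_rate_limiter_demo())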
class CrawlerMonitor:
def __init__(
self,
max_visible_rows: int = 15,
display_mode: DisplayMode = DisplayMode.DETAILED,
):
self.console = Console()
self.max_visible_rows = max_visible_rows
self.display_mode = display_mode
self.stats: Dict[str, CrawlStats] = {}
self.process = psutil.Process()
self.start_time = datetime.now()
self.live = Live(self._create_table(), refresh_per_second=2)
def start(self):
self.live.start()
def stop(self):
self.live.stop()
def add_task(self, task_id: str, url: str):
self.stats[task_id] = CrawlStats(
task_id=task_id, url=url, status=CrawlStatus.QUEUED
)
self.live.update(self._create_table())
def update_task(self, task_id: str, **kwargs):
if task_id in self.stats:
for key, value in kwargs.items():
setattr(self.stats[task_id], key, value)
self.live.update(self._create_table())
def _create_aggregated_table(self) -> Table:
"""Creates a compact table showing only aggregated statistics"""
table = Table(
box=box.ROUNDED,
title="Crawler Status Overview",
title_style="bold magenta",
header_style="bold blue",
show_lines=True,
)
# Calculate statistics
total_tasks = len(self.stats)
queued = sum(
1 for stat in self.stats.values() if stat.status == CrawlStatus.QUEUED
)
in_progress = sum(
1 for stat in self.stats.values() if stat.status == CrawlStatus.IN_PROGRESS
)
completed = sum(
1 for stat in self.stats.values() if stat.status == CrawlStatus.COMPLETED
)
failed = sum(
1 for stat in self.stats.values() if stat.status == CrawlStatus.FAILED
)
# Memory statistics
current_memory = self.process.memory_info().rss / (1024 * 1024)
total_task_memory = sum(stat.memory_usage for stat in self.stats.values())
peak_memory = max(
(stat.peak_memory for stat in self.stats.values()), default=0.0
)
# Duration
duration = datetime.now() - self.start_time
# Create status row
table.add_column("Status", style="bold cyan")
table.add_column("Count", justify="right")
table.add_column("Percentage", justify="right")
table.add_row("Total Tasks", str(total_tasks), "100%")
table.add_row(
"[yellow]In Queue[/yellow]",
str(queued),
f"{(queued/total_tasks*100):.1f}%" if total_tasks > 0 else "0%",
)
table.add_row(
"[blue]In Progress[/blue]",
str(in_progress),
f"{(in_progress/total_tasks*100):.1f}%" if total_tasks > 0 else "0%",
)
table.add_row(
"[green]Completed[/green]",
str(completed),
f"{(completed/total_tasks*100):.1f}%" if total_tasks > 0 else "0%",
)
table.add_row(
"[red]Failed[/red]",
str(failed),
f"{(failed/total_tasks*100):.1f}%" if total_tasks > 0 else "0%",
)
# Add memory information
table.add_section()
table.add_row(
"[magenta]Current Memory[/magenta]", f"{current_memory:.1f} MB", ""
)
table.add_row(
"[magenta]Total Task Memory[/magenta]", f"{total_task_memory:.1f} MB", ""
)
table.add_row(
"[magenta]Peak Task Memory[/magenta]", f"{peak_memory:.1f} MB", ""
)
table.add_row(
"[yellow]Runtime[/yellow]",
str(timedelta(seconds=int(duration.total_seconds()))),
"",
)
return table
def _create_detailed_table(self) -> Table:
table = Table(
box=box.ROUNDED,
title="Crawler Performance Monitor",
title_style="bold magenta",
header_style="bold blue",
)
# Add columns
table.add_column("Task ID", style="cyan", no_wrap=True)
table.add_column("URL", style="cyan", no_wrap=True)
table.add_column("Status", style="bold")
table.add_column("Memory (MB)", justify="right")
table.add_column("Peak (MB)", justify="right")
table.add_column("Duration", justify="right")
table.add_column("Info", style="italic")
# Add summary row
total_memory = sum(stat.memory_usage for stat in self.stats.values())
active_count = sum(
1 for stat in self.stats.values() if stat.status == CrawlStatus.IN_PROGRESS
)
completed_count = sum(
1 for stat in self.stats.values() if stat.status == CrawlStatus.COMPLETED
)
failed_count = sum(
1 for stat in self.stats.values() if stat.status == CrawlStatus.FAILED
)
table.add_row(
"[bold yellow]SUMMARY",
f"Total: {len(self.stats)}",
f"Active: {active_count}",
f"{total_memory:.1f}",
f"{self.process.memory_info().rss / (1024 * 1024):.1f}",
str(
timedelta(
seconds=int((datetime.now() - self.start_time).total_seconds())
)
),
f"{completed_count}{failed_count}",
style="bold",
)
table.add_section()
# Add rows for each task
visible_stats = sorted(
self.stats.values(),
key=lambda x: (
x.status != CrawlStatus.IN_PROGRESS,
x.status != CrawlStatus.QUEUED,
x.end_time or datetime.max,
),
)[: self.max_visible_rows]
for stat in visible_stats:
status_style = {
CrawlStatus.QUEUED: "white",
CrawlStatus.IN_PROGRESS: "yellow",
CrawlStatus.COMPLETED: "green",
CrawlStatus.FAILED: "red",
}[stat.status]
table.add_row(
stat.task_id[:8], # Show first 8 chars of task ID
stat.url[:40] + "..." if len(stat.url) > 40 else stat.url,
f"[{status_style}]{stat.status.value}[/{status_style}]",
f"{stat.memory_usage:.1f}",
f"{stat.peak_memory:.1f}",
stat.duration,
stat.error_message[:40] if stat.error_message else "",
)
return table
def _create_table(self) -> Table:
"""Creates the appropriate table based on display mode"""
if self.display_mode == DisplayMode.AGGREGATED:
return self._create_aggregated_table()
return self._create_detailed_table()
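# Illustrative sketch: driving the CrawlerMonitor above by hand. DisplayMode and
# CrawlStatus come from .models as imported at the top of this file; the task id,
# URL, and memory figure are placeholders.
def _monitor_demo():
    monitor = CrawlerMonitor(max_visible_rows=10, display_mode=DisplayMode.AGGREGATED)
    monitor.start()
    monitor.add_task("task-1234", "https://example.com")
    monitor.update_task("task-1234", status=CrawlStatus.IN_PROGRESS)
    monitor.update_task("task-1234", status=CrawlStatus.COMPLETED, memory_usage=12.5)
    monitor.stop()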
class BaseDispatcher(ABC):
def __init__(
self,
rate_limiter: Optional[RateLimiter] = None,
monitor: Optional[CrawlerMonitor] = None,
):
self.crawler = None
self._domain_last_hit: Dict[str, float] = {}
self.concurrent_sessions = 0
self.rate_limiter = rate_limiter
self.monitor = monitor
@abstractmethod
async def crawl_url(
self,
url: str,
config: CrawlerRunConfig,
task_id: str,
monitor: Optional[CrawlerMonitor] = None,
) -> CrawlerTaskResult:
pass
@abstractmethod
async def run_urls(
self,
urls: List[str],
crawler: "AsyncWebCrawler", # noqa: F821
config: CrawlerRunConfig,
monitor: Optional[CrawlerMonitor] = None,
) -> List[CrawlerTaskResult]:
pass
class MemoryAdaptiveDispatcher(BaseDispatcher):
def __init__(
self,
memory_threshold_percent: float = 90.0,
check_interval: float = 1.0,
max_session_permit: int = 20,
memory_wait_timeout: float = 300.0, # 5 minutes default timeout
rate_limiter: Optional[RateLimiter] = None,
monitor: Optional[CrawlerMonitor] = None,
):
super().__init__(rate_limiter, monitor)
self.memory_threshold_percent = memory_threshold_percent
self.check_interval = check_interval
self.max_session_permit = max_session_permit
self.memory_wait_timeout = memory_wait_timeout
async def crawl_url(
self,
url: str,
config: CrawlerRunConfig,
task_id: str,
) -> CrawlerTaskResult:
start_time = datetime.now()
error_message = ""
memory_usage = peak_memory = 0.0
try:
if self.monitor:
self.monitor.update_task(
task_id, status=CrawlStatus.IN_PROGRESS, start_time=start_time
)
self.concurrent_sessions += 1
if self.rate_limiter:
await self.rate_limiter.wait_if_needed(url)
process = psutil.Process()
start_memory = process.memory_info().rss / (1024 * 1024)
result = await self.crawler.arun(url, config=config, session_id=task_id)
end_memory = process.memory_info().rss / (1024 * 1024)
memory_usage = peak_memory = end_memory - start_memory
if self.rate_limiter and result.status_code:
if not self.rate_limiter.update_delay(url, result.status_code):
error_message = f"Rate limit retry count exceeded for domain {urlparse(url).netloc}"
if self.monitor:
self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
return CrawlerTaskResult(
task_id=task_id,
url=url,
result=result,
memory_usage=memory_usage,
peak_memory=peak_memory,
start_time=start_time,
end_time=datetime.now(),
error_message=error_message,
)
if not result.success:
error_message = result.error_message
if self.monitor:
self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
elif self.monitor:
self.monitor.update_task(task_id, status=CrawlStatus.COMPLETED)
except Exception as e:
error_message = str(e)
if self.monitor:
self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
result = CrawlResult(
url=url, html="", metadata={}, success=False, error_message=str(e)
)
finally:
end_time = datetime.now()
if self.monitor:
self.monitor.update_task(
task_id,
end_time=end_time,
memory_usage=memory_usage,
peak_memory=peak_memory,
error_message=error_message,
)
self.concurrent_sessions -= 1
return CrawlerTaskResult(
task_id=task_id,
url=url,
result=result,
memory_usage=memory_usage,
peak_memory=peak_memory,
start_time=start_time,
end_time=end_time,
error_message=error_message,
)
async def run_urls(
self,
urls: List[str],
crawler: "AsyncWebCrawler", # noqa: F821
config: CrawlerRunConfig,
) -> List[CrawlerTaskResult]:
self.crawler = crawler
if self.monitor:
self.monitor.start()
try:
pending_tasks = []
active_tasks = []
task_queue = []
for url in urls:
task_id = str(uuid.uuid4())
if self.monitor:
self.monitor.add_task(task_id, url)
task_queue.append((url, task_id))
while task_queue or active_tasks:
wait_start_time = time.time()
while len(active_tasks) < self.max_session_permit and task_queue:
if psutil.virtual_memory().percent >= self.memory_threshold_percent:
# Check if we've exceeded the timeout
if time.time() - wait_start_time > self.memory_wait_timeout:
raise MemoryError(
f"Memory usage above threshold ({self.memory_threshold_percent}%) for more than {self.memory_wait_timeout} seconds"
)
await asyncio.sleep(self.check_interval)
continue
url, task_id = task_queue.pop(0)
task = asyncio.create_task(self.crawl_url(url, config, task_id))
active_tasks.append(task)
if not active_tasks:
await asyncio.sleep(self.check_interval)
continue
done, pending = await asyncio.wait(
active_tasks, return_when=asyncio.FIRST_COMPLETED
)
pending_tasks.extend(done)
active_tasks = list(pending)
return await asyncio.gather(*pending_tasks)
finally:
if self.monitor:
self.monitor.stop()
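# Illustrative sketch: wiring the MemoryAdaptiveDispatcher above to a crawler. It
# assumes AsyncWebCrawler is exposed at the package root and can be used as an
# async context manager; the import is kept inside the function only to avoid a
# circular import at module load time. The threshold and permit values are examples.
async def _memory_dispatch_demo(urls):
    from crawl4ai import AsyncWebCrawler  # assumed public entry point

    dispatcher = MemoryAdaptiveDispatcher(
        memory_threshold_percent=85.0,
        max_session_permit=5,
        rate_limiter=RateLimiter(base_delay=(0.5, 1.0)),
        monitor=CrawlerMonitor(display_mode=DisplayMode.AGGREGATED),
    )
    async with AsyncWebCrawler() as crawler:
        return await dispatcher.run_urls(urls, crawler, CrawlerRunConfig())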
class SemaphoreDispatcher(BaseDispatcher):
def __init__(
self,
semaphore_count: int = 5,
max_session_permit: int = 20,
rate_limiter: Optional[RateLimiter] = None,
monitor: Optional[CrawlerMonitor] = None,
):
super().__init__(rate_limiter, monitor)
self.semaphore_count = semaphore_count
self.max_session_permit = max_session_permit
async def crawl_url(
self,
url: str,
config: CrawlerRunConfig,
task_id: str,
semaphore: asyncio.Semaphore = None,
) -> CrawlerTaskResult:
start_time = datetime.now()
error_message = ""
memory_usage = peak_memory = 0.0
try:
if self.monitor:
self.monitor.update_task(
task_id, status=CrawlStatus.IN_PROGRESS, start_time=start_time
)
if self.rate_limiter:
await self.rate_limiter.wait_if_needed(url)
async with semaphore:
process = psutil.Process()
start_memory = process.memory_info().rss / (1024 * 1024)
result = await self.crawler.arun(url, config=config, session_id=task_id)
end_memory = process.memory_info().rss / (1024 * 1024)
memory_usage = peak_memory = end_memory - start_memory
if self.rate_limiter and result.status_code:
if not self.rate_limiter.update_delay(url, result.status_code):
error_message = f"Rate limit retry count exceeded for domain {urlparse(url).netloc}"
if self.monitor:
self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
return CrawlerTaskResult(
task_id=task_id,
url=url,
result=result,
memory_usage=memory_usage,
peak_memory=peak_memory,
start_time=start_time,
end_time=datetime.now(),
error_message=error_message,
)
if not result.success:
error_message = result.error_message
if self.monitor:
self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
elif self.monitor:
self.monitor.update_task(task_id, status=CrawlStatus.COMPLETED)
except Exception as e:
error_message = str(e)
if self.monitor:
self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
result = CrawlResult(
url=url, html="", metadata={}, success=False, error_message=str(e)
)
finally:
end_time = datetime.now()
if self.monitor:
self.monitor.update_task(
task_id,
end_time=end_time,
memory_usage=memory_usage,
peak_memory=peak_memory,
error_message=error_message,
)
return CrawlerTaskResult(
task_id=task_id,
url=url,
result=result,
memory_usage=memory_usage,
peak_memory=peak_memory,
start_time=start_time,
end_time=end_time,
error_message=error_message,
)
async def run_urls(
self,
crawler: "AsyncWebCrawler", # noqa: F821
urls: List[str],
config: CrawlerRunConfig,
) -> List[CrawlerTaskResult]:
self.crawler = crawler
if self.monitor:
self.monitor.start()
try:
semaphore = asyncio.Semaphore(self.semaphore_count)
tasks = []
for url in urls:
task_id = str(uuid.uuid4())
if self.monitor:
self.monitor.add_task(task_id, url)
task = asyncio.create_task(
self.crawl_url(url, config, task_id, semaphore)
)
tasks.append(task)
return await asyncio.gather(*tasks, return_exceptions=True)
finally:
if self.monitor:
self.monitor.stop()
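# Illustrative sketch: the SemaphoreDispatcher is the simpler alternative, bounding
# concurrency with a fixed semaphore instead of reacting to memory pressure. Note
# the (crawler, urls, config) argument order of its run_urls. Same assumption about
# AsyncWebCrawler as in the sketch above.
async def _semaphore_dispatch_demo(urls):
    from crawl4ai import AsyncWebCrawler  # assumed public entry point

    dispatcher = SemaphoreDispatcher(semaphore_count=3, rate_limiter=RateLimiter())
    async with AsyncWebCrawler() as crawler:
        return await dispatcher.run_urls(crawler, urls, CrawlerRunConfig())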

View File

@@ -1,82 +1,19 @@
from abc import ABC, abstractmethod
from enum import Enum
from typing import Optional, Dict, Any, List
from typing import Optional, Dict, Any
from colorama import Fore, Style, init
import os
from datetime import datetime
from urllib.parse import unquote
from rich.console import Console
from rich.text import Text
from .utils import create_box_message
class LogLevel(Enum):
DEFAULT = 0
DEBUG = 1
INFO = 2
SUCCESS = 3
WARNING = 4
ERROR = 5
CRITICAL = 6
ALERT = 7
NOTICE = 8
EXCEPTION = 9
FATAL = 10
def __str__(self):
return self.name.lower()
class LogColor(str, Enum):
"""Enum for log colors."""
DEBUG = "bright_black"
INFO = "cyan"
SUCCESS = "green"
WARNING = "yellow"
ERROR = "red"
CYAN = "cyan"
GREEN = "green"
YELLOW = "yellow"
MAGENTA = "magenta"
DIM_MAGENTA = "dim magenta"
RED = "red"
def __str__(self):
"""Automatically convert rich color to string."""
return self.value
class AsyncLoggerBase(ABC):
@abstractmethod
def debug(self, message: str, tag: str = "DEBUG", **kwargs):
pass
@abstractmethod
def info(self, message: str, tag: str = "INFO", **kwargs):
pass
@abstractmethod
def success(self, message: str, tag: str = "SUCCESS", **kwargs):
pass
@abstractmethod
def warning(self, message: str, tag: str = "WARNING", **kwargs):
pass
@abstractmethod
def error(self, message: str, tag: str = "ERROR", **kwargs):
pass
@abstractmethod
def url_status(self, url: str, success: bool, timing: float, tag: str = "FETCH", url_length: int = 100):
pass
@abstractmethod
def error_status(self, url: str, error: str, tag: str = "ERROR", url_length: int = 100):
pass
class AsyncLogger(AsyncLoggerBase):
class AsyncLogger:
"""
Asynchronous logger with support for colored console output and file logging.
Supports templated messages with colored components.
@@ -93,21 +30,14 @@ class AsyncLogger(AsyncLoggerBase):
"DEBUG": "",
"INFO": "",
"WARNING": "",
"SUCCESS": "",
"CRITICAL": "",
"ALERT": "",
"NOTICE": "",
"EXCEPTION": "",
"FATAL": "",
"DEFAULT": "",
}
DEFAULT_COLORS = {
LogLevel.DEBUG: LogColor.DEBUG,
LogLevel.INFO: LogColor.INFO,
LogLevel.SUCCESS: LogColor.SUCCESS,
LogLevel.WARNING: LogColor.WARNING,
LogLevel.ERROR: LogColor.ERROR,
LogLevel.DEBUG: Fore.LIGHTBLACK_EX,
LogLevel.INFO: Fore.CYAN,
LogLevel.SUCCESS: Fore.GREEN,
LogLevel.WARNING: Fore.YELLOW,
LogLevel.ERROR: Fore.RED,
}
def __init__(
@@ -116,7 +46,7 @@ class AsyncLogger(AsyncLoggerBase):
log_level: LogLevel = LogLevel.DEBUG,
tag_width: int = 10,
icons: Optional[Dict[str, str]] = None,
colors: Optional[Dict[LogLevel, LogColor]] = None,
colors: Optional[Dict[LogLevel, str]] = None,
verbose: bool = True,
):
"""
@@ -130,13 +60,13 @@ class AsyncLogger(AsyncLoggerBase):
colors: Custom colors for different log levels
verbose: Whether to output to console
"""
init() # Initialize colorama
self.log_file = log_file
self.log_level = log_level
self.tag_width = tag_width
self.icons = icons or self.DEFAULT_ICONS
self.colors = colors or self.DEFAULT_COLORS
self.verbose = verbose
self.console = Console()
# Create log file directory if needed
if log_file:
@@ -149,23 +79,20 @@ class AsyncLogger(AsyncLoggerBase):
def _get_icon(self, tag: str) -> str:
"""Get the icon for a tag, defaulting to info icon if not found."""
return self.icons.get(tag, self.icons["INFO"])
def _shorten(self, text, length, placeholder="..."):
"""Truncate text in the middle if longer than length, or pad if shorter."""
if len(text) <= length:
return text.ljust(length) # Pad with spaces to reach desired length
half = (length - len(placeholder)) // 2
shortened = text[:half] + placeholder + text[-half:]
return shortened.ljust(length) # Also pad shortened text to consistent length
def _write_to_file(self, message: str):
"""Write a message to the log file if configured."""
if self.log_file:
text = Text.from_markup(message)
plain_text = text.plain
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
with open(self.log_file, "a", encoding="utf-8") as f:
f.write(f"[{timestamp}] {plain_text}\n")
# Strip ANSI color codes for file output
clean_message = message.replace(Fore.RESET, "").replace(
Style.RESET_ALL, ""
)
for color in vars(Fore).values():
if isinstance(color, str):
clean_message = clean_message.replace(color, "")
f.write(f"[{timestamp}] {clean_message}\n")
def _log(
self,
@@ -173,9 +100,8 @@ class AsyncLogger(AsyncLoggerBase):
message: str,
tag: str,
params: Optional[Dict[str, Any]] = None,
colors: Optional[Dict[str, LogColor]] = None,
boxes: Optional[List[str]] = None,
base_color: Optional[LogColor] = None,
colors: Optional[Dict[str, str]] = None,
base_color: Optional[str] = None,
**kwargs,
):
"""
@@ -187,44 +113,42 @@ class AsyncLogger(AsyncLoggerBase):
tag: Tag for the message
params: Parameters to format into the message
colors: Color overrides for specific parameters
boxes: Box overrides for specific parameters
base_color: Base color for the entire message
"""
if level.value < self.log_level.value:
return
# avoid conflict with rich formatting
parsed_message = message.replace("[", "[[").replace("]", "]]")
# Format the message with parameters if provided
if params:
# FIXME: If a parameter has a float format spec such as {value:.2f}, colors and
# boxes may not be applied correctly: the value 0.23333 is rendered as "0.23",
# but the replace() below still searches for "0.23333" when wrapping it in
# "[color]0.23333[/color]".
formatted_message = parsed_message.format(**params)
for key, value in params.items():
# value_str may discard `[` and `]`, so we need to replace it.
value_str = str(value).replace("[", "[[").replace("]", "]]")
# check whether a color needs to be applied to this value
if colors and key in colors:
color_str = f"[{colors[key]}]{value_str}[/{colors[key]}]"
formatted_message = formatted_message.replace(value_str, color_str)
value_str = color_str
try:
# First format the message with raw parameters
formatted_message = message.format(**params)
# check whether this value needs to be wrapped in a box
if boxes and key in boxes:
formatted_message = formatted_message.replace(value_str,
create_box_message(value_str, type=str(level)))
# Then apply colors if specified
if colors:
for key, color in colors.items():
# Find the formatted value in the message and wrap it with color
if key in params:
value_str = str(params[key])
formatted_message = formatted_message.replace(
value_str, f"{color}{value_str}{Style.RESET_ALL}"
)
except KeyError as e:
formatted_message = (
f"LOGGING ERROR: Missing parameter {e} in message template"
)
level = LogLevel.ERROR
else:
formatted_message = parsed_message
formatted_message = message
# Construct the full log line
color: LogColor = base_color or self.colors[level]
log_line = f"[{color}]{self._format_tag(tag)} {self._get_icon(tag)} {formatted_message} [/{color}]"
color = base_color or self.colors[level]
log_line = f"{color}{self._format_tag(tag)} {self._get_icon(tag)} {formatted_message}{Style.RESET_ALL}"
# Output to console if verbose
if self.verbose or kwargs.get("force_verbose", False):
self.console.print(log_line)
print(log_line)
# Write to file if configured
self._write_to_file(log_line)
@@ -244,22 +168,6 @@ class AsyncLogger(AsyncLoggerBase):
def warning(self, message: str, tag: str = "WARNING", **kwargs):
"""Log a warning message."""
self._log(LogLevel.WARNING, message, tag, **kwargs)
def critical(self, message: str, tag: str = "CRITICAL", **kwargs):
"""Log a critical message."""
self._log(LogLevel.ERROR, message, tag, **kwargs)
def exception(self, message: str, tag: str = "EXCEPTION", **kwargs):
"""Log an exception message."""
self._log(LogLevel.ERROR, message, tag, **kwargs)
def fatal(self, message: str, tag: str = "FATAL", **kwargs):
"""Log a fatal message."""
self._log(LogLevel.ERROR, message, tag, **kwargs)
def alert(self, message: str, tag: str = "ALERT", **kwargs):
"""Log an alert message."""
self._log(LogLevel.ERROR, message, tag, **kwargs)
def notice(self, message: str, tag: str = "NOTICE", **kwargs):
"""Log a notice message."""
self._log(LogLevel.INFO, message, tag, **kwargs)
def error(self, message: str, tag: str = "ERROR", **kwargs):
"""Log an error message."""
@@ -271,7 +179,7 @@ class AsyncLogger(AsyncLoggerBase):
success: bool,
timing: float,
tag: str = "FETCH",
url_length: int = 100,
url_length: int = 50,
):
"""
Convenience method for logging URL fetch status.
@@ -283,20 +191,19 @@ class AsyncLogger(AsyncLoggerBase):
tag: Tag for the message
url_length: Maximum length for URL in log
"""
decoded_url = unquote(url)
readable_url = self._shorten(decoded_url, url_length)
self._log(
level=LogLevel.SUCCESS if success else LogLevel.ERROR,
message="{url} | {status} | : {timing:.2f}s",
message="{url:.{url_length}}... | Status: {status} | Time: {timing:.2f}s",
tag=tag,
params={
"url": readable_url,
"status": "" if success else "",
"url": url,
"url_length": url_length,
"status": success,
"timing": timing,
},
colors={
"status": LogColor.SUCCESS if success else LogColor.ERROR,
"timing": LogColor.WARNING,
"status": Fore.GREEN if success else Fore.RED,
"timing": Fore.YELLOW,
},
)
@@ -312,63 +219,9 @@ class AsyncLogger(AsyncLoggerBase):
tag: Tag for the message
url_length: Maximum length for URL in log
"""
decoded_url = unquote(url)
readable_url = self._shorten(decoded_url, url_length)
self._log(
level=LogLevel.ERROR,
message="{url} | Error: {error}",
message="{url:.{url_length}}... | Error: {error}",
tag=tag,
params={"url": readable_url, "error": error},
params={"url": url, "url_length": url_length, "error": error},
)
class AsyncFileLogger(AsyncLoggerBase):
"""
File-only asynchronous logger that writes logs to a specified file.
"""
def __init__(self, log_file: str):
"""
Initialize the file logger.
Args:
log_file: File path for logging
"""
self.log_file = log_file
os.makedirs(os.path.dirname(os.path.abspath(log_file)), exist_ok=True)
def _write_to_file(self, level: str, message: str, tag: str):
"""Write a message to the log file."""
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
with open(self.log_file, "a", encoding="utf-8") as f:
f.write(f"[{timestamp}] [{level}] [{tag}] {message}\n")
def debug(self, message: str, tag: str = "DEBUG", **kwargs):
"""Log a debug message to file."""
self._write_to_file("DEBUG", message, tag)
def info(self, message: str, tag: str = "INFO", **kwargs):
"""Log an info message to file."""
self._write_to_file("INFO", message, tag)
def success(self, message: str, tag: str = "SUCCESS", **kwargs):
"""Log a success message to file."""
self._write_to_file("SUCCESS", message, tag)
def warning(self, message: str, tag: str = "WARNING", **kwargs):
"""Log a warning message to file."""
self._write_to_file("WARNING", message, tag)
def error(self, message: str, tag: str = "ERROR", **kwargs):
"""Log an error message to file."""
self._write_to_file("ERROR", message, tag)
def url_status(self, url: str, success: bool, timing: float, tag: str = "FETCH", url_length: int = 100):
"""Log URL fetch status to file."""
status = "SUCCESS" if success else "FAILED"
message = f"{url[:url_length]}... | Status: {status} | Time: {timing:.2f}s"
self._write_to_file("URL_STATUS", message, tag)
def error_status(self, url: str, error: str, tag: str = "ERROR", url_length: int = 100):
"""Log error status to file."""
message = f"{url[:url_length]}... | Error: {error}"
self._write_to_file("ERROR", message, tag)

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,293 +0,0 @@
# browser_adapter.py
"""
Browser adapter for Crawl4AI to support both Playwright and undetected browsers
with minimal changes to the existing codebase.
"""
from abc import ABC, abstractmethod
from typing import List, Dict, Any, Optional, Callable
import time
import json
# Import both, but use conditionally
try:
from playwright.async_api import Page
except ImportError:
Page = Any
try:
from patchright.async_api import Page as UndetectedPage
except ImportError:
UndetectedPage = Any
class BrowserAdapter(ABC):
"""Abstract adapter for browser-specific operations"""
@abstractmethod
async def evaluate(self, page: Page, expression: str, arg: Any = None) -> Any:
"""Execute JavaScript in the page"""
pass
@abstractmethod
async def setup_console_capture(self, page: Page, captured_console: List[Dict]) -> Optional[Callable]:
"""Setup console message capturing, returns handler function if needed"""
pass
@abstractmethod
async def setup_error_capture(self, page: Page, captured_console: List[Dict]) -> Optional[Callable]:
"""Setup error capturing, returns handler function if needed"""
pass
@abstractmethod
async def retrieve_console_messages(self, page: Page) -> List[Dict]:
"""Retrieve captured console messages (for undetected browsers)"""
pass
@abstractmethod
async def cleanup_console_capture(self, page: Page, handle_console: Optional[Callable], handle_error: Optional[Callable]):
"""Clean up console event listeners"""
pass
@abstractmethod
def get_imports(self) -> tuple:
"""Get the appropriate imports for this adapter"""
pass
class PlaywrightAdapter(BrowserAdapter):
"""Adapter for standard Playwright"""
async def evaluate(self, page: Page, expression: str, arg: Any = None) -> Any:
"""Standard Playwright evaluate"""
if arg is not None:
return await page.evaluate(expression, arg)
return await page.evaluate(expression)
async def setup_console_capture(self, page: Page, captured_console: List[Dict]) -> Optional[Callable]:
"""Setup console capture using Playwright's event system"""
def handle_console_capture(msg):
try:
message_type = "unknown"
try:
message_type = msg.type
except:
pass
message_text = "unknown"
try:
message_text = msg.text
except:
pass
entry = {
"type": message_type,
"text": message_text,
"timestamp": time.time()
}
captured_console.append(entry)
except Exception as e:
captured_console.append({
"type": "console_capture_error",
"error": str(e),
"timestamp": time.time()
})
page.on("console", handle_console_capture)
return handle_console_capture
async def setup_error_capture(self, page: Page, captured_console: List[Dict]) -> Optional[Callable]:
"""Setup error capture using Playwright's event system"""
def handle_pageerror_capture(err):
try:
error_message = "Unknown error"
try:
error_message = err.message
except:
pass
error_stack = ""
try:
error_stack = err.stack
except:
pass
captured_console.append({
"type": "error",
"text": error_message,
"stack": error_stack,
"timestamp": time.time()
})
except Exception as e:
captured_console.append({
"type": "pageerror_capture_error",
"error": str(e),
"timestamp": time.time()
})
page.on("pageerror", handle_pageerror_capture)
return handle_pageerror_capture
async def retrieve_console_messages(self, page: Page) -> List[Dict]:
"""Not needed for Playwright - messages are captured via events"""
return []
async def cleanup_console_capture(self, page: Page, handle_console: Optional[Callable], handle_error: Optional[Callable]):
"""Remove event listeners"""
if handle_console:
page.remove_listener("console", handle_console)
if handle_error:
page.remove_listener("pageerror", handle_error)
def get_imports(self) -> tuple:
"""Return Playwright imports"""
from playwright.async_api import Page, Error
from playwright.async_api import TimeoutError as PlaywrightTimeoutError
return Page, Error, PlaywrightTimeoutError
class UndetectedAdapter(BrowserAdapter):
"""Adapter for undetected browser automation with stealth features"""
def __init__(self):
self._console_script_injected = {}
async def evaluate(self, page: UndetectedPage, expression: str, arg: Any = None) -> Any:
"""Undetected browser evaluate with isolated context"""
# For most evaluations, use isolated context for stealth
# Only use non-isolated when we need to access our injected console capture
isolated = not (
"__console" in expression or
"__captured" in expression or
"__error" in expression or
"window.__" in expression
)
if arg is not None:
return await page.evaluate(expression, arg, isolated_context=isolated)
return await page.evaluate(expression, isolated_context=isolated)
async def setup_console_capture(self, page: UndetectedPage, captured_console: List[Dict]) -> Optional[Callable]:
"""Setup console capture using JavaScript injection for undetected browsers"""
if not self._console_script_injected.get(page, False):
await page.add_init_script("""
// Initialize console capture
window.__capturedConsole = [];
window.__capturedErrors = [];
// Store original console methods
const originalConsole = {};
['log', 'info', 'warn', 'error', 'debug'].forEach(method => {
originalConsole[method] = console[method];
console[method] = function(...args) {
try {
window.__capturedConsole.push({
type: method,
text: args.map(arg => {
try {
if (typeof arg === 'object') {
return JSON.stringify(arg);
}
return String(arg);
} catch (e) {
return '[Object]';
}
}).join(' '),
timestamp: Date.now()
});
} catch (e) {
// Fail silently to avoid detection
}
// Call original method
originalConsole[method].apply(console, args);
};
});
""")
self._console_script_injected[page] = True
return None # No handler function needed for undetected browser
async def setup_error_capture(self, page: UndetectedPage, captured_console: List[Dict]) -> Optional[Callable]:
"""Setup error capture using JavaScript injection for undetected browsers"""
if not self._console_script_injected.get(page, False):
await page.add_init_script("""
// Capture errors
window.addEventListener('error', (event) => {
try {
window.__capturedErrors.push({
type: 'error',
text: event.message,
stack: event.error ? event.error.stack : '',
filename: event.filename,
lineno: event.lineno,
colno: event.colno,
timestamp: Date.now()
});
} catch (e) {
// Fail silently
}
});
// Capture unhandled promise rejections
window.addEventListener('unhandledrejection', (event) => {
try {
window.__capturedErrors.push({
type: 'unhandledrejection',
text: event.reason ? String(event.reason) : 'Unhandled Promise Rejection',
stack: event.reason && event.reason.stack ? event.reason.stack : '',
timestamp: Date.now()
});
} catch (e) {
// Fail silently
}
});
""")
self._console_script_injected[page] = True
return None # No handler function needed for undetected browser
async def retrieve_console_messages(self, page: UndetectedPage) -> List[Dict]:
"""Retrieve captured console messages and errors from the page"""
messages = []
try:
# Get console messages
console_messages = await page.evaluate(
"() => { const msgs = window.__capturedConsole || []; window.__capturedConsole = []; return msgs; }",
isolated_context=False
)
messages.extend(console_messages)
# Get errors
errors = await page.evaluate(
"() => { const errs = window.__capturedErrors || []; window.__capturedErrors = []; return errs; }",
isolated_context=False
)
messages.extend(errors)
# Convert timestamps from JS to Python format
for msg in messages:
if 'timestamp' in msg and isinstance(msg['timestamp'], (int, float)):
msg['timestamp'] = msg['timestamp'] / 1000.0 # Convert from ms to seconds
except Exception:
# If retrieval fails, return empty list
pass
return messages
async def cleanup_console_capture(self, page: UndetectedPage, handle_console: Optional[Callable], handle_error: Optional[Callable]):
"""Clean up for undetected browser - retrieve final messages"""
# For undetected browser, we don't have event listeners to remove
# but we should retrieve any final messages
final_messages = await self.retrieve_console_messages(page)
return final_messages
def get_imports(self) -> tuple:
"""Return undetected browser imports"""
from patchright.async_api import Page, Error
from patchright.async_api import TimeoutError as PlaywrightTimeoutError
return Page, Error, PlaywrightTimeoutError
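# Illustrative sketch: switching adapters at runtime. It assumes the caller already
# holds a live `page` object from whichever browser stack is in use, and exercises
# only the adapter methods defined above.
async def _console_capture_demo(page, use_undetected: bool = False):
    adapter = UndetectedAdapter() if use_undetected else PlaywrightAdapter()
    captured: list = []
    on_console = await adapter.setup_console_capture(page, captured)
    on_error = await adapter.setup_error_capture(page, captured)
    await adapter.evaluate(page, "console.log('hello from the page')")
    # Playwright fills `captured` via events; the undetected path pulls messages back here.
    captured.extend(await adapter.retrieve_console_messages(page))
    await adapter.cleanup_console_capture(page, on_console, on_error)
    return captured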

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -4,6 +4,7 @@ from collections import Counter
import string
from .model_loader import load_nltk_punkt
# Define the abstract base class for chunking strategies
class ChunkingStrategy(ABC):
"""
@@ -71,7 +72,6 @@ class NlpSentenceChunking(ChunkingStrategy):
"""
Initialize the NlpSentenceChunking object.
"""
from crawl4ai.le.legacy.model_loader import load_nltk_punkt
load_nltk_punkt()
def chunk(self, text: str) -> list:

File diff suppressed because it is too large

View File

@@ -1,837 +0,0 @@
import time
import uuid
import threading
import psutil
from datetime import datetime, timedelta
from typing import Dict, Optional, List
import threading
from rich.console import Console
from rich.layout import Layout
from rich.panel import Panel
from rich.table import Table
from rich.text import Text
from rich.live import Live
from rich import box
from ..models import CrawlStatus
class TerminalUI:
"""Terminal user interface for CrawlerMonitor using rich library."""
def __init__(self, refresh_rate: float = 1.0, max_width: int = 120):
"""
Initialize the terminal UI.
Args:
refresh_rate: How often to refresh the UI (in seconds)
max_width: Maximum width of the UI in characters
"""
self.console = Console(width=max_width)
self.layout = Layout()
self.refresh_rate = refresh_rate
self.stop_event = threading.Event()
self.ui_thread = None
self.monitor = None # Will be set by CrawlerMonitor
self.max_width = max_width
# Setup layout - vertical layout (top to bottom)
self.layout.split(
Layout(name="header", size=3),
Layout(name="pipeline_status", size=10),
Layout(name="task_details", ratio=1),
Layout(name="footer", size=3) # Increased footer size to fit all content
)
def start(self, monitor):
"""Start the UI thread."""
self.monitor = monitor
self.stop_event.clear()
self.ui_thread = threading.Thread(target=self._ui_loop)
self.ui_thread.daemon = True
self.ui_thread.start()
def stop(self):
"""Stop the UI thread."""
if self.ui_thread and self.ui_thread.is_alive():
self.stop_event.set()
# Only try to join if we're not in the UI thread
# This prevents "cannot join current thread" errors
if threading.current_thread() != self.ui_thread:
self.ui_thread.join(timeout=5.0)
def _ui_loop(self):
"""Main UI rendering loop."""
import sys
import select
import termios
import tty
# Setup terminal for non-blocking input
old_settings = termios.tcgetattr(sys.stdin)
try:
tty.setcbreak(sys.stdin.fileno())
# Use Live display to render the UI
with Live(self.layout, refresh_per_second=1/self.refresh_rate, screen=True) as live:
self.live = live # Store the live display for updates
# Main UI loop
while not self.stop_event.is_set():
self._update_display()
# Check for key press (non-blocking)
if select.select([sys.stdin], [], [], 0)[0]:
key = sys.stdin.read(1)
# Check for 'q' to quit
if key == 'q':
# Signal stop but don't call monitor.stop() from UI thread
# as it would cause the thread to try to join itself
self.stop_event.set()
self.monitor.is_running = False
break
time.sleep(self.refresh_rate)
# Just check if the monitor was stopped
if not self.monitor.is_running:
break
finally:
# Restore terminal settings
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_settings)
def _update_display(self):
"""Update the terminal display with current statistics."""
if not self.monitor:
return
# Update crawler status panel
self.layout["header"].update(self._create_status_panel())
# Update pipeline status panel and task details panel
self.layout["pipeline_status"].update(self._create_pipeline_panel())
self.layout["task_details"].update(self._create_task_details_panel())
# Update footer
self.layout["footer"].update(self._create_footer())
def _create_status_panel(self) -> Panel:
"""Create the crawler status panel."""
summary = self.monitor.get_summary()
# Format memory status with icon
memory_status = self.monitor.get_memory_status()
memory_icon = "🟢" # Default NORMAL
if memory_status == "PRESSURE":
memory_icon = "🟠"
elif memory_status == "CRITICAL":
memory_icon = "🔴"
# Get current memory usage
current_memory = psutil.Process().memory_info().rss / (1024 * 1024) # MB
memory_percent = (current_memory * 1024 * 1024 / psutil.virtual_memory().total) * 100  # current_memory is in MB, total is in bytes
# Format runtime
runtime = self.monitor._format_time(time.time() - self.monitor.start_time if self.monitor.start_time else 0)
# Create the status text
status_text = Text()
status_text.append(f"Web Crawler Dashboard | Runtime: {runtime} | Memory: {memory_percent:.1f}% {memory_icon}\n")
status_text.append(f"Status: {memory_status} | URLs: {summary['urls_completed']}/{summary['urls_total']} | ")
status_text.append(f"Peak Mem: {summary['peak_memory_percent']:.1f}% at {self.monitor._format_time(summary['peak_memory_time'])}")
return Panel(status_text, title="Crawler Status", border_style="blue")
def _create_pipeline_panel(self) -> Panel:
"""Create the pipeline status panel."""
summary = self.monitor.get_summary()
queue_stats = self.monitor.get_queue_stats()
# Create a table for status counts
table = Table(show_header=True, box=None)
table.add_column("Status", style="cyan")
table.add_column("Count", justify="right")
table.add_column("Percentage", justify="right")
table.add_column("Stat", style="cyan")
table.add_column("Value", justify="right")
# Calculate overall progress
progress = f"{summary['urls_completed']}/{summary['urls_total']}"
progress_percent = f"{summary['completion_percentage']:.1f}%"
# Add rows for each status
table.add_row(
"Overall Progress",
progress,
progress_percent,
"Est. Completion",
summary.get('estimated_completion_time', "N/A")
)
# Add rows for each status
status_counts = summary['status_counts']
total = summary['urls_total'] or 1 # Avoid division by zero
# Status rows
table.add_row(
"Completed",
str(status_counts.get(CrawlStatus.COMPLETED.name, 0)),
f"{status_counts.get(CrawlStatus.COMPLETED.name, 0) / total * 100:.1f}%",
"Avg. Time/URL",
f"{summary.get('avg_task_duration', 0):.2f}s"
)
table.add_row(
"Failed",
str(status_counts.get(CrawlStatus.FAILED.name, 0)),
f"{status_counts.get(CrawlStatus.FAILED.name, 0) / total * 100:.1f}%",
"Concurrent Tasks",
str(status_counts.get(CrawlStatus.IN_PROGRESS.name, 0))
)
table.add_row(
"In Progress",
str(status_counts.get(CrawlStatus.IN_PROGRESS.name, 0)),
f"{status_counts.get(CrawlStatus.IN_PROGRESS.name, 0) / total * 100:.1f}%",
"Queue Size",
str(queue_stats['total_queued'])
)
table.add_row(
"Queued",
str(status_counts.get(CrawlStatus.QUEUED.name, 0)),
f"{status_counts.get(CrawlStatus.QUEUED.name, 0) / total * 100:.1f}%",
"Max Wait Time",
f"{queue_stats['highest_wait_time']:.1f}s"
)
# Requeued is a special case as it's not a status
requeued_count = summary.get('requeued_count', 0)
table.add_row(
"Requeued",
str(requeued_count),
f"{summary.get('requeue_rate', 0):.1f}%",
"Avg Wait Time",
f"{queue_stats['avg_wait_time']:.1f}s"
)
# Add empty row for spacing
table.add_row(
"",
"",
"",
"Requeue Rate",
f"{summary.get('requeue_rate', 0):.1f}%"
)
return Panel(table, title="Pipeline Status", border_style="green")
def _create_task_details_panel(self) -> Panel:
"""Create the task details panel."""
# Create a table for task details
table = Table(show_header=True, expand=True)
table.add_column("Task ID", style="cyan", no_wrap=True, width=10)
table.add_column("URL", style="blue", ratio=3)
table.add_column("Status", style="green", width=15)
table.add_column("Memory", justify="right", width=8)
table.add_column("Peak", justify="right", width=8)
table.add_column("Duration", justify="right", width=10)
# Get all task stats
task_stats = self.monitor.get_all_task_stats()
# Add summary row
active_tasks = sum(1 for stats in task_stats.values()
if stats['status'] == CrawlStatus.IN_PROGRESS.name)
total_memory = sum(stats['memory_usage'] for stats in task_stats.values())
total_peak = sum(stats['peak_memory'] for stats in task_stats.values())
# Summary row with separators
table.add_row(
"SUMMARY",
f"Total: {len(task_stats)}",
f"Active: {active_tasks}",
f"{total_memory:.1f}",
f"{total_peak:.1f}",
"N/A"
)
# Add a separator
table.add_row("" * 10, "" * 20, "" * 10, "" * 8, "" * 8, "" * 10)
# Status icons
status_icons = {
CrawlStatus.QUEUED.name: "",
CrawlStatus.IN_PROGRESS.name: "🔄",
CrawlStatus.COMPLETED.name: "",
CrawlStatus.FAILED.name: ""
}
# Calculate how many rows we can display based on available space
# We can display more rows now that we have a dedicated panel
display_count = min(len(task_stats), 20) # Display up to 20 tasks
# Add rows for each task
for task_id, stats in sorted(
list(task_stats.items())[:display_count],
# Sort: 1. IN_PROGRESS first, 2. QUEUED, 3. COMPLETED/FAILED by recency
key=lambda x: (
0 if x[1]['status'] == CrawlStatus.IN_PROGRESS.name else
1 if x[1]['status'] == CrawlStatus.QUEUED.name else
2,
-1 * (x[1].get('end_time', 0) or 0) # Most recent first
)
):
# Truncate task_id and URL for display
short_id = task_id[:8]
url = stats['url']
if len(url) > 50: # Allow longer URLs in the dedicated panel
url = url[:47] + "..."
# Format status with icon
status = f"{status_icons.get(stats['status'], '?')} {stats['status']}"
# Add row
table.add_row(
short_id,
url,
status,
f"{stats['memory_usage']:.1f}",
f"{stats['peak_memory']:.1f}",
stats['duration'] if 'duration' in stats else "0:00"
)
return Panel(table, title="Task Details", border_style="yellow")
def _create_footer(self) -> Panel:
"""Create the footer panel."""
from rich.columns import Columns
from rich.align import Align
memory_status = self.monitor.get_memory_status()
memory_icon = "🟢" # Default NORMAL
if memory_status == "PRESSURE":
memory_icon = "🟠"
elif memory_status == "CRITICAL":
memory_icon = "🔴"
# Left section - memory status
left_text = Text()
left_text.append("Memory Status: ", style="bold")
status_style = "green" if memory_status == "NORMAL" else "yellow" if memory_status == "PRESSURE" else "red bold"
left_text.append(f"{memory_icon} {memory_status}", style=status_style)
# Center section - copyright
center_text = Text("© Crawl4AI 2025 | Made by UncleCode", style="cyan italic")
# Right section - quit instruction
right_text = Text()
right_text.append("Press ", style="bold")
right_text.append("q", style="white on blue")
right_text.append(" to quit", style="bold")
# Create columns with the three sections
footer_content = Columns(
[
Align.left(left_text),
Align.center(center_text),
Align.right(right_text)
],
expand=True
)
# Create a more visible footer panel
return Panel(
footer_content,
border_style="white",
padding=(0, 1) # Add padding for better visibility
)
class CrawlerMonitor:
"""
Comprehensive monitoring and visualization system for tracking web crawler operations in real-time.
Provides a terminal-based dashboard that displays task statuses, memory usage, queue statistics,
and performance metrics.
"""
def __init__(
self,
urls_total: int = 0,
refresh_rate: float = 1.0,
enable_ui: bool = True,
max_width: int = 120
):
"""
Initialize the CrawlerMonitor.
Args:
urls_total: Total number of URLs to be crawled
refresh_rate: How often to refresh the UI (in seconds)
enable_ui: Whether to display the terminal UI
max_width: Maximum width of the UI in characters
"""
# Core monitoring attributes
self.stats = {} # Task ID -> stats dict
self.memory_status = "NORMAL"
self.start_time = None
self.end_time = None
self.is_running = False
self.queue_stats = {
"total_queued": 0,
"highest_wait_time": 0.0,
"avg_wait_time": 0.0
}
self.urls_total = urls_total
self.urls_completed = 0
self.peak_memory_percent = 0.0
self.peak_memory_time = 0.0
# Status counts
self.status_counts = {
CrawlStatus.QUEUED.name: 0,
CrawlStatus.IN_PROGRESS.name: 0,
CrawlStatus.COMPLETED.name: 0,
CrawlStatus.FAILED.name: 0
}
# Requeue tracking
self.requeued_count = 0
# Thread-safety
self._lock = threading.RLock()
# Terminal UI
self.enable_ui = enable_ui
self.terminal_ui = TerminalUI(
refresh_rate=refresh_rate,
max_width=max_width
) if enable_ui else None
def start(self):
"""
Start the monitoring session.
- Initializes the start_time
- Sets is_running to True
- Starts the terminal UI if enabled
"""
with self._lock:
self.start_time = time.time()
self.is_running = True
# Start the terminal UI
if self.enable_ui and self.terminal_ui:
self.terminal_ui.start(self)
def stop(self):
"""
Stop the monitoring session.
- Records end_time
- Sets is_running to False
- Stops the terminal UI
- Generates final summary statistics
"""
with self._lock:
self.end_time = time.time()
self.is_running = False
# Stop the terminal UI
if self.enable_ui and self.terminal_ui:
self.terminal_ui.stop()
def add_task(self, task_id: str, url: str):
"""
Register a new task with the monitor.
Args:
task_id: Unique identifier for the task
url: URL being crawled
The task is initialized with:
- status: QUEUED
- url: The URL to crawl
- enqueue_time: Current time
- memory_usage: 0
- peak_memory: 0
- wait_time: 0
- retry_count: 0
"""
with self._lock:
self.stats[task_id] = {
"task_id": task_id,
"url": url,
"status": CrawlStatus.QUEUED.name,
"enqueue_time": time.time(),
"start_time": None,
"end_time": None,
"memory_usage": 0.0,
"peak_memory": 0.0,
"error_message": "",
"wait_time": 0.0,
"retry_count": 0,
"duration": "0:00",
"counted_requeue": False
}
# Update status counts
self.status_counts[CrawlStatus.QUEUED.name] += 1
def update_task(
self,
task_id: str,
status: Optional[CrawlStatus] = None,
start_time: Optional[float] = None,
end_time: Optional[float] = None,
memory_usage: Optional[float] = None,
peak_memory: Optional[float] = None,
error_message: Optional[str] = None,
retry_count: Optional[int] = None,
wait_time: Optional[float] = None
):
"""
Update statistics for a specific task.
Args:
task_id: Unique identifier for the task
status: New status (QUEUED, IN_PROGRESS, COMPLETED, FAILED)
start_time: When task execution started
end_time: When task execution ended
memory_usage: Current memory usage in MB
peak_memory: Maximum memory usage in MB
error_message: Error description if failed
retry_count: Number of retry attempts
wait_time: Time spent in queue
Updates task statistics and updates status counts.
If status changes, decrements old status count and
increments new status count.
"""
with self._lock:
# Check if task exists
if task_id not in self.stats:
return
task_stats = self.stats[task_id]
# Update status counts if status is changing
old_status = task_stats["status"]
if status and status.name != old_status:
self.status_counts[old_status] -= 1
self.status_counts[status.name] += 1
# Track completion
if status == CrawlStatus.COMPLETED:
self.urls_completed += 1
# Track requeues
if old_status in [CrawlStatus.COMPLETED.name, CrawlStatus.FAILED.name] and not task_stats.get("counted_requeue", False):
self.requeued_count += 1
task_stats["counted_requeue"] = True
# Update task statistics
if status:
task_stats["status"] = status.name
if start_time is not None:
task_stats["start_time"] = start_time
if end_time is not None:
task_stats["end_time"] = end_time
if memory_usage is not None:
task_stats["memory_usage"] = memory_usage
# Update peak memory if necessary (memory_usage is in MB, virtual_memory().total is in bytes)
current_percent = (memory_usage * 1024 * 1024 / psutil.virtual_memory().total) * 100
if current_percent > self.peak_memory_percent:
self.peak_memory_percent = current_percent
self.peak_memory_time = time.time()
if peak_memory is not None:
task_stats["peak_memory"] = peak_memory
if error_message is not None:
task_stats["error_message"] = error_message
if retry_count is not None:
task_stats["retry_count"] = retry_count
if wait_time is not None:
task_stats["wait_time"] = wait_time
# Calculate duration
if task_stats["start_time"]:
end = task_stats["end_time"] or time.time()
duration = end - task_stats["start_time"]
task_stats["duration"] = self._format_time(duration)
def update_memory_status(self, status: str):
"""
Update the current memory status.
Args:
status: Memory status (NORMAL, PRESSURE, CRITICAL, or custom)
Also updates the UI to reflect the new status.
"""
with self._lock:
self.memory_status = status
def update_queue_statistics(
self,
total_queued: int,
highest_wait_time: float,
avg_wait_time: float
):
"""
Update statistics related to the task queue.
Args:
total_queued: Number of tasks currently in queue
highest_wait_time: Longest wait time of any queued task
avg_wait_time: Average wait time across all queued tasks
"""
with self._lock:
self.queue_stats = {
"total_queued": total_queued,
"highest_wait_time": highest_wait_time,
"avg_wait_time": avg_wait_time
}
def get_task_stats(self, task_id: str) -> Dict:
"""
Get statistics for a specific task.
Args:
task_id: Unique identifier for the task
Returns:
Dictionary containing all task statistics
"""
with self._lock:
return self.stats.get(task_id, {}).copy()
def get_all_task_stats(self) -> Dict[str, Dict]:
"""
Get statistics for all tasks.
Returns:
Dictionary mapping task_ids to their statistics
"""
with self._lock:
return self.stats.copy()
def get_memory_status(self) -> str:
"""
Get the current memory status.
Returns:
Current memory status string
"""
with self._lock:
return self.memory_status
def get_queue_stats(self) -> Dict:
"""
Get current queue statistics.
Returns:
Dictionary with queue statistics including:
- total_queued: Number of tasks in queue
- highest_wait_time: Longest wait time
- avg_wait_time: Average wait time
"""
with self._lock:
return self.queue_stats.copy()
def get_summary(self) -> Dict:
"""
Get a summary of all crawler statistics.
Returns:
Dictionary containing:
- runtime: Total runtime in seconds
- urls_total: Total URLs to process
- urls_completed: Number of completed URLs
- completion_percentage: Percentage complete
- status_counts: Count of tasks in each status
- memory_status: Current memory status
- peak_memory_percent: Highest memory usage
- peak_memory_time: When peak memory occurred
- avg_task_duration: Average task processing time
- estimated_completion_time: Projected finish time
- requeue_rate: Percentage of tasks requeued
"""
with self._lock:
# Calculate runtime
current_time = time.time()
runtime = current_time - (self.start_time or current_time)
# Calculate completion percentage
completion_percentage = 0
if self.urls_total > 0:
completion_percentage = (self.urls_completed / self.urls_total) * 100
# Calculate average task duration for completed tasks
completed_tasks = [
task for task in self.stats.values()
if task["status"] == CrawlStatus.COMPLETED.name and task.get("start_time") and task.get("end_time")
]
avg_task_duration = 0
if completed_tasks:
total_duration = sum(task["end_time"] - task["start_time"] for task in completed_tasks)
avg_task_duration = total_duration / len(completed_tasks)
# Calculate requeue rate
requeue_rate = 0
if len(self.stats) > 0:
requeue_rate = (self.requeued_count / len(self.stats)) * 100
# Calculate estimated completion time
estimated_completion_time = "N/A"
if avg_task_duration > 0 and self.urls_total > 0 and self.urls_completed > 0:
remaining_tasks = self.urls_total - self.urls_completed
estimated_seconds = remaining_tasks * avg_task_duration
estimated_completion_time = self._format_time(estimated_seconds)
return {
"runtime": runtime,
"urls_total": self.urls_total,
"urls_completed": self.urls_completed,
"completion_percentage": completion_percentage,
"status_counts": self.status_counts.copy(),
"memory_status": self.memory_status,
"peak_memory_percent": self.peak_memory_percent,
"peak_memory_time": self.peak_memory_time,
"avg_task_duration": avg_task_duration,
"estimated_completion_time": estimated_completion_time,
"requeue_rate": requeue_rate,
"requeued_count": self.requeued_count
}
def render(self):
"""
Render the terminal UI.
This is the main UI rendering loop that:
1. Updates all statistics
2. Formats the display
3. Renders the ASCII interface
4. Handles keyboard input
Note: The actual rendering is handled by the TerminalUI class
which uses the rich library's Live display.
"""
if self.enable_ui and self.terminal_ui:
# Force an update of the UI
if hasattr(self.terminal_ui, '_update_display'):
self.terminal_ui._update_display()
def _format_time(self, seconds: float) -> str:
"""
Format time in hours:minutes:seconds.
Args:
seconds: Time in seconds
Returns:
Formatted time string (e.g., "1:23:45")
"""
# Avoid timedelta.seconds here so durations over 24 hours are not truncated
hours, remainder = divmod(int(seconds), 3600)
minutes, seconds = divmod(remainder, 60)
if hours > 0:
return f"{hours}:{minutes:02}:{seconds:02}"
else:
return f"{minutes}:{seconds:02}"
def _calculate_estimated_completion(self) -> str:
"""
Calculate estimated completion time based on current progress.
Returns:
Formatted time string
"""
summary = self.get_summary()
return summary.get("estimated_completion_time", "N/A")
# Example code for testing
if __name__ == "__main__":
# Initialize the monitor
monitor = CrawlerMonitor(urls_total=100)
# Start monitoring
monitor.start()
try:
# Simulate some tasks
for i in range(20):
task_id = str(uuid.uuid4())
url = f"https://example.com/page{i}"
monitor.add_task(task_id, url)
# Simulate 20% of tasks are already running
if i < 4:
monitor.update_task(
task_id=task_id,
status=CrawlStatus.IN_PROGRESS,
start_time=time.time() - 30, # Started 30 seconds ago
memory_usage=10.5
)
# Simulate 10% of tasks are completed
if i >= 4 and i < 6:
start_time = time.time() - 60
end_time = time.time() - 15
monitor.update_task(
task_id=task_id,
status=CrawlStatus.IN_PROGRESS,
start_time=start_time,
memory_usage=8.2
)
monitor.update_task(
task_id=task_id,
status=CrawlStatus.COMPLETED,
end_time=end_time,
memory_usage=0,
peak_memory=15.7
)
# Simulate 5% of tasks fail
if i >= 6 and i < 7:
start_time = time.time() - 45
end_time = time.time() - 20
monitor.update_task(
task_id=task_id,
status=CrawlStatus.IN_PROGRESS,
start_time=start_time,
memory_usage=12.3
)
monitor.update_task(
task_id=task_id,
status=CrawlStatus.FAILED,
end_time=end_time,
memory_usage=0,
peak_memory=18.2,
error_message="Connection timeout"
)
# Simulate memory pressure
monitor.update_memory_status("PRESSURE")
# Simulate queue statistics
monitor.update_queue_statistics(
total_queued=16, # 20 - 4 (in progress)
highest_wait_time=120.5,
avg_wait_time=60.2
)
# Keep the monitor running for a demonstration
print("Crawler Monitor is running. Press 'q' to exit.")
while monitor.is_running:
time.sleep(0.1)
except KeyboardInterrupt:
print("\nExiting crawler monitor...")
finally:
# Stop the monitor
monitor.stop()
print("Crawler monitor exited successfully.")

View File

@@ -4,8 +4,7 @@ from dotenv import load_dotenv
load_dotenv() # Load environment variables from .env file
# Default provider, ONLY used when the extraction strategy is LLMExtractionStrategy
DEFAULT_PROVIDER = "openai/gpt-4o"
DEFAULT_PROVIDER_API_KEY = "OPENAI_API_KEY"
DEFAULT_PROVIDER = "openai/gpt-4o-mini"
MODEL_REPO_BRANCH = "new-release-0.0.2"
# Provider-model dictionary, ONLY used when the extraction strategy is LLMExtractionStrategy
PROVIDER_MODELS = {
@@ -16,26 +15,10 @@ PROVIDER_MODELS = {
"openai/gpt-4o": os.getenv("OPENAI_API_KEY"),
"openai/o1-mini": os.getenv("OPENAI_API_KEY"),
"openai/o1-preview": os.getenv("OPENAI_API_KEY"),
"openai/o3-mini": os.getenv("OPENAI_API_KEY"),
"openai/o3-mini-high": os.getenv("OPENAI_API_KEY"),
"anthropic/claude-3-haiku-20240307": os.getenv("ANTHROPIC_API_KEY"),
"anthropic/claude-3-opus-20240229": os.getenv("ANTHROPIC_API_KEY"),
"anthropic/claude-3-sonnet-20240229": os.getenv("ANTHROPIC_API_KEY"),
"anthropic/claude-3-5-sonnet-20240620": os.getenv("ANTHROPIC_API_KEY"),
"gemini/gemini-pro": os.getenv("GEMINI_API_KEY"),
'gemini/gemini-1.5-pro': os.getenv("GEMINI_API_KEY"),
'gemini/gemini-2.0-flash': os.getenv("GEMINI_API_KEY"),
'gemini/gemini-2.0-flash-exp': os.getenv("GEMINI_API_KEY"),
'gemini/gemini-2.0-flash-lite-preview-02-05': os.getenv("GEMINI_API_KEY"),
"deepseek/deepseek-chat": os.getenv("DEEPSEEK_API_KEY"),
}
PROVIDER_MODELS_PREFIXES = {
"ollama": "no-token-needed", # Any model from Ollama no need for API token
"groq": os.getenv("GROQ_API_KEY"),
"openai": os.getenv("OPENAI_API_KEY"),
"anthropic": os.getenv("ANTHROPIC_API_KEY"),
"gemini": os.getenv("GEMINI_API_KEY"),
"deepseek": os.getenv("DEEPSEEK_API_KEY"),
}
# Chunk token threshold
@@ -101,46 +84,3 @@ SHOW_DEPRECATION_WARNINGS = True
SCREENSHOT_HEIGHT_TRESHOLD = 10000
PAGE_TIMEOUT = 60000
DOWNLOAD_PAGE_TIMEOUT = 60000
# Global user settings with descriptions and default values
USER_SETTINGS = {
"DEFAULT_LLM_PROVIDER": {
"default": "openai/gpt-4o",
"description": "Default LLM provider in 'company/model' format (e.g., 'openai/gpt-4o', 'anthropic/claude-3-sonnet')",
"type": "string"
},
"DEFAULT_LLM_PROVIDER_TOKEN": {
"default": "",
"description": "API token for the default LLM provider",
"type": "string",
"secret": True
},
"VERBOSE": {
"default": False,
"description": "Enable verbose output for all commands",
"type": "boolean"
},
"BROWSER_HEADLESS": {
"default": True,
"description": "Run browser in headless mode by default",
"type": "boolean"
},
"BROWSER_TYPE": {
"default": "chromium",
"description": "Default browser type (chromium or firefox)",
"type": "string",
"options": ["chromium", "firefox"]
},
"CACHE_MODE": {
"default": "bypass",
"description": "Default cache mode (bypass, use, or refresh)",
"type": "string",
"options": ["bypass", "use", "refresh"]
},
"USER_AGENT_MODE": {
"default": "default",
"description": "Default user agent mode (default, random, or mobile)",
"type": "string",
"options": ["default", "random", "mobile"]
}
}
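# Illustrative only (not part of this module): each USER_SETTINGS entry is a plain dict,
# so a default can be resolved with a simple lookup, e.g.:
#   default_browser = USER_SETTINGS["BROWSER_TYPE"]["default"]   # -> "chromium"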

View File

@@ -1,4 +1,3 @@
import inspect
import re
import time
from bs4 import BeautifulSoup, Tag
@@ -6,46 +5,25 @@ from typing import List, Tuple, Dict, Optional
from rank_bm25 import BM25Okapi
from collections import deque
from bs4 import NavigableString, Comment
from .utils import (
clean_tokens,
perform_completion_with_backoff,
escape_json_string,
sanitize_html,
get_home_folder,
extract_xml_data,
merge_chunks,
)
from .types import LLMConfig
from .config import DEFAULT_PROVIDER, OVERLAP_RATE, WORD_TOKEN_RATE
from .utils import clean_tokens, perform_completion_with_backoff, escape_json_string, sanitize_html, get_home_folder, extract_xml_data
from abc import ABC, abstractmethod
import math
from snowballstemmer import stemmer
from .config import DEFAULT_PROVIDER, OVERLAP_RATE, WORD_TOKEN_RATE
from .models import TokenUsage
from .prompts import PROMPT_FILTER_CONTENT
import os
import json
import hashlib
from pathlib import Path
from concurrent.futures import ThreadPoolExecutor
from .async_logger import AsyncLogger, LogLevel, LogColor
from concurrent.futures import ThreadPoolExecutor, as_completed
from .async_logger import AsyncLogger, LogLevel
from colorama import Fore, Style, init
class RelevantContentFilter(ABC):
"""Abstract base class for content filtering strategies"""
def __init__(
self,
user_query: str = None,
verbose: bool = False,
logger: Optional[AsyncLogger] = None,
):
"""
Initializes the RelevantContentFilter class with optional user query.
Args:
user_query (str): User query for filtering (optional).
verbose (bool): Enable verbose logging (default: False).
"""
def __init__(self, user_query: str = None):
self.user_query = user_query
self.included_tags = {
# Primary structure
@@ -114,8 +92,6 @@ class RelevantContentFilter(ABC):
r"nav|footer|header|sidebar|ads|comment|promo|advert|social|share", re.I
)
self.min_word_count = 2
self.verbose = False
self.logger = logger
@abstractmethod
def filter_content(self, html: str) -> List[str]:
@@ -377,7 +353,6 @@ class RelevantContentFilter(ABC):
except Exception:
return str(tag) # Fallback to original if anything fails
class BM25ContentFilter(RelevantContentFilter):
"""
Content filtering using BM25 algorithm with priority tag handling.
@@ -405,7 +380,6 @@ class BM25ContentFilter(RelevantContentFilter):
user_query: str = None,
bm25_threshold: float = 1.0,
language: str = "english",
use_stemming: bool = True,
):
"""
Initializes the BM25ContentFilter class, if not provided, falls back to page metadata.
@@ -417,11 +391,9 @@ class BM25ContentFilter(RelevantContentFilter):
user_query (str): User query for filtering (optional).
bm25_threshold (float): BM25 threshold for filtering (default: 1.0).
language (str): Language for stemming (default: 'english').
use_stemming (bool): Whether to apply stemming (default: True).
"""
super().__init__(user_query=user_query)
self.bm25_threshold = bm25_threshold
self.use_stemming = use_stemming
self.priority_tags = {
"h1": 5.0,
"h2": 4.0,
@@ -435,7 +407,7 @@ class BM25ContentFilter(RelevantContentFilter):
"pre": 1.5,
"th": 1.5, # Table headers
}
self.stemmer = stemmer(language) if use_stemming else None
self.stemmer = stemmer(language)
def filter_content(self, html: str, min_word_threshold: int = None) -> List[str]:
"""
@@ -482,19 +454,13 @@ class BM25ContentFilter(RelevantContentFilter):
# for _, chunk, _, _ in candidates]
# tokenized_query = [ps.stem(word) for word in query.lower().split()]
if self.use_stemming:
tokenized_corpus = [
[self.stemmer.stemWord(word) for word in chunk.lower().split()]
for _, chunk, _, _ in candidates
]
tokenized_query = [
self.stemmer.stemWord(word) for word in query.lower().split()
]
else:
tokenized_corpus = [
chunk.lower().split() for _, chunk, _, _ in candidates
]
tokenized_query = query.lower().split()
tokenized_corpus = [
[self.stemmer.stemWord(word) for word in chunk.lower().split()]
for _, chunk, _, _ in candidates
]
tokenized_query = [
self.stemmer.stemWord(word) for word in query.lower().split()
]
# tokenized_corpus = [[self.stemmer.stemWord(word) for word in tokenize_text(chunk.lower())]
# for _, chunk, _, _ in candidates]
@@ -529,7 +495,6 @@ class BM25ContentFilter(RelevantContentFilter):
return [self.clean_element(tag) for _, _, tag in selected_candidates]
class PruningContentFilter(RelevantContentFilter):
"""
Content filtering using pruning algorithm with dynamic threshold.
@@ -776,130 +741,110 @@ class PruningContentFilter(RelevantContentFilter):
class_id_score -= 0.5
return class_id_score
class LLMContentFilter(RelevantContentFilter):
"""Content filtering using LLMs to generate relevant markdown.
How it works:
1. Checks a local cache keyed by the HTML and instruction.
2. Splits the HTML into token-bounded chunks.
3. Sends each chunk to the configured LLM with the filtering instruction, in parallel.
4. Extracts the generated markdown blocks from each response and tracks token usage.
5. Caches the aggregated result and returns the markdown blocks.
Attributes:
llm_config (LLMConfig): LLM configuration object.
instruction (str): Instruction for LLM markdown generation
chunk_token_threshold (int): Chunk token threshold for splitting (default: 1e9).
overlap_rate (float): Overlap rate for chunking (default: 0.5).
word_token_rate (float): Word token rate for chunking (default: 0.2).
verbose (bool): Enable verbose logging (default: False).
logger (AsyncLogger): Custom logger for LLM operations (optional).
"""
_UNWANTED_PROPS = {
'provider' : 'Instead, use llm_config=LLMConfig(provider="...")',
'api_token' : 'Instead, use llm_config=LLMConfig(api_token="...")',
'base_url' : 'Instead, use llm_config=LLMConfig(base_url="...")',
'api_base' : 'Instead, use llm_config=LLMConfig(base_url="...")',
}
"""Content filtering using LLMs to generate relevant markdown."""
def __init__(
self,
llm_config: "LLMConfig" = None,
provider: str = DEFAULT_PROVIDER,
api_token: Optional[str] = None,
instruction: str = None,
chunk_token_threshold: int = int(1e9),
overlap_rate: float = OVERLAP_RATE,
word_token_rate: float = WORD_TOKEN_RATE,
# char_token_rate: float = WORD_TOKEN_RATE * 5,
# chunk_mode: str = "char",
verbose: bool = False,
logger: Optional[AsyncLogger] = None,
ignore_cache: bool = True,
# Deprecated properties
provider: str = DEFAULT_PROVIDER,
api_token: Optional[str] = None,
base_url: Optional[str] = None,
api_base: Optional[str] = None,
extra_args: Dict = None,
verbose: bool = False,
logger: Optional[AsyncLogger] = None,
):
super().__init__(None)
self.provider = provider
self.api_token = api_token
self.base_url = base_url or api_base
self.llm_config = llm_config
self.api_token = (
api_token
or PROVIDER_MODELS.get(provider, "no-token")
or os.getenv("OPENAI_API_KEY")
)
self.instruction = instruction
self.chunk_token_threshold = chunk_token_threshold
self.overlap_rate = overlap_rate
self.word_token_rate = word_token_rate or WORD_TOKEN_RATE
# self.chunk_mode: str = chunk_mode
# self.char_token_rate = char_token_rate or word_token_rate / 5
# self.token_rate = word_token_rate if chunk_mode == "word" else self.char_token_rate
self.token_rate = word_token_rate or WORD_TOKEN_RATE
self.word_token_rate = word_token_rate
self.base_url = base_url
self.api_base = api_base or base_url
self.extra_args = extra_args or {}
self.ignore_cache = ignore_cache
self.verbose = verbose
# Setup logger with custom styling for LLM operations
if logger:
self.logger = logger
elif verbose:
self.logger = AsyncLogger(
verbose=verbose,
verbose=True,
icons={
**AsyncLogger.DEFAULT_ICONS,
"LLM": "", # Star for LLM operations
"CHUNK": "", # Diamond for chunks
"CACHE": "", # Lightning for cache operations
"CACHE": "", # Lightning for cache operations
},
colors={
**AsyncLogger.DEFAULT_COLORS,
LogLevel.INFO: LogColor.DIM_MAGENTA # Dimmed purple for LLM ops
},
LogLevel.INFO: Fore.MAGENTA + Style.DIM, # Dimmed purple for LLM ops
}
)
else:
self.logger = None
self.usages = []
self.total_usage = TokenUsage()
def __setattr__(self, name, value):
"""Handle attribute setting."""
# TODO: Planning to set properties dynamically based on the __init__ signature
sig = inspect.signature(self.__init__)
all_params = sig.parameters # Dictionary of parameter names and their details
if name in self._UNWANTED_PROPS and value is not all_params[name].default:
raise AttributeError(f"Setting '{name}' is deprecated. {self._UNWANTED_PROPS[name]}")
super().__setattr__(name, value)
def _get_cache_key(self, html: str, instruction: str) -> str:
"""Generate a unique cache key based on HTML and instruction"""
content = f"{html}{instruction}"
return hashlib.md5(content.encode()).hexdigest()
def _merge_chunks(self, text: str) -> List[str]:
"""Split text into chunks with overlap using char or word mode."""
ov = int(self.chunk_token_threshold * self.overlap_rate)
sections = merge_chunks(
docs=[text],
target_size=self.chunk_token_threshold,
overlap=ov,
word_token_ratio=self.word_token_rate,
)
return sections
"""Split text into chunks with overlap"""
# Calculate tokens and sections
total_tokens = len(text.split()) * self.word_token_rate
num_sections = max(1, math.floor(total_tokens / self.chunk_token_threshold))
adjusted_chunk_threshold = total_tokens / num_sections
def filter_content(self, html: str, ignore_cache: bool = True) -> List[str]:
# Split into words
words = text.split()
chunks = []
current_chunk = []
current_token_count = 0
for word in words:
word_tokens = len(word) * self.word_token_rate
if current_token_count + word_tokens <= adjusted_chunk_threshold:
current_chunk.append(word)
current_token_count += word_tokens
else:
# Add overlap if not the last chunk
if chunks and self.overlap_rate > 0:
overlap_size = int(len(current_chunk) * self.overlap_rate)
current_chunk.extend(current_chunk[-overlap_size:])
chunks.append(" ".join(current_chunk))
current_chunk = [word]
current_token_count = word_tokens
if current_chunk:
chunks.append(" ".join(current_chunk))
return chunks
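# Worked example for the word-based splitting above (illustrative, with assumed values):
# for a 4,000-word text with word_token_rate=0.75 and chunk_token_threshold=1000,
# total_tokens=3000, num_sections=3, and the adjusted threshold is ~1000 tokens per
# chunk, before the overlap_rate-based tail duplication is applied.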
def filter_content(self, html: str, ignore_cache: bool = False) -> List[str]:
if not html or not isinstance(html, str):
return []
if self.logger:
self.logger.info(
"Starting LLM markdown content filtering process",
"Starting LLM content filtering process",
tag="LLM",
params={"provider": self.llm_config.provider},
colors={"provider": LogColor.CYAN},
params={"provider": self.provider},
colors={"provider": Fore.CYAN}
)
# Cache handling
@@ -908,88 +853,65 @@ class LLMContentFilter(RelevantContentFilter):
cache_key = self._get_cache_key(html, self.instruction or "")
cache_file = cache_dir / f"{cache_key}.json"
# if ignore_cache == None:
ignore_cache = self.ignore_cache
if not ignore_cache and cache_file.exists():
if self.logger:
self.logger.info("Found cached markdown result", tag="CACHE")
self.logger.info("Found cached result", tag="CACHE")
try:
with cache_file.open("r") as f:
with cache_file.open('r') as f:
cached_data = json.load(f)
usage = TokenUsage(**cached_data["usage"])
usage = TokenUsage(**cached_data['usage'])
self.usages.append(usage)
self.total_usage.completion_tokens += usage.completion_tokens
self.total_usage.prompt_tokens += usage.prompt_tokens
self.total_usage.total_tokens += usage.total_tokens
return cached_data["blocks"]
return cached_data['blocks']
except Exception as e:
if self.logger:
self.logger.error(
f"LLM markdown: Cache read error: {str(e)}", tag="CACHE"
)
self.logger.error(f"Cache read error: {str(e)}", tag="CACHE")
# Split into chunks
html_chunks = self._merge_chunks(html)
if self.logger:
self.logger.info(
"LLM markdown: Split content into {chunk_count} chunks",
"Split content into {chunk_count} chunks",
tag="CHUNK",
params={"chunk_count": len(html_chunks)},
colors={"chunk_count": LogColor.YELLOW},
colors={"chunk_count": Fore.YELLOW}
)
extracted_content = []
start_time = time.time()
# Process chunks in parallel
with ThreadPoolExecutor(max_workers=4) as executor:
futures = []
for i, chunk in enumerate(html_chunks):
if self.logger:
self.logger.debug(
"LLM markdown: Processing chunk {chunk_num}/{total_chunks}",
"Processing chunk {chunk_num}/{total_chunks}",
tag="CHUNK",
params={"chunk_num": i + 1, "total_chunks": len(html_chunks)},
params={
"chunk_num": i + 1,
"total_chunks": len(html_chunks)
}
)
prompt_variables = {
"HTML": escape_json_string(sanitize_html(chunk)),
"REQUEST": self.instruction
or "Convert this HTML into clean, relevant markdown, removing any noise or irrelevant content.",
"REQUEST": self.instruction or "Convert this HTML into clean, relevant markdown, removing any noise or irrelevant content."
}
prompt = PROMPT_FILTER_CONTENT
for var, value in prompt_variables.items():
prompt = prompt.replace("{" + var + "}", value)
def _proceed_with_chunk(
provider: str,
prompt: str,
api_token: str,
base_url: Optional[str] = None,
extra_args: Dict = {},
) -> List[str]:
if self.logger:
self.logger.info(
"LLM Markdown: Processing chunk {chunk_num}",
tag="CHUNK",
params={"chunk_num": i + 1},
)
return perform_completion_with_backoff(
provider,
prompt,
api_token,
base_url=base_url,
extra_args=extra_args,
)
future = executor.submit(
_proceed_with_chunk,
self.llm_config.provider,
perform_completion_with_backoff,
self.provider,
prompt,
self.llm_config.api_token,
self.llm_config.base_url,
self.extra_args,
self.api_token,
base_url=self.api_base,
extra_args=self.extra_args
)
futures.append((i, future))
@@ -998,61 +920,59 @@ class LLMContentFilter(RelevantContentFilter):
for i, future in sorted(futures):
try:
response = future.result()
# Track usage
usage = TokenUsage(
completion_tokens=response.usage.completion_tokens,
prompt_tokens=response.usage.prompt_tokens,
total_tokens=response.usage.total_tokens,
completion_tokens_details=(
response.usage.completion_tokens_details.__dict__
if response.usage.completion_tokens_details
else {}
),
prompt_tokens_details=(
response.usage.prompt_tokens_details.__dict__
if response.usage.prompt_tokens_details
else {}
),
completion_tokens_details=response.usage.completion_tokens_details.__dict__
if response.usage.completion_tokens_details else {},
prompt_tokens_details=response.usage.prompt_tokens_details.__dict__
if response.usage.prompt_tokens_details else {},
)
self.usages.append(usage)
self.total_usage.completion_tokens += usage.completion_tokens
self.total_usage.prompt_tokens += usage.prompt_tokens
self.total_usage.total_tokens += usage.total_tokens
blocks = extract_xml_data(
["content"], response.choices[0].message.content
)["content"]
blocks = extract_xml_data(["content"], response.choices[0].message.content)["content"]
if blocks:
ordered_results.append(blocks)
if self.logger:
self.logger.success(
"LLM markdown: Successfully processed chunk {chunk_num}",
"Successfully processed chunk {chunk_num}",
tag="CHUNK",
params={"chunk_num": i + 1},
params={"chunk_num": i + 1}
)
except Exception as e:
if self.logger:
self.logger.error(
"LLM markdown: Error processing chunk {chunk_num}: {error}",
"Error processing chunk {chunk_num}: {error}",
tag="CHUNK",
params={"chunk_num": i + 1, "error": str(e)},
params={
"chunk_num": i + 1,
"error": str(e)
}
)
end_time = time.time()
if self.logger:
self.logger.success(
"LLM markdown: Completed processing in {time:.2f}s",
"Completed processing in {time:.2f}s",
tag="LLM",
params={"time": end_time - start_time},
colors={"time": LogColor.YELLOW},
colors={"time": Fore.YELLOW}
)
result = ordered_results if ordered_results else []
# Cache the final result
cache_data = {"blocks": result, "usage": self.total_usage.__dict__}
with cache_file.open("w") as f:
cache_data = {
'blocks': result,
'usage': self.total_usage.__dict__
}
with cache_file.open('w') as f:
json.dump(cache_data, f)
if self.logger:
self.logger.info("Cached results for future use", tag="CACHE")
@@ -1076,4 +996,4 @@ class LLMContentFilter(RelevantContentFilter):
print(
f"{i:<10} {usage.completion_tokens:>12,} "
f"{usage.prompt_tokens:>12,} {usage.total_tokens:>12,}"
)
)

File diff suppressed because it is too large

View File

@@ -1,20 +0,0 @@
from crawl4ai.hub import BaseCrawler
import json
__meta__ = {
"version": "1.2.0",
"tested_on": ["amazon.com"],
"rate_limit": "50 RPM",
"schema": {"product": ["name", "price"]}
}
class AmazonProductCrawler(BaseCrawler):
async def run(self, url: str, **kwargs) -> str:
try:
self.logger.info(f"Crawling {url}")
return '{"product": {"name": "Test Amazon Product"}}'
except Exception as e:
self.logger.error(f"Crawl failed: {str(e)}")
return json.dumps({
"error": str(e),
"metadata": self.meta # Include meta in error response
})

View File

@@ -1,131 +0,0 @@
from crawl4ai import BrowserConfig, AsyncWebCrawler, CrawlerRunConfig, CacheMode
from crawl4ai.hub import BaseCrawler
from crawl4ai.utils import optimize_html, get_home_folder, preprocess_html_for_schema
from crawl4ai import JsonCssExtractionStrategy
from pathlib import Path
import json
import os
from typing import Dict
class GoogleSearchCrawler(BaseCrawler):
__meta__ = {
"version": "1.0.0",
"tested_on": ["google.com/search*"],
"rate_limit": "10 RPM",
"description": "Crawls Google Search results (text + images)",
}
def __init__(self):
super().__init__()
self.js_script = (Path(__file__).parent / "script.js").read_text()
async def run(self, url="", query: str = "", search_type: str = "text", schema_cache_path = None, **kwargs) -> str:
"""Crawl Google Search results for a query"""
url = f"https://www.google.com/search?q={query}&gl=sg&hl=en" if search_type == "text" else f"https://www.google.com/search?q={query}&gl=sg&hl=en&tbs=qdr:d&udm=2"
if kwargs.get("page_start", 1) > 1:
url = f"{url}&start={kwargs['page_start'] * 10}"
if kwargs.get("page_length", 1) > 1:
url = f"{url}&num={kwargs['page_length']}"
browser_config = BrowserConfig(headless=True, verbose=True)
async with AsyncWebCrawler(config=browser_config) as crawler:
config = CrawlerRunConfig(
cache_mode=kwargs.get("cache_mode", CacheMode.BYPASS),
keep_attrs=["id", "class"],
keep_data_attributes=True,
delay_before_return_html=kwargs.get(
"delay", 2 if search_type == "image" else 1),
js_code=self.js_script if search_type == "image" else None,
)
result = await crawler.arun(url=url, config=config)
if not result.success:
return json.dumps({"error": result.error})
if search_type == "image":
if result.js_execution_result.get("success", False) is False:
return json.dumps({"error": result.js_execution_result.get("error", "Unknown error")})
if "results" in result.js_execution_result:
image_result = result.js_execution_result['results'][0]
if image_result.get("success", False) is False:
return json.dumps({"error": image_result.get("error", "Unknown error")})
return json.dumps(image_result["result"], indent=4)
# For text search, extract structured data
schemas = await self._build_schemas(result.cleaned_html, schema_cache_path)
extracted = {
key: JsonCssExtractionStrategy(schema=schemas[key]).run(
url=url, sections=[result.html]
)
for key in schemas
}
return json.dumps(extracted, indent=4)
async def _build_schemas(self, html: str, schema_cache_path: str = None) -> Dict[str, Dict]:
"""Build extraction schemas (organic, top stories, etc.)"""
home_dir = get_home_folder() if not schema_cache_path else schema_cache_path
os.makedirs(f"{home_dir}/schema", exist_ok=True)
# cleaned_html = optimize_html(html, threshold=100)
cleaned_html = preprocess_html_for_schema(html)
organic_schema = None
if os.path.exists(f"{home_dir}/schema/organic_schema.json"):
with open(f"{home_dir}/schema/organic_schema.json", "r") as f:
organic_schema = json.load(f)
else:
organic_schema = JsonCssExtractionStrategy.generate_schema(
html=cleaned_html,
target_json_example="""{
"title": "...",
"link": "...",
"snippet": "...",
"date": "1 hour ago",
}""",
query="""The given html is the crawled html from Google search result. Please find the schema for organic search item in the given html, I am interested in title, link, snippet text. date."""
)
with open(f"{home_dir}/schema/organic_schema.json", "w") as f:
f.write(json.dumps(organic_schema))
top_stories_schema = None
if os.path.exists(f"{home_dir}/schema/top_stories_schema.json"):
with open(f"{home_dir}/schema/top_stories_schema.json", "r") as f:
top_stories_schema = json.load(f)
else:
top_stories_schema = JsonCssExtractionStrategy.generate_schema(
html=cleaned_html,
target_json_example="""{
"title": "...",
"link": "...",
"source": "Insider Monkey",
"date": "1 hour ago",
}""",
query="""The given html is the crawled html from Google search result. Please find the schema for Top Story item int he given html, I am interested in title, link, source. date and imageUrl."""
)
with open(f"{home_dir}/schema/top_stories_schema.json", "w") as f:
f.write(json.dumps(top_stories_schema))
suggested_query_schema = None
if os.path.exists(f"{home_dir}/schema/suggested_query_schema.json"):
with open(f"{home_dir}/schema/suggested_query_schema.json", "r") as f:
suggested_query_schema = json.load(f)
else:
suggested_query_schema = JsonCssExtractionStrategy.generate_schema(
html=cleaned_html,
target_json_example="""{
"query": "A for Apple",
}""",
query="""The given HTML contains the crawled HTML from Google search results. Please find the schema for each suggested query in the section "People also search for" within the given HTML. I am interested in the queries only."""
)
with open(f"{home_dir}/schema/suggested_query_schema.json", "w") as f:
f.write(json.dumps(suggested_query_schema))
return {
"organic_schema": organic_schema,
"top_stories_schema": top_stories_schema,
"suggested_query_schema": suggested_query_schema,
}
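# Minimal usage sketch based on the run() signature above (the asyncio entry point is an
# assumption, not part of this file):
#
#   import asyncio
#   crawler = GoogleSearchCrawler()
#   results_json = asyncio.run(crawler.run(query="crawl4ai", search_type="text"))
#   print(results_json)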

View File

@@ -1,115 +0,0 @@
(() => {
// Function to extract image data from Google Images page
function extractImageData() {
const keys = Object.keys(window.W_jd);
let allImageData = [];
let currentPosition = 0;
// Get the symbol we'll use (from first valid entry)
let targetSymbol;
for (let key of keys) {
try {
const symbols = Object.getOwnPropertySymbols(window.W_jd[key]);
if (symbols.length > 0) {
targetSymbol = symbols[0];
break;
}
} catch (e) {
continue;
}
}
if (!targetSymbol) return [];
// Iterate through ALL keys
for (let key of keys) {
try {
const o1 = window.W_jd[key][targetSymbol]
if (!o1) continue;
const data = Object.values(o1)[0]
// const data = window.W_jd[key][targetSymbol]?.Ws;
// Check if this is a valid image data entry
if (data && Array.isArray(data[1])) {
const processedData = processImageEntry(data, currentPosition);
if (processedData) {
allImageData.push(processedData);
currentPosition++;
}
}
} catch (e) {
continue;
}
}
return allImageData;
}
function processImageEntry(entry, position) {
const imageData = entry[1];
if (!Array.isArray(imageData)) return null;
// Extract the image ID
const imageId = imageData[1];
if (!imageId) return null;
// Find the corresponding DOM element
const domElement = document.querySelector(`[data-docid="${imageId}"]`);
if (!domElement) return null;
// Extract data from the array structure
const [
_,
id,
thumbnailInfo,
imageInfo,
__,
___,
rgb,
____,
_____,
metadata
] = imageData;
// Ensure we have the required data
if (!thumbnailInfo || !imageInfo) return null;
// Extract metadata from DOM
const title = domElement?.querySelector('.toI8Rb')?.textContent?.trim();
const source = domElement?.querySelector('.guK3rf')?.textContent?.trim();
const link = domElement?.querySelector('a.EZAeBe')?.href;
if (!link) return null;
// Build Google Image URL
const googleUrl = buildGoogleImageUrl(imageInfo[0], link, imageId, imageInfo[1], imageInfo[2]);
return {
title,
imageUrl: imageInfo[0],
imageWidth: imageInfo[2],
imageHeight: imageInfo[1],
thumbnailUrl: thumbnailInfo[0],
thumbnailWidth: thumbnailInfo[2],
thumbnailHeight: thumbnailInfo[1],
source,
domain: metadata['2000']?.[1] || new URL(link).hostname,
link,
googleUrl,
position: position + 1
};
}
function buildGoogleImageUrl(imgUrl, refUrl, tbnid, height, width) {
const params = new URLSearchParams({
imgurl: imgUrl,
tbnid: tbnid,
imgrefurl: refUrl,
docid: tbnid,
w: width.toString(),
h: height.toString(),
});
return `https://www.google.com/imgres?${params.toString()}`;
}
return extractImageData();
})();

View File

@@ -1,47 +0,0 @@
# deep_crawling/__init__.py
from .base_strategy import DeepCrawlDecorator, DeepCrawlStrategy
from .bfs_strategy import BFSDeepCrawlStrategy
from .bff_strategy import BestFirstCrawlingStrategy
from .dfs_strategy import DFSDeepCrawlStrategy
from .filters import (
FilterChain,
ContentTypeFilter,
DomainFilter,
URLFilter,
URLPatternFilter,
FilterStats,
ContentRelevanceFilter,
SEOFilter
)
from .scorers import (
KeywordRelevanceScorer,
URLScorer,
CompositeScorer,
DomainAuthorityScorer,
FreshnessScorer,
PathDepthScorer,
ContentTypeScorer
)
__all__ = [
"DeepCrawlDecorator",
"DeepCrawlStrategy",
"BFSDeepCrawlStrategy",
"BestFirstCrawlingStrategy",
"DFSDeepCrawlStrategy",
"FilterChain",
"ContentTypeFilter",
"DomainFilter",
"URLFilter",
"URLPatternFilter",
"FilterStats",
"ContentRelevanceFilter",
"SEOFilter",
"KeywordRelevanceScorer",
"URLScorer",
"CompositeScorer",
"DomainAuthorityScorer",
"FreshnessScorer",
"PathDepthScorer",
"ContentTypeScorer",
]
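# Illustrative usage sketch (assumption: AsyncWebCrawler and CrawlerRunConfig are the
# public entry points used elsewhere in this diff; the exact import paths may differ):
#
#   from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
#   from crawl4ai.deep_crawling import BFSDeepCrawlStrategy, FilterChain
#
#   config = CrawlerRunConfig(
#       deep_crawl_strategy=BFSDeepCrawlStrategy(max_depth=2, filter_chain=FilterChain()),
#       stream=False,
#   )
#   async with AsyncWebCrawler() as crawler:
#       results = await crawler.arun("https://example.com", config=config)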

View File

@@ -1,159 +0,0 @@
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import AsyncGenerator, Optional, Set, List, Dict
from functools import wraps
from contextvars import ContextVar
from ..types import AsyncWebCrawler, CrawlerRunConfig, CrawlResult, RunManyReturn
class DeepCrawlDecorator:
"""Decorator that adds deep crawling capability to arun method."""
deep_crawl_active = ContextVar("deep_crawl_active", default=False)
def __init__(self, crawler: AsyncWebCrawler):
self.crawler = crawler
def __call__(self, original_arun):
@wraps(original_arun)
async def wrapped_arun(url: str, config: CrawlerRunConfig = None, **kwargs):
# If deep crawling is already active, call the original method to avoid recursion.
if config and config.deep_crawl_strategy and not self.deep_crawl_active.get():
token = self.deep_crawl_active.set(True)
# Await the arun call to get the actual result object.
result_obj = await config.deep_crawl_strategy.arun(
crawler=self.crawler,
start_url=url,
config=config
)
if config.stream:
async def result_wrapper():
try:
async for result in result_obj:
yield result
finally:
self.deep_crawl_active.reset(token)
return result_wrapper()
else:
try:
return result_obj
finally:
self.deep_crawl_active.reset(token)
return await original_arun(url, config=config, **kwargs)
return wrapped_arun
class DeepCrawlStrategy(ABC):
"""
Abstract base class for deep crawling strategies.
Core functions:
- arun: Main entry point that returns an async generator of CrawlResults.
- shutdown: Clean up resources.
- can_process_url: Validate a URL and decide whether to process it.
- _process_links: Extract and process links from a CrawlResult.
"""
@abstractmethod
async def _arun_batch(
self,
start_url: str,
crawler: AsyncWebCrawler,
config: CrawlerRunConfig,
) -> List[CrawlResult]:
"""
Batch (non-streaming) mode:
Processes one BFS level at a time, then yields all the results.
"""
pass
@abstractmethod
async def _arun_stream(
self,
start_url: str,
crawler: AsyncWebCrawler,
config: CrawlerRunConfig,
) -> AsyncGenerator[CrawlResult, None]:
"""
Streaming mode:
Processes one BFS level at a time and yields results immediately as they arrive.
"""
pass
async def arun(
self,
start_url: str,
crawler: AsyncWebCrawler,
config: Optional[CrawlerRunConfig] = None,
) -> RunManyReturn:
"""
Traverse the given URL using the specified crawler.
Args:
start_url (str): The URL from which to start crawling.
crawler (AsyncWebCrawler): The crawler instance to use.
crawler_run_config (Optional[CrawlerRunConfig]): Crawler configuration.
Returns:
Union[CrawlResultT, List[CrawlResultT], AsyncGenerator[CrawlResultT, None]]
"""
if config is None:
raise ValueError("CrawlerRunConfig must be provided")
if config.stream:
return self._arun_stream(start_url, crawler, config)
else:
return await self._arun_batch(start_url, crawler, config)
def __call__(self, start_url: str, crawler: AsyncWebCrawler, config: CrawlerRunConfig):
return self.arun(start_url, crawler, config)
@abstractmethod
async def shutdown(self) -> None:
"""
Clean up resources used by the deep crawl strategy.
"""
pass
@abstractmethod
async def can_process_url(self, url: str, depth: int) -> bool:
"""
Validate the URL format and apply custom filtering logic.
Args:
url (str): The URL to validate.
depth (int): The current depth in the crawl.
Returns:
bool: True if the URL should be processed, False otherwise.
"""
pass
@abstractmethod
async def link_discovery(
self,
result: CrawlResult,
source_url: str,
current_depth: int,
visited: Set[str],
next_level: List[tuple],
depths: Dict[str, int],
) -> None:
"""
Extract and process links from the given crawl result.
This method should:
- Validate each extracted URL using can_process_url.
- Optionally score URLs.
- Append valid URLs (and their parent references) to the next_level list.
- Update the depths dictionary with the new depth for each URL.
Args:
result (CrawlResult): The result from a crawl operation.
source_url (str): The URL from which this result was obtained.
current_depth (int): The depth at which the source URL was processed.
visited (Set[str]): Set of already visited URLs.
next_level (List[tuple]): List of tuples (url, parent_url) for the next BFS level.
depths (Dict[str, int]): Mapping of URLs to their current depth.
"""
pass

View File

@@ -1,269 +0,0 @@
# best_first_crawling_strategy.py
import asyncio
import logging
from datetime import datetime
from typing import AsyncGenerator, Optional, Set, Dict, List, Tuple
from urllib.parse import urlparse
from ..models import TraversalStats
from .filters import FilterChain
from .scorers import URLScorer
from . import DeepCrawlStrategy
from ..types import AsyncWebCrawler, CrawlerRunConfig, CrawlResult, RunManyReturn
from ..utils import normalize_url_for_deep_crawl
from math import inf as infinity
# Configurable batch size for processing items from the priority queue
BATCH_SIZE = 10
class BestFirstCrawlingStrategy(DeepCrawlStrategy):
"""
Best-First Crawling Strategy using a priority queue.
This strategy prioritizes URLs based on their score, ensuring that higher-value
pages are crawled first. It reimplements the core traversal loop to use a priority
queue while keeping URL validation and link discovery consistent with our design.
Core methods:
- arun: Returns either a list (batch mode) or an async generator (stream mode).
- _arun_best_first: Core generator that uses a priority queue to yield CrawlResults.
- can_process_url: Validates URLs and applies filtering (inherited behavior).
- link_discovery: Extracts and validates links from a CrawlResult.
"""
def __init__(
self,
max_depth: int,
filter_chain: FilterChain = FilterChain(),
url_scorer: Optional[URLScorer] = None,
include_external: bool = False,
max_pages: int = infinity,
logger: Optional[logging.Logger] = None,
):
self.max_depth = max_depth
self.filter_chain = filter_chain
self.url_scorer = url_scorer
self.include_external = include_external
self.max_pages = max_pages
self.logger = logger or logging.getLogger(__name__)
self.stats = TraversalStats(start_time=datetime.now())
self._cancel_event = asyncio.Event()
self._pages_crawled = 0
async def can_process_url(self, url: str, depth: int) -> bool:
"""
Validate the URL format and apply filtering.
For the starting URL (depth 0), filtering is bypassed.
"""
try:
parsed = urlparse(url)
if not parsed.scheme or not parsed.netloc:
raise ValueError("Missing scheme or netloc")
if parsed.scheme not in ("http", "https"):
raise ValueError("Invalid scheme")
if "." not in parsed.netloc:
raise ValueError("Invalid domain")
except Exception as e:
self.logger.warning(f"Invalid URL: {url}, error: {e}")
return False
if depth != 0 and not await self.filter_chain.apply(url):
return False
return True
async def link_discovery(
self,
result: CrawlResult,
source_url: str,
current_depth: int,
visited: Set[str],
next_links: List[Tuple[str, Optional[str]]],
depths: Dict[str, int],
) -> None:
"""
Extract links from the crawl result, validate them, and append new URLs
(with their parent references) to next_links.
Also updates the depths dictionary.
"""
new_depth = current_depth + 1
if new_depth > self.max_depth:
return
# If we've reached the max pages limit, don't discover new links
remaining_capacity = self.max_pages - self._pages_crawled
if remaining_capacity <= 0:
self.logger.info(f"Max pages limit ({self.max_pages}) reached, stopping link discovery")
return
# Retrieve internal links; include external links if enabled.
links = result.links.get("internal", [])
if self.include_external:
links += result.links.get("external", [])
# If we have more links than remaining capacity, limit how many we'll process
valid_links = []
for link in links:
url = link.get("href")
base_url = normalize_url_for_deep_crawl(url, source_url)
if base_url in visited:
continue
if not await self.can_process_url(url, new_depth):
self.stats.urls_skipped += 1
continue
valid_links.append(base_url)
# If we have more valid links than capacity, limit them
if len(valid_links) > remaining_capacity:
valid_links = valid_links[:remaining_capacity]
self.logger.info(f"Limiting to {remaining_capacity} URLs due to max_pages limit")
# Record the new depths and add to next_links
for url in valid_links:
depths[url] = new_depth
next_links.append((url, source_url))
async def _arun_best_first(
self,
start_url: str,
crawler: AsyncWebCrawler,
config: CrawlerRunConfig,
) -> AsyncGenerator[CrawlResult, None]:
"""
Core best-first crawl method using a priority queue.
The queue items are tuples of (score, depth, url, parent_url). Lower scores
are treated as higher priority. URLs are processed in batches for efficiency.
"""
queue: asyncio.PriorityQueue = asyncio.PriorityQueue()
# Push the initial URL with score 0 and depth 0.
await queue.put((0, 0, start_url, None))
visited: Set[str] = set()
depths: Dict[str, int] = {start_url: 0}
while not queue.empty() and not self._cancel_event.is_set():
# Stop if we've reached the max pages limit
if self._pages_crawled >= self.max_pages:
self.logger.info(f"Max pages limit ({self.max_pages}) reached, stopping crawl")
break
# Calculate how many more URLs we can process in this batch
remaining = self.max_pages - self._pages_crawled
batch_size = min(BATCH_SIZE, remaining)
if batch_size <= 0:
# No more pages to crawl
self.logger.info(f"Max pages limit ({self.max_pages}) reached, stopping crawl")
break
batch: List[Tuple[float, int, str, Optional[str]]] = []
# Retrieve up to batch_size items from the priority queue.
for _ in range(batch_size):
if queue.empty():
break
item = await queue.get()
score, depth, url, parent_url = item
if url in visited:
continue
visited.add(url)
batch.append(item)
if not batch:
continue
# Process the current batch of URLs.
urls = [item[2] for item in batch]
batch_config = config.clone(deep_crawl_strategy=None, stream=True)
stream_gen = await crawler.arun_many(urls=urls, config=batch_config)
async for result in stream_gen:
result_url = result.url
# Find the corresponding tuple from the batch.
corresponding = next((item for item in batch if item[2] == result_url), None)
if not corresponding:
continue
score, depth, url, parent_url = corresponding
result.metadata = result.metadata or {}
result.metadata["depth"] = depth
result.metadata["parent_url"] = parent_url
result.metadata["score"] = score
# Count only successful crawls toward max_pages limit
if result.success:
self._pages_crawled += 1
# Check if we've reached the limit during batch processing
if self._pages_crawled >= self.max_pages:
self.logger.info(f"Max pages limit ({self.max_pages}) reached during batch, stopping crawl")
break # Exit the generator
yield result
# Only discover links from successful crawls
if result.success:
# Discover new links from this result
new_links: List[Tuple[str, Optional[str]]] = []
await self.link_discovery(result, result_url, depth, visited, new_links, depths)
for new_url, new_parent in new_links:
new_depth = depths.get(new_url, depth + 1)
new_score = self.url_scorer.score(new_url) if self.url_scorer else 0
await queue.put((new_score, new_depth, new_url, new_parent))
# End of crawl.
async def _arun_batch(
self,
start_url: str,
crawler: AsyncWebCrawler,
config: CrawlerRunConfig,
) -> List[CrawlResult]:
"""
Best-first crawl in batch mode.
Aggregates all CrawlResults into a list.
"""
results: List[CrawlResult] = []
async for result in self._arun_best_first(start_url, crawler, config):
results.append(result)
return results
async def _arun_stream(
self,
start_url: str,
crawler: AsyncWebCrawler,
config: CrawlerRunConfig,
) -> AsyncGenerator[CrawlResult, None]:
"""
Best-first crawl in streaming mode.
Yields CrawlResults as they become available.
"""
async for result in self._arun_best_first(start_url, crawler, config):
yield result
async def arun(
self,
start_url: str,
crawler: AsyncWebCrawler,
config: Optional[CrawlerRunConfig] = None,
) -> "RunManyReturn":
"""
Main entry point for best-first crawling.
Returns either a list (batch mode) or an async generator (stream mode)
of CrawlResults.
"""
if config is None:
raise ValueError("CrawlerRunConfig must be provided")
if config.stream:
return self._arun_stream(start_url, crawler, config)
else:
return await self._arun_batch(start_url, crawler, config)
async def shutdown(self) -> None:
"""
Signal cancellation and clean up resources.
"""
self._cancel_event.set()
self.stats.end_time = datetime.now()

View File

@@ -1,261 +0,0 @@
# bfs_deep_crawl_strategy.py
import asyncio
import logging
from datetime import datetime
from typing import AsyncGenerator, Optional, Set, Dict, List, Tuple
from urllib.parse import urlparse
from ..models import TraversalStats
from .filters import FilterChain
from .scorers import URLScorer
from . import DeepCrawlStrategy
from ..types import AsyncWebCrawler, CrawlerRunConfig, CrawlResult
from ..utils import normalize_url_for_deep_crawl, efficient_normalize_url_for_deep_crawl
from math import inf as infinity
class BFSDeepCrawlStrategy(DeepCrawlStrategy):
"""
Breadth-First Search deep crawling strategy.
Core functions:
- arun: Main entry point; splits execution into batch or stream modes.
- link_discovery: Extracts, filters, and (if needed) scores the outgoing URLs.
- can_process_url: Validates URL format and applies the filter chain.
"""
def __init__(
self,
max_depth: int,
filter_chain: FilterChain = FilterChain(),
url_scorer: Optional[URLScorer] = None,
include_external: bool = False,
score_threshold: float = -infinity,
max_pages: int = infinity,
logger: Optional[logging.Logger] = None,
):
self.max_depth = max_depth
self.filter_chain = filter_chain
self.url_scorer = url_scorer
self.include_external = include_external
self.score_threshold = score_threshold
self.max_pages = max_pages
# Type check for logger
if isinstance(logger, dict):
logging.getLogger(__name__).warning(
"BFSDeepCrawlStrategy received a dict as logger; falling back to default logger."
)
self.logger = logging.getLogger(__name__)
else:
self.logger = logger or logging.getLogger(__name__)
self.stats = TraversalStats(start_time=datetime.now())
self._cancel_event = asyncio.Event()
self._pages_crawled = 0
async def can_process_url(self, url: str, depth: int) -> bool:
"""
Validates the URL and applies the filter chain.
For the start URL (depth 0) filtering is bypassed.
"""
try:
parsed = urlparse(url)
if not parsed.scheme or not parsed.netloc:
raise ValueError("Missing scheme or netloc")
if parsed.scheme not in ("http", "https"):
raise ValueError("Invalid scheme")
if "." not in parsed.netloc:
raise ValueError("Invalid domain")
except Exception as e:
self.logger.warning(f"Invalid URL: {url}, error: {e}")
return False
if depth != 0 and not await self.filter_chain.apply(url):
return False
return True
async def link_discovery(
self,
result: CrawlResult,
source_url: str,
current_depth: int,
visited: Set[str],
next_level: List[Tuple[str, Optional[str]]],
depths: Dict[str, int],
) -> None:
"""
Extracts links from the crawl result, validates and scores them, and
prepares the next level of URLs.
Each valid URL is appended to next_level as a tuple (url, parent_url)
and its depth is tracked.
"""
next_depth = current_depth + 1
if next_depth > self.max_depth:
return
# If we've reached the max pages limit, don't discover new links
remaining_capacity = self.max_pages - self._pages_crawled
if remaining_capacity <= 0:
self.logger.info(f"Max pages limit ({self.max_pages}) reached, stopping link discovery")
return
# Get internal links and, if enabled, external links.
links = result.links.get("internal", [])
if self.include_external:
links += result.links.get("external", [])
valid_links = []
# First collect all valid links
for link in links:
url = link.get("href")
# Strip URL fragments to avoid duplicate crawling
# base_url = url.split('#')[0] if url else url
base_url = normalize_url_for_deep_crawl(url, source_url)
if base_url in visited:
continue
if not await self.can_process_url(url, next_depth):
self.stats.urls_skipped += 1
continue
# Score the URL if a scorer is provided
score = self.url_scorer.score(base_url) if self.url_scorer else 0
# Skip URLs with scores below the threshold
if score < self.score_threshold:
self.logger.debug(f"URL {url} skipped: score {score} below threshold {self.score_threshold}")
self.stats.urls_skipped += 1
continue
visited.add(base_url)
valid_links.append((base_url, score))
# If we have more valid links than capacity, sort by score and take the top ones
if len(valid_links) > remaining_capacity:
if self.url_scorer:
# Sort by score in descending order
valid_links.sort(key=lambda x: x[1], reverse=True)
# Take only as many as we have capacity for
valid_links = valid_links[:remaining_capacity]
self.logger.info(f"Limiting to {remaining_capacity} URLs due to max_pages limit")
# Process the final selected links
for url, score in valid_links:
# attach the score to metadata if needed
if score:
result.metadata = result.metadata or {}
result.metadata["score"] = score
next_level.append((url, source_url))
depths[url] = next_depth
async def _arun_batch(
self,
start_url: str,
crawler: AsyncWebCrawler,
config: CrawlerRunConfig,
) -> List[CrawlResult]:
"""
Batch (non-streaming) mode:
Processes one BFS level at a time, then yields all the results.
"""
visited: Set[str] = set()
# current_level holds tuples: (url, parent_url)
current_level: List[Tuple[str, Optional[str]]] = [(start_url, None)]
depths: Dict[str, int] = {start_url: 0}
results: List[CrawlResult] = []
while current_level and not self._cancel_event.is_set():
# Check if we've already reached max_pages before starting a new level
if self._pages_crawled >= self.max_pages:
self.logger.info(f"Max pages limit ({self.max_pages}) reached, stopping crawl")
break
next_level: List[Tuple[str, Optional[str]]] = []
urls = [url for url, _ in current_level]
# Clone the config to disable deep crawling recursion and enforce batch mode.
batch_config = config.clone(deep_crawl_strategy=None, stream=False)
batch_results = await crawler.arun_many(urls=urls, config=batch_config)
# Update pages crawled counter - count only successful crawls
successful_results = [r for r in batch_results if r.success]
self._pages_crawled += len(successful_results)
for result in batch_results:
url = result.url
depth = depths.get(url, 0)
result.metadata = result.metadata or {}
result.metadata["depth"] = depth
parent_url = next((parent for (u, parent) in current_level if u == url), None)
result.metadata["parent_url"] = parent_url
results.append(result)
# Only discover links from successful crawls
if result.success:
# Link discovery will handle the max pages limit internally
await self.link_discovery(result, url, depth, visited, next_level, depths)
current_level = next_level
return results
async def _arun_stream(
self,
start_url: str,
crawler: AsyncWebCrawler,
config: CrawlerRunConfig,
) -> AsyncGenerator[CrawlResult, None]:
"""
Streaming mode:
Processes one BFS level at a time and yields results immediately as they arrive.
"""
visited: Set[str] = set()
current_level: List[Tuple[str, Optional[str]]] = [(start_url, None)]
depths: Dict[str, int] = {start_url: 0}
while current_level and not self._cancel_event.is_set():
next_level: List[Tuple[str, Optional[str]]] = []
urls = [url for url, _ in current_level]
visited.update(urls)
stream_config = config.clone(deep_crawl_strategy=None, stream=True)
stream_gen = await crawler.arun_many(urls=urls, config=stream_config)
# Keep track of processed results for this batch
results_count = 0
async for result in stream_gen:
url = result.url
depth = depths.get(url, 0)
result.metadata = result.metadata or {}
result.metadata["depth"] = depth
parent_url = next((parent for (u, parent) in current_level if u == url), None)
result.metadata["parent_url"] = parent_url
# Count only successful crawls
if result.success:
self._pages_crawled += 1
# Check if we've reached the limit during batch processing
if self._pages_crawled >= self.max_pages:
self.logger.info(f"Max pages limit ({self.max_pages}) reached during batch, stopping crawl")
break # Exit the generator
results_count += 1
yield result
# Only discover links from successful crawls
if result.success:
# Link discovery will handle the max pages limit internally
await self.link_discovery(result, url, depth, visited, next_level, depths)
# If we didn't get results back (e.g. due to errors), avoid getting stuck in an infinite loop
# by considering these URLs as visited but not counting them toward the max_pages limit
if results_count == 0 and urls:
self.logger.warning(f"No results returned for {len(urls)} URLs, marking as visited")
current_level = next_level
async def shutdown(self) -> None:
"""
Clean up resources and signal cancellation of the crawl.
"""
self._cancel_event.set()
self.stats.end_time = datetime.now()
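# Minimal usage sketch (illustrative only): wires this strategy into a run config
# and streams results. The target URL and limits are placeholder values, and this
# assumes the decorated crawler.arun() yields an async generator when stream=True,
# as the public crawl4ai API does.
if __name__ == "__main__":
    import asyncio
    from crawl4ai import AsyncWebCrawler, CrawlerRunConfig

    async def _demo() -> None:
        strategy = BFSDeepCrawlStrategy(max_depth=2, max_pages=50)
        config = CrawlerRunConfig(deep_crawl_strategy=strategy, stream=True)
        async with AsyncWebCrawler() as crawler:
            async for result in await crawler.arun("https://docs.crawl4ai.com", config=config):
                print(result.url, result.metadata.get("depth"))

    asyncio.run(_demo())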

View File

@@ -1,432 +0,0 @@
from __future__ import annotations
# I just got crazy, trying to write K&R C but in Python. Right now I feel like I'm in a quantum state.
# I probably won't use this; I just want to leave it here. A century later, the future human race will be like, "WTF?"
# ------ Imports That Will Make You Question Reality ------ #
from functools import wraps
from contextvars import ContextVar
import inspect
from crawl4ai import CacheMode
from crawl4ai.async_configs import CrawlerRunConfig
from crawl4ai.models import CrawlResult, TraversalStats
from crawl4ai.deep_crawling.filters import FilterChain
from crawl4ai.async_webcrawler import AsyncWebCrawler
import time
import logging
from urllib.parse import urlparse
from abc import ABC, abstractmethod
from collections import deque
import asyncio
from typing import (
AsyncGenerator,
Dict,
List,
TypeVar,
Generic,
Tuple,
Callable,
Awaitable,
Union,
)
from functools import lru_cache
import mmh3
from bitarray import bitarray
import numpy as np
from heapq import heappush, heappop
# ------ Type Algebra Mastery ------ #
CrawlResultT = TypeVar("CrawlResultT", bound="CrawlResult")
PriorityT = TypeVar("PriorityT")
P = TypeVar("P")
# ------ Hyperscalar Context Management ------ #
deep_crawl_ctx = ContextVar("deep_crawl_stack", default=deque())
# ------ Algebraic Crawler Monoid ------ #
class TraversalContext:
__slots__ = ('visited', 'frontier', 'depths', 'priority_fn', 'current_depth')
def __init__(self,
priority_fn: Callable[[str], Awaitable[float]] = lambda _: 1.0):
self.visited: BloomFilter = BloomFilter(10**6, 0.01) # 1M items, 1% FP
self.frontier: PriorityQueue = PriorityQueue()
self.depths: Dict[str, int] = {}
self.priority_fn = priority_fn
self.current_depth = 0
def clone_for_level(self) -> TraversalContext:
"""Monadic context propagation"""
new_ctx = TraversalContext(self.priority_fn)
new_ctx.visited = self.visited.copy()
new_ctx.depths = self.depths.copy()
new_ctx.current_depth = self.current_depth
return new_ctx
class PriorityQueue(Generic[PriorityT]):
"""Fibonacci heap-inspired priority queue with O(1) amortized operations"""
__slots__ = ('_heap', '_index')
def __init__(self):
self._heap: List[Tuple[PriorityT, float, P]] = []
self._index: Dict[P, int] = {}
def insert(self, priority: PriorityT, item: P) -> None:
tiebreaker = time.time() # Ensure FIFO for equal priorities
heappush(self._heap, (priority, tiebreaker, item))
self._index[item] = len(self._heap) - 1
def extract(self, top_n: int = 1) -> List[P]:
items = []
for _ in range(top_n):
if not self._heap:
break
priority, _, item = heappop(self._heap)
del self._index[item]
items.append(item)
if not items:
raise IndexError("Priority queue empty")
return items
def is_empty(self) -> bool:
return not bool(self._heap)
class BloomFilter:
"""Optimal Bloom filter using murmur3 hash avalanche"""
__slots__ = ('size', 'hashes', 'bits')
def __init__(self, capacity: int, error_rate: float):
self.size = self._optimal_size(capacity, error_rate)
self.hashes = self._optimal_hashes(capacity, self.size)
self.bits = bitarray(self.size)
self.bits.setall(False)
@staticmethod
def _optimal_size(n: int, p: float) -> int:
m = - (n * np.log(p)) / (np.log(2) ** 2)
return int(np.ceil(m))
@staticmethod
def _optimal_hashes(n: int, m: int) -> int:
k = (m / n) * np.log(2)
return int(np.ceil(k))
def add(self, item: str) -> None:
for seed in range(self.hashes):
digest = mmh3.hash(item, seed) % self.size
self.bits[digest] = True
def __contains__(self, item: str) -> bool:
return all(
self.bits[mmh3.hash(item, seed) % self.size]
for seed in range(self.hashes)
)
def copy(self) -> BloomFilter:
new = object.__new__(BloomFilter)
new.size = self.size
new.hashes = self.hashes
new.bits = self.bits.copy()
return new
def __len__(self) -> int:
"""
Estimates the number of items in the filter using the
count of set bits and the formula:
n = -m/k * ln(1 - X/m)
where:
m = size of bit array
k = number of hash functions
X = count of set bits
"""
set_bits = self.bits.count(True)
if set_bits == 0:
return 0
# Use the inverse bloom filter formula to estimate cardinality
return int(
-(self.size / self.hashes) *
np.log(1 - set_bits / self.size)
)
def bit_count(self) -> int:
"""Returns the raw count of set bits in the filter"""
return self.bits.count(True)
def __repr__(self) -> str:
return f"BloomFilter(est_items={len(self)}, bits={self.bit_count()}/{self.size})"
# ------ Hyper-Optimal Deep Crawl Core ------ #
class DeepCrawlDecorator:
"""Metaprogramming marvel: Zero-cost deep crawl abstraction"""
def __init__(self, crawler: AsyncWebCrawler):
self.crawler = crawler
def __call__(self, original_arun: Callable) -> Callable:
@wraps(original_arun)
async def quantum_arun(url: str, config: CrawlerRunConfig = None, **kwargs):
stack = deep_crawl_ctx.get()
if config and config.deep_crawl_strategy and not stack:
stack.append(self.crawler)
try:
deep_crawl_ctx.set(stack)
async for result in config.deep_crawl_strategy.traverse(
start_url=url,
crawler=self.crawler,
config=config
):
yield result
finally:
stack.pop()
deep_crawl_ctx.set(stack)
else:
result = await original_arun(url, config=config, **kwargs)
yield result
return quantum_arun
async def collect_results(url, crawler, config):
if id(getattr(crawler, "arun")) != id(getattr(crawler, "original_arun")):
setattr(crawler, "arun", getattr(crawler, "original_arun"))
ret = crawler.arun(url, config=config)
# If arun is an async generator, iterate over it
if inspect.isasyncgen(ret):
return [r async for r in ret]
# Otherwise, await the coroutine and normalize to a list
result = await ret
return result if isinstance(result, list) else [result]
async def collect_many_results(url, crawler, config):
# Replace back arun to its original implementation
if id(getattr(crawler, "arun")) != id(getattr(crawler, "original_arun")):
setattr(crawler, "arun", getattr(crawler, "original_arun"))
ret = crawler.arun_many(url, config=config)
# If arun_many returned an async generator, iterate over it
if inspect.isasyncgen(ret):
return [r async for r in ret]
# Otherwise, await the coroutine and normalize to a list
result = await ret
return result if isinstance(result, list) else [result]
# ------ Deep Crawl Strategy Interface ------ #
CrawlResultT = TypeVar("CrawlResultT", bound=CrawlResult)
# In batch mode we return List[CrawlResult] and in stream mode an AsyncGenerator.
RunManyReturn = Union[CrawlResultT, List[CrawlResultT], AsyncGenerator[CrawlResultT, None]]
class DeepCrawlStrategy(ABC):
"""Abstract base class that will make Dijkstra smile"""
@abstractmethod
async def traverse(self,
start_url: str,
crawler: AsyncWebCrawler,
config: CrawlerRunConfig) -> RunManyReturn:
"""Traverse with O(1) memory complexity via generator fusion"""
...
@abstractmethod
def precompute_priority(self, url: str) -> Awaitable[float]:
"""Quantum-inspired priority precomputation"""
pass
@abstractmethod
async def link_hypercube(self, result: CrawlResult) -> AsyncGenerator[str, None]:
"""Hilbert-curve optimized link generation"""
pass
# ------ BFS That Would Make Knuth Proud ------ #
def calculate_quantum_batch_size(
depth: int,
max_depth: int,
frontier_size: int,
visited_size: int
) -> int:
"""
Calculates optimal batch size for URL processing using quantum-inspired mathematical principles.
This function implements a sophisticated batch size calculation using:
1. Golden Ratio (φ) based scaling for optimal irrationality
2. Depth-aware amplitude modulation
3. Harmonic series dampening
4. Logarithmic growth control
5. Dynamic frontier adaptation
The formula follows the quantum harmonic oscillator principle:
N = ⌈φ^(2d) * log₂(|V|) * H(d)⁻¹ * min(20, |F|/10)⌉
where:
φ = Golden Ratio ((1 + √5) / 2)
d = depth factor (normalized remaining depth)
|V| = size of visited set
H(d) = d-th harmonic number
|F| = frontier size
Args:
depth (int): Current traversal depth
max_depth (int): Maximum allowed depth
frontier_size (int): Current size of frontier queue
visited_size (int): Number of URLs visited so far
Returns:
int: Optimal batch size bounded between 1 and 100
Mathematical Properties:
- Maintains O(log n) growth with respect to visited size
- Provides φ-optimal distribution of resources
- Ensures quantum-like state transitions between depths
- Harmonically dampened to prevent exponential explosion
"""
# Golden ratio φ = (1 + √5) / 2
φ = (1 + 5 ** 0.5) / 2
# Calculate normalized depth factor [0, 1]
depth_factor = (max_depth - depth) / max_depth if depth < max_depth else 0
# Compute harmonic number for current depth
harmonic = sum(1/k for k in range(1, depth + 2))
# Calculate quantum batch size
batch_size = int(np.ceil(
(φ ** (depth_factor * 2)) * # Golden ratio scaling
np.log2(visited_size + 2) * # Logarithmic growth factor
(1 / harmonic) * # Harmonic dampening
max(1, min(20, frontier_size / 10)) # Frontier-aware scaling
))
# Enforce practical bounds
return max(1, min(100, batch_size))
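# Worked example of the formula above: with depth=1, max_depth=2, frontier_size=50
# and visited_size=100, depth_factor = 0.5, phi^1 ≈ 1.618, log2(102) ≈ 6.67,
# H = 1 + 1/2 = 1.5 and the frontier term is min(20, 50/10) = 5, so
# batch_size = ceil(1.618 * 6.67 * (1/1.5) * 5) = ceil(35.99) = 36.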
class BFSDeepCrawlStrategy(DeepCrawlStrategy):
"""Breadth-First Search with Einstein-Rosen bridge optimization"""
__slots__ = ('max_depth', 'filter_chain', 'priority_fn', 'stats', '_cancel', 'semaphore')
def __init__(self,
max_depth: int,
filter_chain: FilterChain = FilterChain(),
priority_fn: Callable[[str], Awaitable[float]] = lambda url: 1.0,
logger: logging.Logger = None):
self.max_depth = max_depth
self.filter_chain = filter_chain
self.priority_fn = priority_fn
self.stats = TraversalStats()
self._cancel = asyncio.Event()
self.semaphore = asyncio.Semaphore(1000)
async def traverse(self,
start_url: str,
crawler: AsyncWebCrawler,
config: CrawlerRunConfig) -> RunManyReturn:
"""Non-blocking BFS with O(b^d) time complexity awareness"""
ctx = TraversalContext(self.priority_fn)
ctx.frontier.insert(self.priority_fn(start_url), (start_url, None, 0))
ctx.visited.add(start_url)
ctx.depths[start_url] = 0
while not ctx.frontier.is_empty() and not self._cancel.is_set():
# Use the batch-size heuristic to find the top_n value
top_n = calculate_quantum_batch_size(
depth=ctx.current_depth,
max_depth=self.max_depth,
frontier_size=len(ctx.frontier._heap),
visited_size=len(ctx.visited)
)
urls = ctx.frontier.extract(top_n=top_n)
# url, parent, depth = ctx.frontier.extract(top_n=top_n)
if urls:
ctx.current_depth = urls[0][2]
async with self.semaphore:
results = await collect_many_results([url for (url, parent, depth) in urls], crawler, config)
# results = await asyncio.gather(*[
# collect_results(url, crawler, config) for (url, parent, depth) in urls
# ])
# result = _result[0]
for ix, result in enumerate(results):
url, parent, depth = result.url, urls[ix][1], urls[ix][2]
result.metadata['depth'] = depth
result.metadata['parent'] = parent
yield result
if depth < self.max_depth:
async for link in self.link_hypercube(result):
if link not in ctx.visited:
priority = self.priority_fn(link)
ctx.frontier.insert(priority, (link, url, depth + 1))
ctx.visited.add(link)
ctx.depths[link] = depth + 1
async def validate_url(self, url: str) -> bool:
"""URL validation with λ-calculus purity (async, so an lru_cache here would cache coroutine objects rather than results)"""
try:
parsed = urlparse(url)
return (parsed.scheme in {'http', 'https'}
and '.' in parsed.netloc
and await self.filter_chain.apply(url))
except Exception:
return False
async def link_hypercube(self, result: CrawlResult) -> AsyncGenerator[str, None]:
"""Hilbert-ordered link generation with O(1) yield latency"""
links = (link['href'] for link in result.links.get('internal', []))
# validate_url is a coroutine, so it is awaited per link instead of being passed to filter()
validated = [link for link in links if await self.validate_url(link)]
for link in sorted(validated, key=lambda x: -self.priority_fn(x)):
yield link
def __aiter__(self) -> AsyncGenerator[CrawlResult, None]:
"""Native async iterator interface"""
return self.traverse()
async def __anext__(self) -> CrawlResult:
"""True async iterator protocol implementation"""
result = await self.traverse().__anext__()
if result:
return result
raise StopAsyncIteration
async def precompute_priority(self, url):
return super().precompute_priority(url)
async def shutdown(self):
self._cancel.set()
# ------ Usage That Will Drop Jaws ------ #
async def main():
"""Quantum crawl example"""
strategy = BFSDeepCrawlStrategy(
max_depth=2,
priority_fn=lambda url: 1.0 / (len(url) + 1e-9), # Inverse length priority
# filter_chain=FilterChain(...)
)
config: CrawlerRunConfig = CrawlerRunConfig(
deep_crawl_strategy=strategy,
stream=False,
verbose=True,
cache_mode=CacheMode.BYPASS
)
async with AsyncWebCrawler() as crawler:
run_decorator = DeepCrawlDecorator(crawler)
setattr(crawler, "original_arun", crawler.arun)
crawler.arun = run_decorator(crawler.arun)
start_time = time.perf_counter()
async for result in crawler.arun("https://docs.crawl4ai.com", config=config):
print(f"🌀 {result.url} (Depth: {result.metadata['depth']})")
print(f"Deep crawl completed in {time.perf_counter() - start_time:.2f}s")
if __name__ == "__main__":
asyncio.run(main())

View File

@@ -1,110 +0,0 @@
# dfs_deep_crawl_strategy.py
from typing import AsyncGenerator, Optional, Set, Dict, List, Tuple
from ..models import CrawlResult
from .bfs_strategy import BFSDeepCrawlStrategy # noqa
from ..types import AsyncWebCrawler, CrawlerRunConfig
class DFSDeepCrawlStrategy(BFSDeepCrawlStrategy):
"""
Depth-First Search (DFS) deep crawling strategy.
Inherits URL validation and link discovery from BFSDeepCrawlStrategy.
Overrides _arun_batch and _arun_stream to use a stack (LIFO) for DFS traversal.
"""
async def _arun_batch(
self,
start_url: str,
crawler: AsyncWebCrawler,
config: CrawlerRunConfig,
) -> List[CrawlResult]:
"""
Batch (non-streaming) DFS mode.
Uses a stack to traverse URLs in DFS order, aggregating CrawlResults into a list.
"""
visited: Set[str] = set()
# Stack items: (url, parent_url, depth)
stack: List[Tuple[str, Optional[str], int]] = [(start_url, None, 0)]
depths: Dict[str, int] = {start_url: 0}
results: List[CrawlResult] = []
while stack and not self._cancel_event.is_set():
url, parent, depth = stack.pop()
if url in visited or depth > self.max_depth:
continue
visited.add(url)
# Clone config to disable recursive deep crawling.
batch_config = config.clone(deep_crawl_strategy=None, stream=False)
url_results = await crawler.arun_many(urls=[url], config=batch_config)
for result in url_results:
result.metadata = result.metadata or {}
result.metadata["depth"] = depth
result.metadata["parent_url"] = parent
if self.url_scorer:
result.metadata["score"] = self.url_scorer.score(url)
results.append(result)
# Count only successful crawls toward max_pages limit
if result.success:
self._pages_crawled += 1
# Check if we've reached the limit during batch processing
if self._pages_crawled >= self.max_pages:
self.logger.info(f"Max pages limit ({self.max_pages}) reached during batch, stopping crawl")
break # Exit the generator
# Only discover links from successful crawls
new_links: List[Tuple[str, Optional[str]]] = []
await self.link_discovery(result, url, depth, visited, new_links, depths)
# Push new links in reverse order so the first discovered is processed next.
for new_url, new_parent in reversed(new_links):
new_depth = depths.get(new_url, depth + 1)
stack.append((new_url, new_parent, new_depth))
return results
async def _arun_stream(
self,
start_url: str,
crawler: AsyncWebCrawler,
config: CrawlerRunConfig,
) -> AsyncGenerator[CrawlResult, None]:
"""
Streaming DFS mode.
Uses a stack to traverse URLs in DFS order and yields CrawlResults as they become available.
"""
visited: Set[str] = set()
stack: List[Tuple[str, Optional[str], int]] = [(start_url, None, 0)]
depths: Dict[str, int] = {start_url: 0}
while stack and not self._cancel_event.is_set():
url, parent, depth = stack.pop()
if url in visited or depth > self.max_depth:
continue
visited.add(url)
stream_config = config.clone(deep_crawl_strategy=None, stream=True)
stream_gen = await crawler.arun_many(urls=[url], config=stream_config)
async for result in stream_gen:
result.metadata = result.metadata or {}
result.metadata["depth"] = depth
result.metadata["parent_url"] = parent
if self.url_scorer:
result.metadata["score"] = self.url_scorer.score(url)
yield result
# Only count successful crawls toward max_pages limit
# and only discover links from successful crawls
if result.success:
self._pages_crawled += 1
# Check if we've reached the limit during batch processing
if self._pages_crawled >= self.max_pages:
self.logger.info(f"Max pages limit ({self.max_pages}) reached during batch, stopping crawl")
break # Exit the generator
new_links: List[Tuple[str, Optional[str]]] = []
await self.link_discovery(result, url, depth, visited, new_links, depths)
for new_url, new_parent in reversed(new_links):
new_depth = depths.get(new_url, depth + 1)
stack.append((new_url, new_parent, new_depth))

View File

@@ -1,686 +0,0 @@
from abc import ABC, abstractmethod
from typing import List, Pattern, Set, Union
from urllib.parse import urlparse
from array import array
import re
import logging
from functools import lru_cache
import fnmatch
from dataclasses import dataclass
import weakref
import math
from collections import defaultdict
from typing import Dict
from ..utils import HeadPeekr
import asyncio
import inspect
@dataclass
class FilterStats:
__slots__ = ("_counters",)
def __init__(self):
# Use array of unsigned ints for atomic operations
self._counters = array("I", [0, 0, 0]) # total, passed, rejected
@property
def total_urls(self):
return self._counters[0]
@property
def passed_urls(self):
return self._counters[1]
@property
def rejected_urls(self):
return self._counters[2]
class URLFilter(ABC):
"""Optimized base filter class"""
__slots__ = ("name", "stats", "_logger_ref")
def __init__(self, name: str = None):
self.name = name or self.__class__.__name__
self.stats = FilterStats()
# Lazy logger initialization using weakref
self._logger_ref = None
@property
def logger(self):
if self._logger_ref is None or self._logger_ref() is None:
logger = logging.getLogger(f"urlfilter.{self.name}")
self._logger_ref = weakref.ref(logger)
return self._logger_ref()
@abstractmethod
def apply(self, url: str) -> bool:
pass
def _update_stats(self, passed: bool):
# Use direct array index for speed
self.stats._counters[0] += 1 # total
self.stats._counters[1] += passed # passed
self.stats._counters[2] += not passed # rejected
class FilterChain:
"""Optimized filter chain"""
__slots__ = ("filters", "stats", "_logger_ref")
def __init__(self, filters: List[URLFilter] = None):
self.filters = tuple(filters or []) # Immutable tuple for speed
self.stats = FilterStats()
self._logger_ref = None
@property
def logger(self):
if self._logger_ref is None or self._logger_ref() is None:
logger = logging.getLogger("urlfilter.chain")
self._logger_ref = weakref.ref(logger)
return self._logger_ref()
def add_filter(self, filter_: URLFilter) -> "FilterChain":
"""Add a filter to the chain (filters are stored as an immutable tuple)"""
self.filters = self.filters + (filter_,)
return self  # Enable method chaining
async def apply(self, url: str) -> bool:
"""Apply all filters concurrently when possible"""
self.stats._counters[0] += 1 # Total processed URLs
tasks = []
for f in self.filters:
result = f.apply(url)
if inspect.isawaitable(result):
tasks.append(result) # Collect async tasks
elif not result: # Sync rejection
self.stats._counters[2] += 1 # Sync rejected
return False
if tasks:
results = await asyncio.gather(*tasks)
# Count how many filters rejected
rejections = results.count(False)
self.stats._counters[2] += rejections
if not all(results):
return False # Stop early if any filter rejected
self.stats._counters[1] += 1 # Passed
return True
class URLPatternFilter(URLFilter):
"""Pattern filter balancing speed and completeness"""
__slots__ = (
"_simple_suffixes",
"_simple_prefixes",
"_domain_patterns",
"_path_patterns",
"_reverse",
)
PATTERN_TYPES = {
"SUFFIX": 1, # *.html
"PREFIX": 2, # /foo/*
"DOMAIN": 3, # *.example.com
"PATH": 4, # Everything else
"REGEX": 5,
}
def __init__(
self,
patterns: Union[str, Pattern, List[Union[str, Pattern]]],
use_glob: bool = True,
reverse: bool = False,
):
super().__init__()
self._reverse = reverse
patterns = [patterns] if isinstance(patterns, (str, Pattern)) else patterns
self._simple_suffixes = set()
self._simple_prefixes = set()
self._domain_patterns = []
self._path_patterns = []
for pattern in patterns:
pattern_type = self._categorize_pattern(pattern)
self._add_pattern(pattern, pattern_type)
def _categorize_pattern(self, pattern: str) -> int:
"""Categorize pattern for specialized handling"""
if not isinstance(pattern, str):
return self.PATTERN_TYPES["PATH"]
# Check if it's a regex pattern
if pattern.startswith("^") or pattern.endswith("$") or "\\d" in pattern:
return self.PATTERN_TYPES["REGEX"]
if pattern.count("*") == 1:
if pattern.startswith("*."):
return self.PATTERN_TYPES["SUFFIX"]
if pattern.endswith("/*"):
return self.PATTERN_TYPES["PREFIX"]
if "://" in pattern and pattern.startswith("*."):
return self.PATTERN_TYPES["DOMAIN"]
return self.PATTERN_TYPES["PATH"]
def _add_pattern(self, pattern: str, pattern_type: int):
"""Add pattern to appropriate matcher"""
if pattern_type == self.PATTERN_TYPES["REGEX"]:
# For regex patterns, compile directly without glob translation
if isinstance(pattern, str) and (
pattern.startswith("^") or pattern.endswith("$") or "\\d" in pattern
):
self._path_patterns.append(re.compile(pattern))
return
elif pattern_type == self.PATTERN_TYPES["SUFFIX"]:
self._simple_suffixes.add(pattern[2:])
elif pattern_type == self.PATTERN_TYPES["PREFIX"]:
self._simple_prefixes.add(pattern[:-2])
elif pattern_type == self.PATTERN_TYPES["DOMAIN"]:
self._domain_patterns.append(re.compile(pattern.replace("*.", r"[^/]+\.")))
else:
if isinstance(pattern, str):
# Handle complex glob patterns
if "**" in pattern:
pattern = pattern.replace("**", ".*")
if "{" in pattern:
# Convert {a,b} to (a|b)
pattern = re.sub(
r"\{([^}]+)\}",
lambda m: f'({"|".join(m.group(1).split(","))})',
pattern,
)
pattern = fnmatch.translate(pattern)
self._path_patterns.append(
pattern if isinstance(pattern, Pattern) else re.compile(pattern)
)
@lru_cache(maxsize=10000)
def apply(self, url: str) -> bool:
# Quick suffix check (*.html)
if self._simple_suffixes:
path = url.split("?")[0]
if path.split("/")[-1].split(".")[-1] in self._simple_suffixes:
result = True
self._update_stats(result)
return not result if self._reverse else result
# Domain check
if self._domain_patterns:
for pattern in self._domain_patterns:
if pattern.match(url):
result = True
self._update_stats(result)
return not result if self._reverse else result
# Prefix check (/foo/*)
if self._simple_prefixes:
path = url.split("?")[0]
# Prefix matching with path-boundary checking: the matched prefix must be
# followed by a path separator ('/'), query string ('?'), fragment ('#'),
# or the end of the path, so "/api/*" matches complete path segments but
# not substrings such as "/apiv2/".
for prefix in self._simple_prefixes:
if path.startswith(prefix):
if len(path) == len(prefix) or path[len(prefix)] in ['/', '?', '#']:
result = True
self._update_stats(result)
return not result if self._reverse else result
# Complex patterns
if self._path_patterns:
if any(p.search(url) for p in self._path_patterns):
result = True
self._update_stats(result)
return not result if self._reverse else result
result = False
self._update_stats(result)
return not result if self._reverse else result
class ContentTypeFilter(URLFilter):
"""Optimized content type filter using fast lookups"""
__slots__ = ("allowed_types", "_ext_map", "_check_extension")
# Fast extension to mime type mapping
_MIME_MAP = {
# Text Formats
"txt": "text/plain",
"html": "text/html",
"htm": "text/html",
"xhtml": "application/xhtml+xml",
"css": "text/css",
"csv": "text/csv",
"ics": "text/calendar",
"js": "application/javascript",
# Images
"bmp": "image/bmp",
"gif": "image/gif",
"jpeg": "image/jpeg",
"jpg": "image/jpeg",
"png": "image/png",
"svg": "image/svg+xml",
"tiff": "image/tiff",
"ico": "image/x-icon",
"webp": "image/webp",
# Audio
"mp3": "audio/mpeg",
"wav": "audio/wav",
"ogg": "audio/ogg",
"m4a": "audio/mp4",
"aac": "audio/aac",
# Video
"mp4": "video/mp4",
"mpeg": "video/mpeg",
"webm": "video/webm",
"avi": "video/x-msvideo",
"mov": "video/quicktime",
"flv": "video/x-flv",
"wmv": "video/x-ms-wmv",
"mkv": "video/x-matroska",
# Applications
"json": "application/json",
"xml": "application/xml",
"pdf": "application/pdf",
"zip": "application/zip",
"gz": "application/gzip",
"tar": "application/x-tar",
"rar": "application/vnd.rar",
"7z": "application/x-7z-compressed",
"exe": "application/vnd.microsoft.portable-executable",
"msi": "application/x-msdownload",
# Fonts
"woff": "font/woff",
"woff2": "font/woff2",
"ttf": "font/ttf",
"otf": "font/otf",
# Microsoft Office
"doc": "application/msword",
"dot": "application/msword",
"docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
"xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
"xls": "application/vnd.ms-excel",
"ppt": "application/vnd.ms-powerpoint",
"pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
# OpenDocument Formats
"odt": "application/vnd.oasis.opendocument.text",
"ods": "application/vnd.oasis.opendocument.spreadsheet",
"odp": "application/vnd.oasis.opendocument.presentation",
# Archives
"tar.gz": "application/gzip",
"tgz": "application/gzip",
"bz2": "application/x-bzip2",
# Others
"rtf": "application/rtf",
"apk": "application/vnd.android.package-archive",
"epub": "application/epub+zip",
"jar": "application/java-archive",
"swf": "application/x-shockwave-flash",
"midi": "audio/midi",
"mid": "audio/midi",
"ps": "application/postscript",
"ai": "application/postscript",
"eps": "application/postscript",
# Custom or less common
"bin": "application/octet-stream",
"dmg": "application/x-apple-diskimage",
"iso": "application/x-iso9660-image",
"deb": "application/x-debian-package",
"rpm": "application/x-rpm",
"sqlite": "application/vnd.sqlite3",
# Placeholder
"unknown": "application/octet-stream", # Fallback for unknown file types
# php
"php": "application/x-httpd-php",
"php3": "application/x-httpd-php",
"php4": "application/x-httpd-php",
"php5": "application/x-httpd-php",
"php7": "application/x-httpd-php",
"phtml": "application/x-httpd-php",
"phps": "application/x-httpd-php-source",
}
@staticmethod
@lru_cache(maxsize=1000)
def _extract_extension(url: str) -> str:
"""Extracts file extension from a URL."""
# Remove scheme (http://, https://) if present
if "://" in url:
url = url.split("://", 1)[-1] # Get everything after '://'
# Remove domain (everything up to the first '/')
path_start = url.find("/")
path = url[path_start:] if path_start != -1 else ""
# Extract last filename in path
filename = path.rsplit("/", 1)[-1] if "/" in path else ""
# Extract and validate extension
if "." not in filename:
return ""
return filename.rpartition(".")[-1].lower()
def __init__(
self,
allowed_types: Union[str, List[str]],
check_extension: bool = True,
ext_map: Dict[str, str] = _MIME_MAP,
):
super().__init__()
# Normalize and store as frozenset for fast lookup
self.allowed_types = frozenset(
t.lower()
for t in (
allowed_types if isinstance(allowed_types, list) else [allowed_types]
)
)
self._check_extension = check_extension
# Pre-compute extension map for allowed types
self._ext_map = frozenset(
ext
for ext, mime in self._MIME_MAP.items()
if any(allowed in mime for allowed in self.allowed_types)
)
@lru_cache(maxsize=1000)
def _check_url_cached(self, url: str) -> bool:
"""Cached URL checking"""
if not self._check_extension:
return True
ext = self._extract_extension(url)
if not ext:
return True
return ext in self._ext_map
def apply(self, url: str) -> bool:
"""Fast extension check with caching"""
result = self._check_url_cached(url)
self._update_stats(result)
return result
class DomainFilter(URLFilter):
"""Optimized domain filter with fast lookups and caching"""
__slots__ = ("_allowed_domains", "_blocked_domains", "_domain_cache")
# Regex for fast domain extraction
_DOMAIN_REGEX = re.compile(r"://([^/]+)")
def __init__(
self,
allowed_domains: Union[str, List[str]] = None,
blocked_domains: Union[str, List[str]] = None,
):
super().__init__()
# Convert inputs to frozensets for immutable, fast lookups
self._allowed_domains = (
frozenset(self._normalize_domains(allowed_domains))
if allowed_domains
else None
)
self._blocked_domains = (
frozenset(self._normalize_domains(blocked_domains))
if blocked_domains
else frozenset()
)
@staticmethod
def _normalize_domains(domains: Union[str, List[str]]) -> Set[str]:
"""Fast domain normalization"""
if isinstance(domains, str):
return {domains.lower()}
return {d.lower() for d in domains}
@staticmethod
def _is_subdomain(domain: str, parent_domain: str) -> bool:
"""Check if domain is a subdomain of parent_domain"""
return domain == parent_domain or domain.endswith(f".{parent_domain}")
@staticmethod
@lru_cache(maxsize=10000)
def _extract_domain(url: str) -> str:
"""Ultra-fast domain extraction with regex and caching"""
match = DomainFilter._DOMAIN_REGEX.search(url)
return match.group(1).lower() if match else ""
def apply(self, url: str) -> bool:
"""Optimized domain checking with early returns"""
# Skip processing if no filters
if not self._blocked_domains and self._allowed_domains is None:
self._update_stats(True)
return True
domain = self._extract_domain(url)
# Check for blocked domains, including subdomains
for blocked in self._blocked_domains:
if self._is_subdomain(domain, blocked):
self._update_stats(False)
return False
# If no allowed domains specified, accept all non-blocked
if self._allowed_domains is None:
self._update_stats(True)
return True
# Check if domain matches any allowed domain (including subdomains)
for allowed in self._allowed_domains:
if self._is_subdomain(domain, allowed):
self._update_stats(True)
return True
# No matches found
self._update_stats(False)
return False
class ContentRelevanceFilter(URLFilter):
"""BM25-based relevance filter using head section content"""
__slots__ = ("query_terms", "threshold", "k1", "b", "avgdl")
def __init__(
self,
query: str,
threshold: float,
k1: float = 1.2,
b: float = 0.75,
avgdl: int = 1000,
):
super().__init__(name="BM25RelevanceFilter")
self.query_terms = self._tokenize(query)
self.threshold = threshold
self.k1 = k1 # TF saturation parameter
self.b = b # Length normalization parameter
self.avgdl = avgdl # Average document length (empirical value)
async def apply(self, url: str) -> bool:
head_content = await HeadPeekr.peek_html(url)
if not head_content:
self._update_stats(False)
return False
# Field extraction with weighting
fields = {
"title": HeadPeekr.get_title(head_content) or "",
"meta": HeadPeekr.extract_meta_tags(head_content),
}
doc_text = self._build_document(fields)
score = self._bm25(doc_text)
decision = score >= self.threshold
self._update_stats(decision)
return decision
def _build_document(self, fields: Dict) -> str:
"""Weighted document construction"""
return " ".join(
[
fields["title"] * 3, # Title weight
fields["meta"].get("description", "") * 2,
fields["meta"].get("keywords", ""),
" ".join(fields["meta"].values()),
]
)
def _tokenize(self, text: str) -> List[str]:
"""Fast case-insensitive tokenization"""
return text.lower().split()
def _bm25(self, document: str) -> float:
"""Optimized BM25 implementation for head sections"""
doc_terms = self._tokenize(document)
doc_len = len(doc_terms)
tf = defaultdict(int)
for term in doc_terms:
tf[term] += 1
score = 0.0
for term in set(self.query_terms):
term_freq = tf[term]
idf = math.log((1 + 1) / (term_freq + 0.5) + 1) # Simplified IDF
numerator = term_freq * (self.k1 + 1)
denominator = term_freq + self.k1 * (
1 - self.b + self.b * (doc_len / self.avgdl)
)
score += idf * (numerator / denominator)
return score
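# Worked example of the BM25 terms above: with the defaults k1=1.2, b=0.75,
# avgdl=1000, a query term appearing twice (tf=2) in a 20-token head document
# contributes idf = ln(2/2.5 + 1) ≈ 0.588, numerator = 2*(1.2+1) = 4.4 and
# denominator = 2 + 1.2*(1 - 0.75 + 0.75*20/1000) ≈ 2.318, i.e. about 1.12
# toward the total score compared against `threshold`.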
class SEOFilter(URLFilter):
"""Quantitative SEO quality assessment filter using head section analysis"""
__slots__ = ("threshold", "_weights", "_kw_patterns")
# Based on SEMrush/Google ranking factors research
DEFAULT_WEIGHTS = {
"title_length": 0.15,
"title_kw": 0.18,
"meta_description": 0.12,
"canonical": 0.10,
"robot_ok": 0.20, # Most critical factor
"schema_org": 0.10,
"url_quality": 0.15,
}
def __init__(
self,
threshold: float = 0.65,
keywords: List[str] = None,
weights: Dict[str, float] = None,
):
super().__init__(name="SEOFilter")
self.threshold = threshold
self._weights = weights or self.DEFAULT_WEIGHTS
self._kw_patterns = (
re.compile(
r"\b({})\b".format("|".join(map(re.escape, keywords or []))), re.I
)
if keywords
else None
)
async def apply(self, url: str) -> bool:
head_content = await HeadPeekr.peek_html(url)
if not head_content:
self._update_stats(False)
return False
meta = HeadPeekr.extract_meta_tags(head_content)
title = HeadPeekr.get_title(head_content) or ""
parsed_url = urlparse(url)
scores = {
"title_length": self._score_title_length(title),
"title_kw": self._score_keyword_presence(title),
"meta_description": self._score_meta_description(
meta.get("description", "")
),
"canonical": self._score_canonical(meta.get("canonical"), url),
"robot_ok": 1.0 if "noindex" not in meta.get("robots", "") else 0.0,
"schema_org": self._score_schema_org(head_content),
"url_quality": self._score_url_quality(parsed_url),
}
total_score = sum(
weight * scores[factor] for factor, weight in self._weights.items()
)
decision = total_score >= self.threshold
self._update_stats(decision)
return decision
def _score_title_length(self, title: str) -> float:
length = len(title)
if 50 <= length <= 60:
return 1.0
if 40 <= length < 50 or 60 < length <= 70:
return 0.7
return 0.3 # Poor length
def _score_keyword_presence(self, text: str) -> float:
if not self._kw_patterns:
return 0.0
matches = len(self._kw_patterns.findall(text))
return min(matches * 0.3, 1.0) # Max 3 matches
def _score_meta_description(self, desc: str) -> float:
length = len(desc)
if 140 <= length <= 160:
return 1.0
return 0.5 if 120 <= length <= 200 else 0.2
def _score_canonical(self, canonical: str, original: str) -> float:
if not canonical:
return 0.5 # Neutral score
return 1.0 if canonical == original else 0.2
def _score_schema_org(self, html: str) -> float:
# Detect any schema.org markup in head
return (
1.0
if re.search(r'<script[^>]+type=["\']application/ld\+json', html)
else 0.0
)
def _score_url_quality(self, parsed_url) -> float:
score = 1.0
path = parsed_url.path.lower()
# Penalty factors
if len(path) > 80:
score *= 0.7
if re.search(r"\d{4}", path):
score *= 0.8 # Numbers in path
if parsed_url.query:
score *= 0.6 # URL parameters
if "_" in path:
score *= 0.9 # Underscores vs hyphens
return score
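# Minimal composition sketch (illustrative only): the domain, content type and
# URL pattern below are placeholder values, not part of the library defaults.
if __name__ == "__main__":
    async def _filter_chain_demo() -> None:
        chain = FilterChain([
            DomainFilter(allowed_domains=["example.com"]),
            ContentTypeFilter(allowed_types=["text/html"]),
            URLPatternFilter(patterns=["*/blog/*"]),
        ])
        # FilterChain.apply awaits async filters and short-circuits on the
        # first synchronous rejection.
        print(await chain.apply("https://example.com/blog/post-1"))  # True

    asyncio.run(_filter_chain_demo())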

View File

@@ -1,519 +0,0 @@
from abc import ABC, abstractmethod
from typing import List, Dict, Optional
from dataclasses import dataclass
from urllib.parse import urlparse, unquote
import re
import logging
from functools import lru_cache
from array import array
import ctypes
import platform
PLATFORM = platform.system()
# Pre-computed scores for small path-depth distances (used by PathDepthScorer)
_SCORE_LOOKUP = [1.0, 0.5, 0.3333333333333333, 0.25]
# Pre-computed scores for common year differences
_FRESHNESS_SCORES = [
1.0, # Current year
0.9, # Last year
0.8, # 2 years ago
0.7, # 3 years ago
0.6, # 4 years ago
0.5, # 5 years ago
]
class ScoringStats:
__slots__ = ('_urls_scored', '_total_score', '_min_score', '_max_score')
def __init__(self):
self._urls_scored = 0
self._total_score = 0.0
self._min_score = None # Lazy initialization
self._max_score = None
def update(self, score: float) -> None:
"""Optimized update with minimal operations"""
self._urls_scored += 1
self._total_score += score
# Lazy min/max tracking - only if actually accessed
if self._min_score is not None:
if score < self._min_score:
self._min_score = score
if self._max_score is not None:
if score > self._max_score:
self._max_score = score
def get_average(self) -> float:
"""Direct calculation instead of property"""
return self._total_score / self._urls_scored if self._urls_scored else 0.0
def get_min(self) -> float:
"""Lazy min calculation"""
if self._min_score is None:
self._min_score = self._total_score / self._urls_scored if self._urls_scored else 0.0
return self._min_score
def get_max(self) -> float:
"""Lazy max calculation"""
if self._max_score is None:
self._max_score = self._total_score / self._urls_scored if self._urls_scored else 0.0
return self._max_score
class URLScorer(ABC):
__slots__ = ('_weight', '_stats')
def __init__(self, weight: float = 1.0):
# Store weight directly as float32 for memory efficiency
self._weight = ctypes.c_float(weight).value
self._stats = ScoringStats()
@abstractmethod
def _calculate_score(self, url: str) -> float:
"""Calculate raw score for URL."""
pass
def score(self, url: str) -> float:
"""Calculate weighted score with minimal overhead."""
score = self._calculate_score(url) * self._weight
self._stats.update(score)
return score
@property
def stats(self):
"""Access to scoring statistics."""
return self._stats
@property
def weight(self):
return self._weight
class CompositeScorer(URLScorer):
__slots__ = ('_scorers', '_normalize', '_weights_array', '_score_array')
def __init__(self, scorers: List[URLScorer], normalize: bool = True):
"""Initialize composite scorer combining multiple scoring strategies.
Optimized for:
- Fast parallel scoring
- Memory efficient score aggregation
- Quick short-circuit conditions
- Pre-allocated arrays
Args:
scorers: List of scoring strategies to combine
normalize: Whether to normalize final score by scorer count
"""
super().__init__(weight=1.0)
self._scorers = scorers
self._normalize = normalize
# Pre-allocate arrays for scores and weights
self._weights_array = array('f', [s.weight for s in scorers])
self._score_array = array('f', [0.0] * len(scorers))
@lru_cache(maxsize=10000)
def _calculate_score(self, url: str) -> float:
"""Calculate combined score from all scoring strategies.
Uses:
1. Pre-allocated arrays for scores
2. Short-circuit on zero scores
3. Optimized normalization
4. Vectorized operations where possible
Args:
url: URL to score
Returns:
Combined and optionally normalized score
"""
total_score = 0.0
scores = self._score_array
# Get scores from all scorers
for i, scorer in enumerate(self._scorers):
# Use public score() method which applies weight
scores[i] = scorer.score(url)
total_score += scores[i]
# Normalize if requested
if self._normalize and self._scorers:
count = len(self._scorers)
return total_score / count
return total_score
def score(self, url: str) -> float:
"""Public scoring interface with stats tracking.
Args:
url: URL to score
Returns:
Final combined score
"""
score = self._calculate_score(url)
self.stats.update(score)
return score
class KeywordRelevanceScorer(URLScorer):
__slots__ = ('_weight', '_stats', '_keywords', '_case_sensitive')
def __init__(self, keywords: List[str], weight: float = 1.0, case_sensitive: bool = False):
super().__init__(weight=weight)
self._case_sensitive = case_sensitive
# Pre-process keywords once
self._keywords = [k if case_sensitive else k.lower() for k in keywords]
@lru_cache(maxsize=10000)
def _url_bytes(self, url: str) -> bytes:
"""Cache decoded URL bytes"""
return url.encode('utf-8') if self._case_sensitive else url.lower().encode('utf-8')
def _calculate_score(self, url: str) -> float:
"""Fast string matching without regex or byte conversion"""
if not self._case_sensitive:
url = url.lower()
matches = sum(1 for k in self._keywords if k in url)
# Fast return paths
if not matches:
return 0.0
if matches == len(self._keywords):
return 1.0
return matches / len(self._keywords)
class PathDepthScorer(URLScorer):
__slots__ = ('_weight', '_stats', '_optimal_depth')
def __init__(self, optimal_depth: int = 3, weight: float = 1.0):
super().__init__(weight=weight)
self._optimal_depth = optimal_depth
@staticmethod
@lru_cache(maxsize=10000)
def _quick_depth(path: str) -> int:
"""Ultra fast path depth calculation.
Examples:
- "http://example.com" -> 0 # No path segments
- "http://example.com/" -> 0 # Empty path
- "http://example.com/a" -> 1
- "http://example.com/a/b" -> 2
"""
if not path or path == '/':
return 0
if '/' not in path:
return 0
depth = 0
last_was_slash = True
for c in path:
if c == '/':
if not last_was_slash:
depth += 1
last_was_slash = True
else:
last_was_slash = False
if not last_was_slash:
depth += 1
return depth
@lru_cache(maxsize=10000) # Cache the whole calculation
def _calculate_score(self, url: str) -> float:
pos = url.find('/', url.find('://') + 3)
if pos == -1:
depth = 0
else:
depth = self._quick_depth(url[pos:])
# Use lookup table for common distances
distance = depth - self._optimal_depth
distance = distance if distance >= 0 else -distance # Faster than abs()
if distance < 4:
return _SCORE_LOOKUP[distance]
return 1.0 / (1.0 + distance)
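# Worked example: with optimal_depth=3, "https://example.com/a/b" has depth 2,
# so distance 1 maps to _SCORE_LOOKUP[1] = 0.5, while a 7-segment path falls
# back to 1.0 / (1.0 + 4) = 0.2 before the scorer's weight is applied.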
class ContentTypeScorer(URLScorer):
__slots__ = ('_weight', '_exact_types', '_regex_types')
def __init__(self, type_weights: Dict[str, float], weight: float = 1.0):
"""Initialize scorer with type weights map.
Args:
type_weights: Dict mapping file extensions/patterns to scores (e.g. {'.html$': 1.0})
weight: Overall weight multiplier for this scorer
"""
super().__init__(weight=weight)
self._exact_types = {} # Fast lookup for simple extensions
self._regex_types = [] # Fallback for complex patterns
# Split into exact vs regex matchers for performance
for pattern, score in type_weights.items():
if pattern.startswith('.') and pattern.endswith('$'):
ext = pattern[1:-1]
self._exact_types[ext] = score
else:
self._regex_types.append((re.compile(pattern), score))
# Sort complex patterns by score for early exit
self._regex_types.sort(key=lambda x: -x[1])
@staticmethod
@lru_cache(maxsize=10000)
def _quick_extension(url: str) -> str:
"""Extract file extension ultra-fast without regex/splits.
Handles:
- Basic extensions: "example.html" -> "html"
- Query strings: "page.php?id=1" -> "php"
- Fragments: "doc.pdf#page=1" -> "pdf"
- Path params: "file.jpg;width=100" -> "jpg"
Args:
url: URL to extract extension from
Returns:
Extension without dot, or empty string if none found
"""
pos = url.rfind('.')
if pos == -1:
return ''
# Find first non-alphanumeric char after extension
end = len(url)
for i in range(pos + 1, len(url)):
c = url[i]
# Stop at query string, fragment, path param or any non-alphanumeric
if c in '?#;' or not c.isalnum():
end = i
break
return url[pos + 1:end].lower()
@lru_cache(maxsize=10000)
def _calculate_score(self, url: str) -> float:
"""Calculate content type score for URL.
Uses staged approach:
1. Try exact extension match (fast path)
2. Fall back to regex patterns if needed
Args:
url: URL to score
Returns:
Score between 0.0 and 1.0 * weight
"""
# Fast path: direct extension lookup
ext = self._quick_extension(url)
if ext:
score = self._exact_types.get(ext, None)
if score is not None:
return score
# Slow path: regex patterns
for pattern, score in self._regex_types:
if pattern.search(url):
return score
return 0.0
class FreshnessScorer(URLScorer):
__slots__ = ('_weight', '_date_pattern', '_current_year')
def __init__(self, weight: float = 1.0, current_year: int = 2024):
"""Initialize freshness scorer.
Extracts and scores dates from URLs using format:
- YYYY/MM/DD
- YYYY-MM-DD
- YYYY_MM_DD
- YYYY (year only)
Args:
weight: Score multiplier
current_year: Year to calculate freshness against (default 2024)
"""
super().__init__(weight=weight)
self._current_year = current_year
# Combined pattern for all date formats
# Uses non-capturing groups (?:) and alternation
self._date_pattern = re.compile(
r'(?:/' # Path separator
r'|[-_])' # or date separators
r'((?:19|20)\d{2})' # Year group (1900-2099)
r'(?:' # Optional month/day group
r'(?:/|[-_])' # Date separator
r'(?:\d{2})' # Month
r'(?:' # Optional day
r'(?:/|[-_])' # Date separator
r'(?:\d{2})' # Day
r')?' # Day is optional
r')?' # Month/day group is optional
)
@lru_cache(maxsize=10000)
def _extract_year(self, url: str) -> Optional[int]:
"""Extract the most recent year from URL.
Args:
url: URL to extract year from
Returns:
Year as int or None if no valid year found
"""
matches = self._date_pattern.finditer(url)
latest_year = None
# Find most recent year
for match in matches:
year = int(match.group(1))
if (year <= self._current_year and # Sanity check
(latest_year is None or year > latest_year)):
latest_year = year
return latest_year
@lru_cache(maxsize=10000)
def _calculate_score(self, url: str) -> float:
"""Calculate freshness score based on URL date.
More recent years score higher. Uses pre-computed scoring
table for common year differences.
Args:
url: URL to score
Returns:
Score between 0.0 and 1.0 * weight
"""
year = self._extract_year(url)
if year is None:
return 0.5 # Default score
# Use lookup table for common year differences
year_diff = self._current_year - year
if year_diff < len(_FRESHNESS_SCORES):
return _FRESHNESS_SCORES[year_diff]
# Fallback calculation for older content
return max(0.1, 1.0 - year_diff * 0.1)
class DomainAuthorityScorer(URLScorer):
__slots__ = ('_weight', '_domain_weights', '_default_weight', '_top_domains')
def __init__(
self,
domain_weights: Dict[str, float],
default_weight: float = 0.5,
weight: float = 1.0,
):
"""Initialize domain authority scorer.
Args:
domain_weights: Dict mapping domains to authority scores
default_weight: Score for unknown domains
weight: Overall scorer weight multiplier
Example:
{
'python.org': 1.0,
'github.com': 0.9,
'medium.com': 0.7
}
"""
super().__init__(weight=weight)
# Pre-process domains for faster lookup
self._domain_weights = {
domain.lower(): score
for domain, score in domain_weights.items()
}
self._default_weight = default_weight
# Cache top domains for fast path
self._top_domains = {
domain: score
for domain, score in sorted(
domain_weights.items(),
key=lambda x: -x[1]
)[:5] # Keep top 5 highest scoring domains
}
@staticmethod
@lru_cache(maxsize=10000)
def _extract_domain(url: str) -> str:
"""Extract domain from URL ultra-fast.
Handles:
- Basic domains: "example.com"
- Subdomains: "sub.example.com"
- Ports: "example.com:8080"
- IPv4: "192.168.1.1"
Args:
url: Full URL to extract domain from
Returns:
Lowercase domain without port
"""
# Find domain start
start = url.find('://')
if start == -1:
start = 0
else:
start += 3
# Find domain end
end = url.find('/', start)
if end == -1:
end = url.find('?', start)
if end == -1:
end = url.find('#', start)
if end == -1:
end = len(url)
# Extract domain and remove port
domain = url[start:end]
port_idx = domain.rfind(':')
if port_idx != -1:
domain = domain[:port_idx]
return domain.lower()
@lru_cache(maxsize=10000)
def _calculate_score(self, url: str) -> float:
"""Calculate domain authority score.
Uses staged approach:
1. Check top domains (fastest)
2. Check full domain weights
3. Return default weight
Args:
url: URL to score
Returns:
Authority score between 0.0 and 1.0 * weight
"""
domain = self._extract_domain(url)
# Fast path: check top domains first
score = self._top_domains.get(domain)
if score is not None:
return score
# Regular path: check all domains
return self._domain_weights.get(domain, self._default_weight)
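# Minimal composition sketch (illustrative only): the keywords, optimal depth
# and sample URL are placeholder values.
if __name__ == "__main__":
    scorer = CompositeScorer([
        KeywordRelevanceScorer(["python", "async"], weight=1.0),
        PathDepthScorer(optimal_depth=2, weight=0.5),
        FreshnessScorer(weight=0.5),
    ], normalize=True)
    # Each sub-scorer applies its own weight; normalize=True divides the sum
    # by the number of scorers (roughly 0.58 for this URL).
    print(scorer.score("https://example.com/blog/2024/async-python-guide"))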

View File

@@ -1,188 +0,0 @@
from typing import List, Optional, Union, AsyncGenerator, Dict, Any
import httpx
import json
from urllib.parse import urljoin
import asyncio
from .async_configs import BrowserConfig, CrawlerRunConfig
from .models import CrawlResult
from .async_logger import AsyncLogger, LogLevel
class Crawl4aiClientError(Exception):
"""Base exception for Crawl4ai Docker client errors."""
pass
class ConnectionError(Crawl4aiClientError):
"""Raised when connection to the Docker server fails."""
pass
class RequestError(Crawl4aiClientError):
"""Raised when the server returns an error response."""
pass
class Crawl4aiDockerClient:
"""Client for interacting with Crawl4AI Docker server with token authentication."""
def __init__(
self,
base_url: str = "http://localhost:8000",
timeout: float = 600.0, # Increased to 10 minutes for crawling operations
verify_ssl: bool = True,
verbose: bool = True,
log_file: Optional[str] = None
):
self.base_url = base_url.rstrip('/')
self.timeout = timeout
self.logger = AsyncLogger(log_file=log_file, log_level=LogLevel.DEBUG, verbose=verbose)
self._http_client = httpx.AsyncClient(
timeout=timeout,
verify=verify_ssl,
headers={"Content-Type": "application/json"}
)
self._token: Optional[str] = None
async def authenticate(self, email: str) -> None:
"""Authenticate with the server and store the token."""
url = urljoin(self.base_url, "/token")
try:
self.logger.info(f"Authenticating with email: {email}", tag="AUTH")
response = await self._http_client.post(url, json={"email": email})
response.raise_for_status()
data = response.json()
self._token = data["access_token"]
self._http_client.headers["Authorization"] = f"Bearer {self._token}"
self.logger.success("Authentication successful", tag="AUTH")
except (httpx.RequestError, httpx.HTTPStatusError) as e:
error_msg = f"Authentication failed: {str(e)}"
self.logger.error(error_msg, tag="ERROR")
raise ConnectionError(error_msg)
async def _check_server(self) -> None:
"""Check if server is reachable, raising an error if not."""
try:
await self._http_client.get(urljoin(self.base_url, "/health"))
self.logger.success(f"Connected to {self.base_url}", tag="READY")
except httpx.RequestError as e:
self.logger.error(f"Server unreachable: {str(e)}", tag="ERROR")
raise ConnectionError(f"Cannot connect to server: {str(e)}")
def _prepare_request(self, urls: List[str], browser_config: Optional[BrowserConfig] = None,
crawler_config: Optional[CrawlerRunConfig] = None) -> Dict[str, Any]:
"""Prepare request data from configs."""
if self._token:
self._http_client.headers["Authorization"] = f"Bearer {self._token}"
return {
"urls": urls,
"browser_config": browser_config.dump() if browser_config else {},
"crawler_config": crawler_config.dump() if crawler_config else {}
}
async def _request(self, method: str, endpoint: str, **kwargs) -> httpx.Response:
"""Make an HTTP request with error handling."""
url = urljoin(self.base_url, endpoint)
try:
response = await self._http_client.request(method, url, **kwargs)
response.raise_for_status()
return response
except httpx.TimeoutException as e:
raise ConnectionError(f"Request timed out: {str(e)}")
except httpx.RequestError as e:
raise ConnectionError(f"Failed to connect: {str(e)}")
except httpx.HTTPStatusError as e:
error_msg = (e.response.json().get("detail", str(e))
if "application/json" in e.response.headers.get("content-type", "")
else str(e))
raise RequestError(f"Server error {e.response.status_code}: {error_msg}")
async def crawl(
self,
urls: List[str],
browser_config: Optional[BrowserConfig] = None,
crawler_config: Optional[CrawlerRunConfig] = None
) -> Union[CrawlResult, List[CrawlResult], AsyncGenerator[CrawlResult, None]]:
"""Execute a crawl operation."""
await self._check_server()
data = self._prepare_request(urls, browser_config, crawler_config)
is_streaming = crawler_config and crawler_config.stream
self.logger.info(f"Crawling {len(urls)} URLs {'(streaming)' if is_streaming else ''}", tag="CRAWL")
if is_streaming:
# For streaming, we need to return the async generator properly
# The caller should be able to do: async for result in await client.crawl(...)
async def streaming_wrapper():
async for result in self._stream_crawl_results(data):
yield result
return streaming_wrapper()
response = await self._request("POST", "/crawl", json=data)
result_data = response.json()
if not result_data.get("success", False):
raise RequestError(f"Crawl failed: {result_data.get('msg', 'Unknown error')}")
results = [CrawlResult(**r) for r in result_data.get("results", [])]
self.logger.success(f"Crawl completed with {len(results)} results", tag="CRAWL")
return results[0] if len(results) == 1 else results
async def _stream_crawl_results(self, data: Dict[str, Any]) -> AsyncGenerator[CrawlResult, None]:
"""Internal method to handle streaming crawl results."""
async with self._http_client.stream("POST", f"{self.base_url}/crawl/stream", json=data) as response:
response.raise_for_status()
async for line in response.aiter_lines():
if line.strip():
try:
result = json.loads(line)
if "error" in result:
self.logger.error_status(url=result.get("url", "unknown"), error=result["error"])
continue
# Check if this is a crawl result (has required fields)
if "url" in result and "success" in result:
self.logger.url_status(url=result.get("url", "unknown"), success=result.get("success", False), timing=result.get("timing", 0.0))
# Create CrawlResult object properly
crawl_result = CrawlResult(**result)
yield crawl_result
# Skip status-only messages
elif result.get("status") == "completed":
continue
except json.JSONDecodeError as e:
self.logger.error(f"Failed to parse streaming response: {e}", tag="STREAM")
continue
except Exception as e:
self.logger.error(f"Error processing streaming result: {e}", tag="STREAM")
continue
async def get_schema(self) -> Dict[str, Any]:
"""Retrieve configuration schemas."""
response = await self._request("GET", "/schema")
return response.json()
async def close(self) -> None:
"""Close the HTTP client session."""
self.logger.info("Closing client", tag="CLOSE")
await self._http_client.aclose()
async def __aenter__(self) -> "Crawl4aiDockerClient":
return self
async def __aexit__(self, exc_type: Optional[type], exc_val: Optional[Exception], exc_tb: Optional[Any]) -> None:
await self.close()
# Example usage
async def main():
async with Crawl4aiDockerClient(verbose=True) as client:
await client.authenticate("user@example.com")
result = await client.crawl(["https://example.com"])
print(result)
schema = await client.get_schema()
print(schema)
if __name__ == "__main__":
asyncio.run(main())
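# Streaming usage sketch (assumption: CrawlerRunConfig exposes the `stream` flag that
# crawl() checks above; URLs and email are placeholders). When streaming is enabled,
# crawl() returns an async generator, so results can be consumed as they arrive:
async def stream_example():
    async with Crawl4aiDockerClient(verbose=True) as client:
        await client.authenticate("user@example.com")
        async for result in await client.crawl(
            ["https://example.com", "https://example.org"],
            crawler_config=CrawlerRunConfig(stream=True),
        ):
            print(result.url, result.success)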

File diff suppressed because it is too large

View File

@@ -510,7 +510,6 @@ class HTML2Text(html.parser.HTMLParser):
if tag == "a" and not self.ignore_links:
if start:
self.inside_link = True
if (
"href" in attrs
and attrs["href"] is not None
@@ -527,7 +526,6 @@ class HTML2Text(html.parser.HTMLParser):
else:
self.astack.append(None)
else:
self.inside_link = False
if self.astack:
a = self.astack.pop()
if self.maybe_automatic_link and not self.empty_link:
@@ -612,22 +610,13 @@ class HTML2Text(html.parser.HTMLParser):
self.o("[" + str(a_props.count) + "]")
if tag == "dl" and start:
self.p() # Add paragraph break before list starts
self.p_p = 0 # Reset paragraph state
elif tag == "dt" and start:
if self.p_p == 0: # If not first term
self.o("\n\n") # Add spacing before new term-definition pair
self.p_p = 0 # Reset paragraph state
elif tag == "dt" and not start:
self.o("\n") # Single newline between term and definition
elif tag == "dd" and start:
self.o(" ") # Indent definition
elif tag == "dd" and not start:
self.p_p = 0
self.p()
if tag == "dt" and not start:
self.pbr()
if tag == "dd" and start:
self.o(" ")
if tag == "dd" and not start:
self.pbr()
if tag in ["ol", "ul"]:
# Google Docs create sub lists as top level lists
@@ -1037,7 +1026,6 @@ class CustomHTML2Text(HTML2Text):
super().__init__(*args, **kwargs)
self.inside_pre = False
self.inside_code = False
self.inside_link = False
self.preserve_tags = set() # Set of tags to preserve
self.current_preserved_tag = None
self.preserved_content = []
@@ -1117,17 +1105,11 @@ class CustomHTML2Text(HTML2Text):
# Ignore code tags inside pre blocks if handle_code_in_pre is False
return
if start:
if not self.inside_link:
self.o("`") # Only output backtick if not inside a link
self.o("`") # Markdown inline code start
self.inside_code = True
else:
if not self.inside_link:
self.o("`") # Only output backtick if not inside a link
self.o("`") # Markdown inline code end
self.inside_code = False
# If inside a link, let the parent class handle the content
if self.inside_link:
super().handle_tag(tag, attrs, start)
else:
super().handle_tag(tag, attrs, start)

View File

@@ -1,69 +0,0 @@
# crawl4ai/hub.py
from abc import ABC, abstractmethod
from typing import Dict, Type, Union
import logging
import importlib
from pathlib import Path
import inspect
logger = logging.getLogger(__name__)
class BaseCrawler(ABC):
def __init__(self):
self.logger = logging.getLogger(self.__class__.__name__)
@abstractmethod
async def run(self, url: str = "", **kwargs) -> str:
"""
Implement this method to return JSON string.
Must accept URL + arbitrary kwargs for flexibility.
"""
pass
def __init_subclass__(cls, **kwargs):
"""Enforce interface validation on subclassing"""
super().__init_subclass__(**kwargs)
# Verify run method signature
run_method = cls.run
if not run_method.__code__.co_argcount >= 2: # self + url
raise TypeError(f"{cls.__name__} must implement 'run(self, url: str, **kwargs)'")
# Verify async nature
if not inspect.iscoroutinefunction(run_method):
raise TypeError(f"{cls.__name__}.run must be async")
class CrawlerHub:
_crawlers: Dict[str, Type[BaseCrawler]] = {}
@classmethod
def _discover_crawlers(cls):
"""Dynamically load crawlers from /crawlers in 3 lines"""
base_path = Path(__file__).parent / "crawlers"
for crawler_dir in base_path.iterdir():
if crawler_dir.is_dir():
try:
module = importlib.import_module(
f"crawl4ai.crawlers.{crawler_dir.name}.crawler"
)
for attr in dir(module):
cls._maybe_register_crawler(
getattr(module, attr), crawler_dir.name
)
except Exception as e:
logger.warning(f"Failed {crawler_dir.name}: {str(e)}")
@classmethod
def _maybe_register_crawler(cls, obj, name: str):
"""Brilliant one-liner registration"""
if isinstance(obj, type) and issubclass(obj, BaseCrawler) and obj != BaseCrawler:
module = importlib.import_module(obj.__module__)
obj.meta = getattr(module, "__meta__", {})
cls._crawlers[name] = obj
@classmethod
def get(cls, name: str) -> Union[Type[BaseCrawler], None]:
if not cls._crawlers:
cls._discover_crawlers()
return cls._crawlers.get(name)
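# Hypothetical sketch of a crawler that CrawlerHub can discover. In practice the class
# lives in its own module at crawl4ai/crawlers/<name>/crawler.py; it is inlined here only
# to illustrate the contract that BaseCrawler.__init_subclass__ enforces above.
import json

__meta__ = {"version": "0.1", "description": "Illustrative example crawler"}

class ExampleCrawler(BaseCrawler):
    async def run(self, url: str = "", **kwargs) -> str:
        # Contract: async, accepts a URL plus arbitrary kwargs, returns a JSON string.
        return json.dumps({"url": url, "received_kwargs": sorted(kwargs)})

# Usage sketch (assuming the module above is placed under crawl4ai/crawlers/example/):
#   crawler_cls = CrawlerHub.get("example")
#   payload = await crawler_cls().run(url="https://example.com")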

View File

@@ -2,93 +2,17 @@ import subprocess
import sys
import asyncio
from .async_logger import AsyncLogger, LogLevel
from pathlib import Path
import os
import shutil
# Initialize logger
logger = AsyncLogger(log_level=LogLevel.DEBUG, verbose=True)
def setup_home_directory():
"""Set up the .crawl4ai folder structure in the user's home directory."""
base_dir = os.getenv("CRAWL4_AI_BASE_DIRECTORY")
crawl4ai_folder = Path(base_dir) if base_dir else Path.home()
crawl4ai_config = crawl4ai_folder / "global.yml"
crawl4ai_folder = crawl4ai_folder / ".crawl4ai"
cache_folder = crawl4ai_folder / "cache"
content_folders = [
"html_content",
"cleaned_html",
"markdown_content",
"extracted_content",
"screenshots",
]
# Clean up old cache if exists
if cache_folder.exists():
shutil.rmtree(cache_folder)
# Create new folder structure
crawl4ai_folder.mkdir(exist_ok=True)
cache_folder.mkdir(exist_ok=True)
for folder in content_folders:
(crawl4ai_folder / folder).mkdir(exist_ok=True)
# If config file does not exist, create it
if not crawl4ai_config.exists():
with open(crawl4ai_config, "w") as f:
f.write("")
def post_install():
"""
Run all post-installation tasks.
Checks CRAWL4AI_MODE environment variable. If set to 'api',
skips Playwright browser installation.
"""
"""Run all post-installation tasks"""
logger.info("Running post-installation setup...", tag="INIT")
setup_home_directory()
# Check environment variable to conditionally skip Playwright install
run_mode = os.getenv('CRAWL4AI_MODE')
if run_mode == 'api':
logger.warning(
"CRAWL4AI_MODE=api detected. Skipping Playwright browser installation.",
tag="SETUP"
)
else:
# Proceed with installation only if mode is not 'api'
install_playwright()
install_playwright()
run_migration()
# TODO: Will be added in the future
# setup_builtin_browser()
logger.success("Post-installation setup completed!", tag="COMPLETE")
def setup_builtin_browser():
"""Set up a builtin browser for use with Crawl4AI"""
try:
logger.info("Setting up builtin browser...", tag="INIT")
asyncio.run(_setup_builtin_browser())
logger.success("Builtin browser setup completed!", tag="COMPLETE")
except Exception as e:
logger.warning(f"Failed to set up builtin browser: {e}")
logger.warning("You can manually set up a builtin browser using 'crawl4ai-doctor builtin-browser-start'")
async def _setup_builtin_browser():
try:
# Import BrowserProfiler here to avoid circular imports
from .browser_profiler import BrowserProfiler
profiler = BrowserProfiler(logger=logger)
# Launch the builtin browser
cdp_url = await profiler.launch_builtin_browser(headless=True)
if cdp_url:
logger.success(f"Builtin browser launched at {cdp_url}", tag="BROWSER")
else:
logger.warning("Failed to launch builtin browser", tag="BROWSER")
except Exception as e:
logger.warning(f"Error setting up builtin browser: {e}", tag="BROWSER")
raise
def install_playwright():
@@ -119,32 +43,6 @@ def install_playwright():
logger.warning(
f"Please run '{sys.executable} -m playwright install --with-deps' manually after the installation."
)
# Install Patchright browsers for undetected browser support
logger.info("Installing Patchright browsers for undetected mode...", tag="INIT")
try:
subprocess.check_call(
[
sys.executable,
"-m",
"patchright",
"install",
"--with-deps",
"--force",
"chromium",
]
)
logger.success(
"Patchright installation completed successfully.", tag="COMPLETE"
)
except subprocess.CalledProcessError:
logger.warning(
f"Please run '{sys.executable} -m patchright install --with-deps' manually after the installation."
)
except Exception:
logger.warning(
f"Please run '{sys.executable} -m patchright install --with-deps' manually after the installation."
)
def run_migration():
@@ -208,5 +106,4 @@ def doctor():
"""Entry point for the doctor command"""
import asyncio
asyncio.run(run_doctor())
sys.exit(0)
return asyncio.run(run_doctor())

View File

@@ -115,6 +115,5 @@ async () => {
document.body.style.overflow = "auto";
// Wait a bit for any animations to complete
document.body.scrollIntoView(false);
await new Promise((resolve) => setTimeout(resolve, 50));
await new Promise((resolve) => setTimeout(resolve, 100));
};

View File

@@ -1,123 +0,0 @@
import click
import sys
import asyncio
from typing import List
from .docs_manager import DocsManager
from .async_logger import AsyncLogger
logger = AsyncLogger(verbose=True)
docs_manager = DocsManager(logger)
def print_table(headers: List[str], rows: List[List[str]], padding: int = 2):
"""Print formatted table with headers and rows"""
widths = [max(len(str(cell)) for cell in col) for col in zip(headers, *rows)]
border = "+" + "+".join("-" * (w + 2 * padding) for w in widths) + "+"
def format_row(row):
return (
"|"
+ "|".join(
f"{' ' * padding}{str(cell):<{w}}{' ' * padding}"
for cell, w in zip(row, widths)
)
+ "|"
)
click.echo(border)
click.echo(format_row(headers))
click.echo(border)
for row in rows:
click.echo(format_row(row))
click.echo(border)
@click.group()
def cli():
"""Crawl4AI Command Line Interface"""
pass
@cli.group()
def docs():
"""Documentation operations"""
pass
@docs.command()
@click.argument("sections", nargs=-1)
@click.option(
"--mode", type=click.Choice(["extended", "condensed"]), default="extended"
)
def combine(sections: tuple, mode: str):
"""Combine documentation sections"""
try:
asyncio.run(docs_manager.ensure_docs_exist())
click.echo(docs_manager.generate(sections, mode))
except Exception as e:
logger.error(str(e), tag="ERROR")
sys.exit(1)
@docs.command()
@click.argument("query")
@click.option("--top-k", "-k", default=5)
@click.option("--build-index", is_flag=True, help="Build index if missing")
def search(query: str, top_k: int, build_index: bool):
"""Search documentation"""
try:
result = docs_manager.search(query, top_k)
if result == "No search index available. Call build_search_index() first.":
if build_index or click.confirm("No search index found. Build it now?"):
asyncio.run(docs_manager.llm_text.generate_index_files())
result = docs_manager.search(query, top_k)
click.echo(result)
except Exception as e:
click.echo(f"Error: {str(e)}", err=True)
sys.exit(1)
@docs.command()
def update():
"""Update docs from GitHub"""
try:
asyncio.run(docs_manager.fetch_docs())
click.echo("Documentation updated successfully")
except Exception as e:
click.echo(f"Error: {str(e)}", err=True)
sys.exit(1)
@docs.command()
@click.option("--force-facts", is_flag=True, help="Force regenerate fact files")
@click.option("--clear-cache", is_flag=True, help="Clear BM25 cache")
def index(force_facts: bool, clear_cache: bool):
"""Build or rebuild search indexes"""
try:
asyncio.run(docs_manager.ensure_docs_exist())
asyncio.run(
docs_manager.llm_text.generate_index_files(
force_generate_facts=force_facts, clear_bm25_cache=clear_cache
)
)
click.echo("Search indexes built successfully")
except Exception as e:
click.echo(f"Error: {str(e)}", err=True)
sys.exit(1)
# Add docs list command
@docs.command()
def list():
"""List available documentation sections"""
try:
sections = docs_manager.list()
print_table(["Sections"], [[section] for section in sections])
except Exception as e:
click.echo(f"Error: {str(e)}", err=True)
sys.exit(1)
if __name__ == "__main__":
cli()

View File

@@ -1,395 +0,0 @@
"""
Link Extractor for Crawl4AI
Extracts head content from links discovered during crawling using URLSeeder's
efficient parallel processing and caching infrastructure.
"""
import asyncio
import fnmatch
from typing import Dict, List, Optional, Any
from .async_logger import AsyncLogger
from .async_url_seeder import AsyncUrlSeeder
from .async_configs import SeedingConfig, CrawlerRunConfig
from .models import Links, Link
from .utils import calculate_total_score
class LinkPreview:
"""
Extracts head content from links using URLSeeder's parallel processing infrastructure.
This class provides intelligent link filtering and head content extraction with:
- Pattern-based inclusion/exclusion filtering
- Parallel processing with configurable concurrency
- Caching for performance
- BM25 relevance scoring
- Memory-safe processing for large link sets
"""
def __init__(self, logger: Optional[AsyncLogger] = None):
"""
Initialize the LinkPreview.
Args:
logger: Optional logger instance for recording events
"""
self.logger = logger
self.seeder: Optional[AsyncUrlSeeder] = None
self._owns_seeder = False
async def __aenter__(self):
"""Async context manager entry."""
await self.start()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
"""Async context manager exit."""
await self.close()
async def start(self):
"""Initialize the URLSeeder instance."""
if not self.seeder:
self.seeder = AsyncUrlSeeder(logger=self.logger)
await self.seeder.__aenter__()
self._owns_seeder = True
async def close(self):
"""Clean up resources."""
if self.seeder and self._owns_seeder:
await self.seeder.__aexit__(None, None, None)
self.seeder = None
self._owns_seeder = False
def _log(self, level: str, message: str, tag: str = "LINK_EXTRACT", **kwargs):
"""Helper method to safely log messages."""
if self.logger:
log_method = getattr(self.logger, level, None)
if log_method:
log_method(message=message, tag=tag, params=kwargs.get('params', {}))
async def extract_link_heads(
self,
links: Links,
config: CrawlerRunConfig
) -> Links:
"""
Extract head content for filtered links and attach to Link objects.
Args:
links: Links object containing internal and external links
config: CrawlerRunConfig with link_preview_config settings
Returns:
Links object with head_data attached to filtered Link objects
"""
link_config = config.link_preview_config
# Ensure seeder is initialized
await self.start()
# Filter links based on configuration
filtered_urls = self._filter_links(links, link_config)
if not filtered_urls:
self._log("info", "No links matched filtering criteria")
return links
self._log("info", "Extracting head content for {count} filtered links",
params={"count": len(filtered_urls)})
# Extract head content using URLSeeder
head_results = await self._extract_heads_parallel(filtered_urls, link_config)
# Merge results back into Link objects
updated_links = self._merge_head_data(links, head_results, config)
self._log("info", "Completed head extraction for links, {success} successful",
params={"success": len([r for r in head_results if r.get("status") == "valid"])})
return updated_links
def _filter_links(self, links: Links, link_config: Dict[str, Any]) -> List[str]:
"""
Filter links based on configuration parameters.
Args:
links: Links object containing internal and external links
link_config: Configuration dictionary for link extraction
Returns:
List of filtered URL strings
"""
filtered_urls = []
# Include internal links if configured
if link_config.include_internal:
filtered_urls.extend([link.href for link in links.internal if link.href])
self._log("debug", "Added {count} internal links",
params={"count": len(links.internal)})
# Include external links if configured
if link_config.include_external:
filtered_urls.extend([link.href for link in links.external if link.href])
self._log("debug", "Added {count} external links",
params={"count": len(links.external)})
# Apply include patterns
include_patterns = link_config.include_patterns
if include_patterns:
filtered_urls = [
url for url in filtered_urls
if any(fnmatch.fnmatch(url, pattern) for pattern in include_patterns)
]
self._log("debug", "After include patterns: {count} links remain",
params={"count": len(filtered_urls)})
# Apply exclude patterns
exclude_patterns = link_config.exclude_patterns
if exclude_patterns:
filtered_urls = [
url for url in filtered_urls
if not any(fnmatch.fnmatch(url, pattern) for pattern in exclude_patterns)
]
self._log("debug", "After exclude patterns: {count} links remain",
params={"count": len(filtered_urls)})
# Limit number of links
max_links = link_config.max_links
if max_links > 0 and len(filtered_urls) > max_links:
filtered_urls = filtered_urls[:max_links]
self._log("debug", "Limited to {max_links} links",
params={"max_links": max_links})
# Remove duplicates while preserving order
seen = set()
unique_urls = []
for url in filtered_urls:
if url not in seen:
seen.add(url)
unique_urls.append(url)
self._log("debug", "Final filtered URLs: {count} unique links",
params={"count": len(unique_urls)})
return unique_urls
async def _extract_heads_parallel(
self,
urls: List[str],
link_config: Dict[str, Any]
) -> List[Dict[str, Any]]:
"""
Extract head content for URLs using URLSeeder's parallel processing.
Args:
urls: List of URLs to process
link_config: Configuration dictionary for link extraction
Returns:
List of dictionaries with url, status, head_data, and optional relevance_score
"""
verbose = link_config.verbose
concurrency = link_config.concurrency
if verbose:
self._log("info", "Starting batch processing: {total} links with {concurrency} concurrent workers",
params={"total": len(urls), "concurrency": concurrency})
# Create SeedingConfig for URLSeeder
seeding_config = SeedingConfig(
extract_head=True,
concurrency=concurrency,
hits_per_sec=getattr(link_config, 'hits_per_sec', None),
query=link_config.query,
score_threshold=link_config.score_threshold,
scoring_method="bm25" if link_config.query else None,
verbose=verbose
)
# Use URLSeeder's extract_head_for_urls method with progress tracking
if verbose:
# Create a wrapper to track progress
results = await self._extract_with_progress(urls, seeding_config, link_config)
else:
results = await self.seeder.extract_head_for_urls(
urls=urls,
config=seeding_config,
concurrency=concurrency,
timeout=link_config.timeout
)
return results
async def _extract_with_progress(
self,
urls: List[str],
seeding_config: SeedingConfig,
link_config: Dict[str, Any]
) -> List[Dict[str, Any]]:
"""Extract head content with progress reporting."""
total_urls = len(urls)
concurrency = link_config.concurrency
batch_size = max(1, total_urls // 10) # Report progress every 10%
# Process URLs and track progress
completed = 0
successful = 0
failed = 0
# Create a custom progress tracking version
# We'll modify URLSeeder's method to include progress callbacks
# For now, let's use the existing method and report at the end
# In a production version, we would modify URLSeeder to accept progress callbacks
self._log("info", "Processing links in batches...")
# Use existing method
results = await self.seeder.extract_head_for_urls(
urls=urls,
config=seeding_config,
concurrency=concurrency,
timeout=link_config.timeout
)
# Count results
for result in results:
completed += 1
if result.get("status") == "valid":
successful += 1
else:
failed += 1
# Final progress report
self._log("info", "Batch processing completed: {completed}/{total} processed, {successful} successful, {failed} failed",
params={
"completed": completed,
"total": total_urls,
"successful": successful,
"failed": failed
})
return results
def _merge_head_data(
self,
original_links: Links,
head_results: List[Dict[str, Any]],
config: CrawlerRunConfig
) -> Links:
"""
Merge head extraction results back into Link objects.
Args:
original_links: Original Links object
head_results: Results from head extraction
Returns:
Links object with head_data attached to matching links
"""
# Create URL to head_data mapping
url_to_head_data = {}
for result in head_results:
url = result.get("url")
if url:
url_to_head_data[url] = {
"head_data": result.get("head_data", {}),
"status": result.get("status", "unknown"),
"error": result.get("error"),
"relevance_score": result.get("relevance_score")
}
# Update internal links
updated_internal = []
for link in original_links.internal:
if link.href in url_to_head_data:
head_info = url_to_head_data[link.href]
# Create new Link object with head data and scoring
contextual_score = head_info.get("relevance_score")
updated_link = Link(
href=link.href,
text=link.text,
title=link.title,
base_domain=link.base_domain,
head_data=head_info["head_data"],
head_extraction_status=head_info["status"],
head_extraction_error=head_info.get("error"),
intrinsic_score=getattr(link, 'intrinsic_score', None),
contextual_score=contextual_score
)
# Add relevance score to head_data for backward compatibility
if contextual_score is not None:
updated_link.head_data = updated_link.head_data or {}
updated_link.head_data["relevance_score"] = contextual_score
# Calculate total score combining intrinsic and contextual scores
updated_link.total_score = calculate_total_score(
intrinsic_score=updated_link.intrinsic_score,
contextual_score=updated_link.contextual_score,
score_links_enabled=getattr(config, 'score_links', False),
query_provided=bool(config.link_preview_config.query)
)
updated_internal.append(updated_link)
else:
# Keep original link unchanged
updated_internal.append(link)
# Update external links
updated_external = []
for link in original_links.external:
if link.href in url_to_head_data:
head_info = url_to_head_data[link.href]
# Create new Link object with head data and scoring
contextual_score = head_info.get("relevance_score")
updated_link = Link(
href=link.href,
text=link.text,
title=link.title,
base_domain=link.base_domain,
head_data=head_info["head_data"],
head_extraction_status=head_info["status"],
head_extraction_error=head_info.get("error"),
intrinsic_score=getattr(link, 'intrinsic_score', None),
contextual_score=contextual_score
)
# Add relevance score to head_data for backward compatibility
if contextual_score is not None:
updated_link.head_data = updated_link.head_data or {}
updated_link.head_data["relevance_score"] = contextual_score
# Calculate total score combining intrinsic and contextual scores
updated_link.total_score = calculate_total_score(
intrinsic_score=updated_link.intrinsic_score,
contextual_score=updated_link.contextual_score,
score_links_enabled=getattr(config, 'score_links', False),
query_provided=bool(config.link_preview_config.query)
)
updated_external.append(updated_link)
else:
# Keep original link unchanged
updated_external.append(link)
# Sort links by relevance score if available
if any(hasattr(link, 'head_data') and link.head_data and 'relevance_score' in link.head_data
for link in updated_internal + updated_external):
def get_relevance_score(link):
if hasattr(link, 'head_data') and link.head_data and 'relevance_score' in link.head_data:
return link.head_data['relevance_score']
return 0.0
updated_internal.sort(key=get_relevance_score, reverse=True)
updated_external.sort(key=get_relevance_score, reverse=True)
return Links(
internal=updated_internal,
external=updated_external
)
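# Usage sketch for LinkPreview (hedged: link_preview_config is assumed to be a config
# object exposing the attributes read above — include_internal, include_patterns,
# exclude_patterns, max_links, query, concurrency, timeout, verbose — however the
# caller's CrawlerRunConfig builds it).
async def preview_links(links: Links, run_config: CrawlerRunConfig) -> Links:
    async with LinkPreview() as preview:
        # Filters links per run_config.link_preview_config, fetches <head> metadata in
        # parallel through AsyncUrlSeeder, and returns Links with head_data and scores attached.
        return await preview.extract_link_heads(links, run_config)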

View File

@@ -2,7 +2,6 @@ from abc import ABC, abstractmethod
from typing import Optional, Dict, Any, Tuple
from .models import MarkdownGenerationResult
from .html2text import CustomHTML2Text
# from .types import RelevantContentFilter
from .content_filter_strategy import RelevantContentFilter
import re
from urllib.parse import urljoin
@@ -30,25 +29,21 @@ class MarkdownGenerationStrategy(ABC):
self,
content_filter: Optional[RelevantContentFilter] = None,
options: Optional[Dict[str, Any]] = None,
verbose: bool = False,
content_source: str = "cleaned_html",
):
self.content_filter = content_filter
self.options = options or {}
self.verbose = verbose
self.content_source = content_source
@abstractmethod
def generate_markdown(
self,
input_html: str,
cleaned_html: str,
base_url: str = "",
html2text_options: Optional[Dict[str, Any]] = None,
content_filter: Optional[RelevantContentFilter] = None,
citations: bool = True,
**kwargs,
) -> MarkdownGenerationResult:
"""Generate markdown from the selected input HTML."""
"""Generate markdown from cleaned HTML."""
pass
@@ -65,7 +60,6 @@ class DefaultMarkdownGenerator(MarkdownGenerationStrategy):
Args:
content_filter (Optional[RelevantContentFilter]): Content filter for generating fit markdown.
options (Optional[Dict[str, Any]]): Additional options for markdown generation. Defaults to None.
content_source (str): Source of content to generate markdown from. Options: "cleaned_html", "raw_html", "fit_html". Defaults to "cleaned_html".
Returns:
MarkdownGenerationResult: Result containing raw markdown, fit markdown, fit HTML, and references markdown.
@@ -75,9 +69,8 @@ class DefaultMarkdownGenerator(MarkdownGenerationStrategy):
self,
content_filter: Optional[RelevantContentFilter] = None,
options: Optional[Dict[str, Any]] = None,
content_source: str = "cleaned_html",
):
super().__init__(content_filter, options, verbose=False, content_source=content_source)
super().__init__(content_filter, options)
def convert_links_to_citations(
self, markdown: str, base_url: str = ""
@@ -147,7 +140,7 @@ class DefaultMarkdownGenerator(MarkdownGenerationStrategy):
def generate_markdown(
self,
input_html: str,
cleaned_html: str,
base_url: str = "",
html2text_options: Optional[Dict[str, Any]] = None,
options: Optional[Dict[str, Any]] = None,
@@ -156,16 +149,16 @@ class DefaultMarkdownGenerator(MarkdownGenerationStrategy):
**kwargs,
) -> MarkdownGenerationResult:
"""
Generate markdown with citations from the provided input HTML.
Generate markdown with citations from cleaned HTML.
How it works:
1. Generate raw markdown from the input HTML.
1. Generate raw markdown from cleaned HTML.
2. Convert links to citations.
3. Generate fit markdown if content filter is provided.
4. Return MarkdownGenerationResult.
Args:
input_html (str): The HTML content to process (selected based on content_source).
cleaned_html (str): Cleaned HTML content.
base_url (str): Base URL for URL joins.
html2text_options (Optional[Dict[str, Any]]): HTML2Text options.
options (Optional[Dict[str, Any]]): Additional options for markdown generation.
@@ -183,7 +176,7 @@ class DefaultMarkdownGenerator(MarkdownGenerationStrategy):
"ignore_emphasis": False,
"ignore_links": False,
"ignore_images": False,
"protect_links": False,
"protect_links": True,
"single_line_break": True,
"mark_code": True,
"escape_snob": False,
@@ -200,14 +193,14 @@ class DefaultMarkdownGenerator(MarkdownGenerationStrategy):
h.update_params(**default_options)
# Ensure we have valid input
if not input_html:
input_html = ""
elif not isinstance(input_html, str):
input_html = str(input_html)
if not cleaned_html:
cleaned_html = ""
elif not isinstance(cleaned_html, str):
cleaned_html = str(cleaned_html)
# Generate raw markdown
try:
raw_markdown = h.handle(input_html)
raw_markdown = h.handle(cleaned_html)
except Exception as e:
raw_markdown = f"Error converting HTML to markdown: {str(e)}"
@@ -232,7 +225,7 @@ class DefaultMarkdownGenerator(MarkdownGenerationStrategy):
if content_filter or self.content_filter:
try:
content_filter = content_filter or self.content_filter
filtered_html = content_filter.filter_content(input_html)
filtered_html = content_filter.filter_content(cleaned_html)
filtered_html = "\n".join(
"<div>{}</div>".format(s) for s in filtered_html
)
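# Usage sketch (appended illustration, not part of the diff above) for the restored
# cleaned_html-based generate_markdown(); the HTML snippet and base URL are placeholders.
def _markdown_example() -> MarkdownGenerationResult:
    generator = DefaultMarkdownGenerator(options={"ignore_images": True})
    return generator.generate_markdown(
        cleaned_html="<html><body><h1>Title</h1><p>See the <a href='/docs'>docs</a>.</p></body></html>",
        base_url="https://example.com",
    )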

View File

@@ -1,79 +0,0 @@
import psutil
import platform
import subprocess
from typing import Tuple
def get_true_available_memory_gb() -> float:
"""Get truly available memory including inactive pages (cross-platform)"""
vm = psutil.virtual_memory()
if platform.system() == 'Darwin': # macOS
# On macOS, we need to include inactive memory too
try:
# Use vm_stat to get accurate values
result = subprocess.run(['vm_stat'], capture_output=True, text=True)
lines = result.stdout.split('\n')
page_size = 16384  # macOS page size in bytes (16 KB on Apple Silicon; Intel Macs use 4 KB pages)
pages = {}
for line in lines:
if 'Pages free:' in line:
pages['free'] = int(line.split()[-1].rstrip('.'))
elif 'Pages inactive:' in line:
pages['inactive'] = int(line.split()[-1].rstrip('.'))
elif 'Pages speculative:' in line:
pages['speculative'] = int(line.split()[-1].rstrip('.'))
elif 'Pages purgeable:' in line:
pages['purgeable'] = int(line.split()[-1].rstrip('.'))
# Calculate total available (free + inactive + speculative + purgeable)
total_available_pages = (
pages.get('free', 0) +
pages.get('inactive', 0) +
pages.get('speculative', 0) +
pages.get('purgeable', 0)
)
available_gb = (total_available_pages * page_size) / (1024**3)
return available_gb
except Exception:
# Fallback to psutil
return vm.available / (1024**3)
else:
# For Windows and Linux, psutil.available is accurate
return vm.available / (1024**3)
def get_true_memory_usage_percent() -> float:
"""
Get memory usage percentage that accounts for platform differences.
Returns:
float: Memory usage percentage (0-100)
"""
vm = psutil.virtual_memory()
total_gb = vm.total / (1024**3)
available_gb = get_true_available_memory_gb()
# Calculate used percentage based on truly available memory
used_percent = 100.0 * (total_gb - available_gb) / total_gb
# Ensure it's within valid range
return max(0.0, min(100.0, used_percent))
def get_memory_stats() -> Tuple[float, float, float]:
"""
Get comprehensive memory statistics.
Returns:
Tuple[float, float, float]: (used_percent, available_gb, total_gb)
"""
vm = psutil.virtual_memory()
total_gb = vm.total / (1024**3)
available_gb = get_true_available_memory_gb()
used_percent = get_true_memory_usage_percent()
return used_percent, available_gb, total_gb
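# Usage sketch for the helpers above (the 90% threshold is an illustrative value):
if __name__ == "__main__":
    used_percent, available_gb, total_gb = get_memory_stats()
    print(f"Memory: {used_percent:.1f}% used, {available_gb:.1f} GB free of {total_gb:.1f} GB total")
    if used_percent > 90.0:
        print("High memory pressure; consider lowering crawl concurrency.")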

View File

@@ -1,46 +1,12 @@
"""
Crawl4AI Models Module
This module contains Pydantic models used throughout the Crawl4AI library.
Key Features:
- ORJSONModel: Base model with ORJSON serialization support
- DeprecatedPropertiesMixin: Global system for handling deprecated properties
- CrawlResult: Main result model with backward compatibility support
Deprecated Properties System:
The DeprecatedPropertiesMixin provides a global way to handle deprecated properties
across all models. Instead of manually excluding deprecated properties in each
model_dump() call, you can simply override the get_deprecated_properties() method:
Example:
class MyModel(ORJSONModel):
name: str
old_field: Optional[str] = None
def get_deprecated_properties(self) -> set[str]:
return {'old_field', 'another_deprecated_field'}
@property
def old_field(self):
raise AttributeError("old_field is deprecated, use name instead")
The system automatically excludes these properties from serialization, preventing
property objects from appearing in JSON output.
"""
from pydantic import BaseModel, ConfigDict,HttpUrl, PrivateAttr, Field
from pydantic import BaseModel, HttpUrl
from typing import List, Dict, Optional, Callable, Awaitable, Union, Any
from typing import AsyncGenerator
from typing import Generic, TypeVar
from enum import Enum
from dataclasses import dataclass
from .ssl_certificate import SSLCertificate
from datetime import datetime
from datetime import timedelta
import orjson
###############################
# Dispatcher Models
###############################
@@ -58,15 +24,10 @@ class CrawlerTaskResult:
result: "CrawlResult"
memory_usage: float
peak_memory: float
start_time: Union[datetime, float]
end_time: Union[datetime, float]
start_time: datetime
end_time: datetime
error_message: str = ""
retry_count: int = 0
wait_time: float = 0.0
@property
def success(self) -> bool:
return self.result.success
class CrawlStatus(Enum):
QUEUED = "QUEUED"
@@ -74,39 +35,27 @@ class CrawlStatus(Enum):
COMPLETED = "COMPLETED"
FAILED = "FAILED"
@dataclass
class CrawlStats:
task_id: str
url: str
status: CrawlStatus
start_time: Optional[Union[datetime, float]] = None
end_time: Optional[Union[datetime, float]] = None
start_time: Optional[datetime] = None
end_time: Optional[datetime] = None
memory_usage: float = 0.0
peak_memory: float = 0.0
error_message: str = ""
wait_time: float = 0.0
retry_count: int = 0
counted_requeue: bool = False
@property
def duration(self) -> str:
if not self.start_time:
return "0:00"
# Convert start_time to datetime if it's a float
start = self.start_time
if isinstance(start, float):
start = datetime.fromtimestamp(start)
# Get end time or use current time
end = self.end_time or datetime.now()
# Convert end_time to datetime if it's a float
if isinstance(end, float):
end = datetime.fromtimestamp(end)
duration = end - start
duration = end - self.start_time
return str(timedelta(seconds=int(duration.total_seconds())))
class DisplayMode(Enum):
DETAILED = "DETAILED"
AGGREGATED = "AGGREGATED"
@@ -124,169 +73,42 @@ class TokenUsage:
prompt_tokens_details: Optional[dict] = None
def orjson_default(obj):
# Handle datetime (if not already handled by orjson)
if isinstance(obj, datetime):
return obj.isoformat()
# Handle property objects (convert to string or something else)
if isinstance(obj, property):
return str(obj)
# Last resort: convert to string
return str(obj)
class DeprecatedPropertiesMixin:
"""
Mixin to handle deprecated properties in Pydantic models.
Classes that inherit from this mixin can define deprecated properties
that will be automatically excluded from serialization.
Usage:
1. Override the get_deprecated_properties() method to return a set of deprecated property names
2. The model_dump method will automatically exclude these properties
Example:
class MyModel(ORJSONModel):
def get_deprecated_properties(self) -> set[str]:
return {'old_field', 'legacy_property'}
name: str
old_field: Optional[str] = None # Field definition
@property
def old_field(self): # Property that overrides the field
raise AttributeError("old_field is deprecated, use name instead")
"""
def get_deprecated_properties(self) -> set[str]:
"""
Get deprecated property names for this model.
Override this method in subclasses to define deprecated properties.
Returns:
set[str]: Set of deprecated property names
"""
return set()
@classmethod
def get_all_deprecated_properties(cls) -> set[str]:
"""
Get all deprecated properties from this class and all parent classes.
Returns:
set[str]: Set of all deprecated property names
"""
deprecated_props = set()
# Create an instance to call the instance method
try:
# Try to create a dummy instance to get deprecated properties
dummy_instance = cls.__new__(cls)
deprecated_props.update(dummy_instance.get_deprecated_properties())
except Exception:
# If we can't create an instance, check for class-level definitions
pass
# Also check parent classes
for klass in cls.__mro__:
if hasattr(klass, 'get_deprecated_properties') and klass != DeprecatedPropertiesMixin:
try:
dummy_instance = klass.__new__(klass)
deprecated_props.update(dummy_instance.get_deprecated_properties())
except Exception:
pass
return deprecated_props
def model_dump(self, *args, **kwargs):
"""
Override model_dump to automatically exclude deprecated properties.
This method:
1. Gets the existing exclude set from kwargs
2. Adds all deprecated properties defined in get_deprecated_properties()
3. Calls the parent model_dump with the updated exclude set
"""
# Get the default exclude set, or create empty set if None
exclude = kwargs.get('exclude', set())
if exclude is None:
exclude = set()
elif not isinstance(exclude, set):
exclude = set(exclude) if exclude else set()
# Add deprecated properties for this instance
exclude.update(self.get_deprecated_properties())
kwargs['exclude'] = exclude
return super().model_dump(*args, **kwargs)
class ORJSONModel(DeprecatedPropertiesMixin, BaseModel):
model_config = ConfigDict(
ser_json_timedelta="iso8601", # Optional: format timedelta
ser_json_bytes="utf8", # Optional: bytes → UTF-8 string
)
def model_dump_json(self, **kwargs) -> bytes:
"""Custom JSON serialization using orjson"""
return orjson.dumps(self.model_dump(**kwargs), default=orjson_default)
@classmethod
def model_validate_json(cls, json_data: Union[str, bytes], **kwargs):
"""Custom JSON deserialization using orjson"""
if isinstance(json_data, str):
json_data = json_data.encode()
return cls.model_validate(orjson.loads(json_data), **kwargs)
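# Illustrative sketch (hypothetical model): anything inheriting ORJSONModel round-trips
# through orjson; datetimes are emitted as ISO-8601 strings (natively by orjson, with
# orjson_default above as the fallback for types orjson does not handle).
class _ExampleRecord(ORJSONModel):
    name: str
    created: datetime

# raw = _ExampleRecord(name="demo", created=datetime.now()).model_dump_json()  # -> bytes
# restored = _ExampleRecord.model_validate_json(raw)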
class UrlModel(ORJSONModel):
class UrlModel(BaseModel):
url: HttpUrl
forced: bool = False
@dataclass
class TraversalStats:
"""Statistics for the traversal process"""
start_time: datetime = datetime.now()
urls_processed: int = 0
urls_failed: int = 0
urls_skipped: int = 0
total_depth_reached: int = 0
current_depth: int = 0
class DispatchResult(ORJSONModel):
task_id: str
memory_usage: float
peak_memory: float
start_time: Union[datetime, float]
end_time: Union[datetime, float]
error_message: str = ""
class MarkdownGenerationResult(ORJSONModel):
class MarkdownGenerationResult(BaseModel):
raw_markdown: str
markdown_with_citations: str
references_markdown: str
fit_markdown: Optional[str] = None
fit_html: Optional[str] = None
def __str__(self):
return self.raw_markdown
class CrawlResult(ORJSONModel):
class DispatchResult(BaseModel):
task_id: str
memory_usage: float
peak_memory: float
start_time: datetime
end_time: datetime
error_message: str = ""
class CrawlResult(BaseModel):
url: str
html: str
fit_html: Optional[str] = None
success: bool
cleaned_html: Optional[str] = None
media: Dict[str, List[Dict]] = {}
links: Dict[str, List[Dict]] = {}
downloaded_files: Optional[List[str]] = None
js_execution_result: Optional[Dict[str, Any]] = None
screenshot: Optional[str] = None
pdf: Optional[bytes] = None
mhtml: Optional[str] = None
_markdown: Optional[MarkdownGenerationResult] = PrivateAttr(default=None)
markdown: Optional[Union[str, MarkdownGenerationResult]] = None
markdown_v2: Optional[MarkdownGenerationResult] = None
fit_markdown: Optional[str] = None
fit_html: Optional[str] = None
extracted_content: Optional[str] = None
metadata: Optional[dict] = None
error_message: Optional[str] = None
@@ -296,194 +118,31 @@ class CrawlResult(ORJSONModel):
ssl_certificate: Optional[SSLCertificate] = None
dispatch_result: Optional[DispatchResult] = None
redirected_url: Optional[str] = None
network_requests: Optional[List[Dict[str, Any]]] = None
console_messages: Optional[List[Dict[str, Any]]] = None
tables: List[Dict] = Field(default_factory=list) # NEW [{headers,rows,caption,summary}]
class Config:
arbitrary_types_allowed = True
def get_deprecated_properties(self) -> set[str]:
"""Define deprecated properties that should be excluded from serialization."""
return {'fit_html', 'fit_markdown', 'markdown_v2'}
# NOTE: The StringCompatibleMarkdown class, custom __init__ method, property getters/setters,
# and model_dump override all exist to support a smooth transition from markdown as a string
# to markdown as a MarkdownGenerationResult object, while maintaining backward compatibility.
#
# This allows code that expects markdown to be a string to continue working, while also
# providing access to the full MarkdownGenerationResult object's properties.
#
# The markdown_v2 property is deprecated and raises an error directing users to use markdown.
#
# When backward compatibility is no longer needed in future versions, this entire mechanism
# can be simplified to a standard field with no custom accessors or serialization logic.
def __init__(self, **data):
markdown_result = data.pop('markdown', None)
super().__init__(**data)
if markdown_result is not None:
self._markdown = (
MarkdownGenerationResult(**markdown_result)
if isinstance(markdown_result, dict)
else markdown_result
)
@property
def markdown(self):
"""
Property that returns a StringCompatibleMarkdown object that behaves like
a string but also provides access to MarkdownGenerationResult attributes.
This approach allows backward compatibility with code that expects 'markdown'
to be a string, while providing access to the full MarkdownGenerationResult.
"""
if self._markdown is None:
return None
return StringCompatibleMarkdown(self._markdown)
@markdown.setter
def markdown(self, value):
"""
Setter for the markdown property.
"""
self._markdown = value
@property
def markdown_v2(self):
"""
Deprecated property that raises an AttributeError when accessed.
This property exists to inform users that 'markdown_v2' has been
deprecated and they should use 'markdown' instead.
"""
raise AttributeError(
"The 'markdown_v2' attribute is deprecated and has been removed. "
"""Please use 'markdown' instead, which now returns a MarkdownGenerationResult, with
following properties:
- raw_markdown: The raw markdown string
- markdown_with_citations: The markdown string with citations
- references_markdown: The markdown string with references
- fit_markdown: The markdown string with fit text
"""
)
@property
def fit_markdown(self):
"""
Deprecated property that raises an AttributeError when accessed.
"""
raise AttributeError(
"The 'fit_markdown' attribute is deprecated and has been removed. "
"Please use 'markdown.fit_markdown' instead."
)
@property
def fit_html(self):
"""
Deprecated property that raises an AttributeError when accessed.
"""
raise AttributeError(
"The 'fit_html' attribute is deprecated and has been removed. "
"Please use 'markdown.fit_html' instead."
)
def model_dump(self, *args, **kwargs):
"""
Override model_dump to include the _markdown private attribute in serialization.
This override is necessary because:
1. PrivateAttr fields are excluded from serialization by default
2. We need to maintain backward compatibility by including the 'markdown' field
in the serialized output
3. Uses the DeprecatedPropertiesMixin to automatically exclude deprecated properties
Future developers: This method ensures that the markdown content is properly
serialized despite being stored in a private attribute. The deprecated properties
are automatically handled by the mixin.
"""
# Use the parent class method which handles deprecated properties automatically
result = super().model_dump(*args, **kwargs)
# Add the markdown content if it exists
if self._markdown is not None:
result["markdown"] = self._markdown.model_dump()
return result
class StringCompatibleMarkdown(str):
"""A string subclass that also provides access to MarkdownGenerationResult attributes"""
def __new__(cls, markdown_result):
return super().__new__(cls, markdown_result.raw_markdown)
def __init__(self, markdown_result):
self._markdown_result = markdown_result
def __getattr__(self, name):
return getattr(self._markdown_result, name)
CrawlResultT = TypeVar('CrawlResultT', bound=CrawlResult)
class CrawlResultContainer(Generic[CrawlResultT]):
def __init__(self, results: Union[CrawlResultT, List[CrawlResultT]]):
# Normalize to a list
if isinstance(results, list):
self._results = results
else:
self._results = [results]
def __iter__(self):
return iter(self._results)
def __getitem__(self, index):
return self._results[index]
def __len__(self):
return len(self._results)
def __getattr__(self, attr):
# Delegate attribute access to the first element.
if self._results:
return getattr(self._results[0], attr)
raise AttributeError(f"{self.__class__.__name__} object has no attribute '{attr}'")
def __repr__(self):
return f"{self.__class__.__name__}({self._results!r})"
RunManyReturn = Union[
CrawlResultContainer[CrawlResultT],
AsyncGenerator[CrawlResultT, None]
]
# END of backward compatibility code for markdown/markdown_v2.
# When removing this code in the future, make sure to:
# 1. Replace the private attribute and property with a standard field
# 2. Update any serialization logic that might depend on the current behavior
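# Illustrative sketch: CrawlResultContainer behaves like a list of results while also
# delegating attribute access to the first result, so single-result call sites keep
# working when run-many APIs return a container. Field values below are placeholders.
def _container_example() -> None:
    single = CrawlResult(url="https://example.com", html="<html></html>", success=True)
    container = CrawlResultContainer(single)
    assert container.success is True                       # delegated to container[0]
    assert len(container) == 1                             # list-like length
    assert [r.url for r in container] == ["https://example.com"]  # iterable like a list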
class AsyncCrawlResponse(ORJSONModel):
class AsyncCrawlResponse(BaseModel):
html: str
response_headers: Dict[str, str]
js_execution_result: Optional[Dict[str, Any]] = None
status_code: int
screenshot: Optional[str] = None
pdf_data: Optional[bytes] = None
mhtml_data: Optional[str] = None
get_delayed_content: Optional[Callable[[Optional[float]], Awaitable[str]]] = None
downloaded_files: Optional[List[str]] = None
ssl_certificate: Optional[SSLCertificate] = None
redirected_url: Optional[str] = None
network_requests: Optional[List[Dict[str, Any]]] = None
console_messages: Optional[List[Dict[str, Any]]] = None
class Config:
arbitrary_types_allowed = True
###############################
# Scraping Models
###############################
class MediaItem(ORJSONModel):
class MediaItem(BaseModel):
src: Optional[str] = ""
data: Optional[str] = ""
alt: Optional[str] = ""
desc: Optional[str] = ""
score: Optional[int] = 0
@@ -493,20 +152,14 @@ class MediaItem(ORJSONModel):
width: Optional[int] = None
class Link(ORJSONModel):
class Link(BaseModel):
href: Optional[str] = ""
text: Optional[str] = ""
title: Optional[str] = ""
base_domain: Optional[str] = ""
head_data: Optional[Dict[str, Any]] = None # Head metadata extracted from link target
head_extraction_status: Optional[str] = None # "success", "failed", "skipped"
head_extraction_error: Optional[str] = None # Error message if extraction failed
intrinsic_score: Optional[float] = None # Quality score based on URL structure, text, and context
contextual_score: Optional[float] = None # BM25 relevance score based on query and head content
total_score: Optional[float] = None # Combined score from intrinsic and contextual scores
class Media(ORJSONModel):
class Media(BaseModel):
images: List[MediaItem] = []
videos: List[
MediaItem
@@ -514,15 +167,14 @@ class Media(ORJSONModel):
audios: List[
MediaItem
] = [] # Using MediaItem model for now, can be extended with Audio model if needed
tables: List[Dict] = [] # Table data extracted from HTML tables
class Links(ORJSONModel):
class Links(BaseModel):
internal: List[Link] = []
external: List[Link] = []
class ScrapingResult(ORJSONModel):
class ScrapingResult(BaseModel):
cleaned_html: str
success: bool
media: Media = Media()

View File

@@ -1,195 +0,0 @@
from pathlib import Path
import asyncio
from dataclasses import asdict
from crawl4ai.async_logger import AsyncLogger
from crawl4ai.async_crawler_strategy import AsyncCrawlerStrategy
from crawl4ai.models import AsyncCrawlResponse, ScrapingResult
from crawl4ai.content_scraping_strategy import ContentScrapingStrategy
from .processor import NaivePDFProcessorStrategy # Assuming your current PDF code is in pdf_processor.py
class PDFCrawlerStrategy(AsyncCrawlerStrategy):
def __init__(self, logger: AsyncLogger = None):
self.logger = logger
async def crawl(self, url: str, **kwargs) -> AsyncCrawlResponse:
# Just pass through with empty HTML - scraper will handle actual processing
return AsyncCrawlResponse(
html="Scraper will handle the real work", # Scraper will handle the real work
response_headers={"Content-Type": "application/pdf"},
status_code=200
)
async def close(self):
pass
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
class PDFContentScrapingStrategy(ContentScrapingStrategy):
"""
A content scraping strategy for PDF files.
Attributes:
save_images_locally (bool): Whether to save images locally.
extract_images (bool): Whether to extract images from PDF.
image_save_dir (str): Directory to save extracted images.
logger (AsyncLogger): Logger instance for recording events and errors.
Methods:
scrap(url: str, html: str, **params) -> ScrapingResult:
Scrap content from a PDF file.
ascrap(url: str, html: str, **kwargs) -> ScrapingResult:
Asynchronous version of scrap.
Usage:
strategy = PDFContentScrapingStrategy(
save_images_locally=False,
extract_images=False,
image_save_dir=None,
logger=logger
)
"""
def __init__(self,
save_images_locally : bool = False,
extract_images : bool = False,
image_save_dir : str = None,
batch_size: int = 4,
logger: AsyncLogger = None):
self.logger = logger
self.pdf_processor = NaivePDFProcessorStrategy(
save_images_locally=save_images_locally,
extract_images=extract_images,
image_save_dir=image_save_dir,
batch_size=batch_size
)
self._temp_files = [] # Track temp files for cleanup
def scrap(self, url: str, html: str, **params) -> ScrapingResult:
"""
Scrap content from a PDF file.
Args:
url (str): The URL of the PDF file.
html (str): The HTML content of the page.
**params: Additional parameters.
Returns:
ScrapingResult: The scraped content.
"""
# Download if URL or use local path
pdf_path = self._get_pdf_path(url)
try:
# Process PDF
# result = self.pdf_processor.process(Path(pdf_path))
result = self.pdf_processor.process_batch(Path(pdf_path))
# Combine page HTML
cleaned_html = f"""
<html>
<head><meta name="pdf-pages" content="{len(result.pages)}"></head>
<body>
{''.join(f'<div class="pdf-page" data-page="{i+1}">{page.html}</div>'
for i, page in enumerate(result.pages))}
</body>
</html>
"""
# Accumulate media and links with page numbers
media = {"images": []}
links = {"urls": []}
for page in result.pages:
# Add page number to each image
for img in page.images:
img["page"] = page.page_number
media["images"].append(img)
# Add page number to each link
for link in page.links:
links["urls"].append({
"url": link,
"page": page.page_number
})
return ScrapingResult(
cleaned_html=cleaned_html,
success=True,
media=media,
links=links,
metadata=asdict(result.metadata)
)
finally:
# Cleanup temp file if downloaded
if url.startswith(("http://", "https://")):
try:
Path(pdf_path).unlink(missing_ok=True)
if pdf_path in self._temp_files:
self._temp_files.remove(pdf_path)
except Exception as e:
if self.logger:
self.logger.warning(f"Failed to cleanup temp file {pdf_path}: {e}")
async def ascrap(self, url: str, html: str, **kwargs) -> ScrapingResult:
# For simple cases, you can use the sync version
return await asyncio.to_thread(self.scrap, url, html, **kwargs)
def _get_pdf_path(self, url: str) -> str:
if url.startswith(("http://", "https://")):
import tempfile
import requests
# Create temp file with .pdf extension
temp_file = tempfile.NamedTemporaryFile(suffix='.pdf', delete=False)
self._temp_files.append(temp_file.name)
try:
if self.logger:
self.logger.info(f"Downloading PDF from {url}...")
# Download PDF with streaming and timeout
# Connection timeout: 20s, read timeout: 600s (10 minutes for large PDFs)
response = requests.get(url, stream=True, timeout=(20, 60 * 10))
response.raise_for_status()
# Get file size if available
total_size = int(response.headers.get('content-length', 0))
downloaded = 0
# Write to temp file
with open(temp_file.name, 'wb') as f:
for chunk in response.iter_content(chunk_size=8192):
f.write(chunk)
downloaded += len(chunk)
if self.logger and total_size > 0:
progress = (downloaded / total_size) * 100
if progress % 10 < 0.1: # Log every 10%
self.logger.debug(f"PDF download progress: {progress:.0f}%")
if self.logger:
self.logger.info(f"PDF downloaded successfully: {temp_file.name}")
return temp_file.name
except requests.exceptions.Timeout as e:
# Clean up temp file if download fails
Path(temp_file.name).unlink(missing_ok=True)
self._temp_files.remove(temp_file.name)
raise RuntimeError(f"Timeout downloading PDF from {url}: {str(e)}")
except Exception as e:
# Clean up temp file if download fails
Path(temp_file.name).unlink(missing_ok=True)
self._temp_files.remove(temp_file.name)
raise RuntimeError(f"Failed to download PDF from {url}: {str(e)}")
elif url.startswith("file://"):
return url[7:] # Strip file:// prefix
return url # Assume local path
__all__ = ["PDFCrawlerStrategy", "PDFContentScrapingStrategy"]

View File

@@ -1,487 +0,0 @@
import logging
import re
from abc import ABC, abstractmethod
from datetime import datetime
from pathlib import Path
from time import time
from dataclasses import dataclass, asdict, field
from typing import Dict, List, Optional, Any, Union
import base64
import tempfile
from .utils import *
from .utils import (
apply_png_predictor,
clean_pdf_text,
clean_pdf_text_to_html,
)
# Remove direct PyPDF2 imports from the top
# import PyPDF2
# from PyPDF2 import PdfReader
logger = logging.getLogger(__name__)
@dataclass
class PDFMetadata:
title: Optional[str] = None
author: Optional[str] = None
producer: Optional[str] = None
created: Optional[datetime] = None
modified: Optional[datetime] = None
pages: int = 0
encrypted: bool = False
file_size: Optional[int] = None
@dataclass
class PDFPage:
page_number: int
raw_text: str = ""
markdown: str = ""
html: str = ""
images: List[Dict] = field(default_factory=list)
links: List[str] = field(default_factory=list)
layout: List[Dict] = field(default_factory=list)
@dataclass
class PDFProcessResult:
metadata: PDFMetadata
pages: List[PDFPage]
processing_time: float = 0.0
version: str = "1.0"
class PDFProcessorStrategy(ABC):
@abstractmethod
def process(self, pdf_path: Path) -> PDFProcessResult:
pass
class NaivePDFProcessorStrategy(PDFProcessorStrategy):
def __init__(self, image_dpi: int = 144, image_quality: int = 85, extract_images: bool = True,
save_images_locally: bool = False, image_save_dir: Optional[Path] = None, batch_size: int = 4):
# Import check at initialization time
try:
import PyPDF2
except ImportError:
raise ImportError("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")
self.image_dpi = image_dpi
self.image_quality = image_quality
self.current_page_number = 0
self.extract_images = extract_images
self.save_images_locally = save_images_locally
self.image_save_dir = image_save_dir
self.batch_size = batch_size
self._temp_dir = None
def process(self, pdf_path: Path) -> PDFProcessResult:
# Import inside method to allow dependency to be optional
try:
from PyPDF2 import PdfReader
except ImportError:
raise ImportError("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")
start_time = time()
result = PDFProcessResult(
metadata=PDFMetadata(),
pages=[],
version="1.1"
)
try:
with pdf_path.open('rb') as file:
reader = PdfReader(file)
result.metadata = self._extract_metadata(pdf_path, reader)
# Handle image directory
image_dir = None
if self.extract_images and self.save_images_locally:
if self.image_save_dir:
image_dir = Path(self.image_save_dir)
image_dir.mkdir(exist_ok=True, parents=True)
else:
self._temp_dir = tempfile.mkdtemp(prefix='pdf_images_')
image_dir = Path(self._temp_dir)
for page_num, page in enumerate(reader.pages):
self.current_page_number = page_num + 1
pdf_page = self._process_page(page, image_dir)
result.pages.append(pdf_page)
except Exception as e:
logger.error(f"Failed to process PDF: {str(e)}")
raise
finally:
# Cleanup temp directory if it was created
if self._temp_dir and not self.image_save_dir:
import shutil
try:
shutil.rmtree(self._temp_dir)
except Exception as e:
logger.error(f"Failed to cleanup temp directory: {str(e)}")
result.processing_time = time() - start_time
return result
def process_batch(self, pdf_path: Path) -> PDFProcessResult:
"""Like process() but processes PDF pages in parallel batches"""
# Import inside method to allow dependency to be optional
try:
from PyPDF2 import PdfReader
import PyPDF2 # For type checking
except ImportError:
raise ImportError("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")
import concurrent.futures
import threading
# Initialize PyPDF2 thread support
if not hasattr(threading.current_thread(), "_children"):
threading.current_thread()._children = set()
start_time = time()
result = PDFProcessResult(
metadata=PDFMetadata(),
pages=[],
version="1.1"
)
try:
# Get metadata and page count from main thread
with pdf_path.open('rb') as file:
reader = PdfReader(file)
result.metadata = self._extract_metadata(pdf_path, reader)
total_pages = len(reader.pages)
# Handle image directory setup
image_dir = None
if self.extract_images and self.save_images_locally:
if self.image_save_dir:
image_dir = Path(self.image_save_dir)
image_dir.mkdir(exist_ok=True, parents=True)
else:
self._temp_dir = tempfile.mkdtemp(prefix='pdf_images_')
image_dir = Path(self._temp_dir)
def process_page_safely(page_num: int):
# Each thread opens its own file handle
with pdf_path.open('rb') as file:
thread_reader = PdfReader(file)
page = thread_reader.pages[page_num]
self.current_page_number = page_num + 1
return self._process_page(page, image_dir)
# Process pages in parallel batches
with concurrent.futures.ThreadPoolExecutor(max_workers=self.batch_size) as executor:
futures = []
for page_num in range(total_pages):
future = executor.submit(process_page_safely, page_num)
futures.append((page_num + 1, future))
# Collect results in order
result.pages = [None] * total_pages
for page_num, future in futures:
try:
pdf_page = future.result()
result.pages[page_num - 1] = pdf_page
except Exception as e:
logger.error(f"Failed to process page {page_num}: {str(e)}")
raise
except Exception as e:
logger.error(f"Failed to process PDF: {str(e)}")
raise
finally:
# Cleanup temp directory if it was created
if self._temp_dir and not self.image_save_dir:
import shutil
try:
shutil.rmtree(self._temp_dir)
except Exception as e:
logger.error(f"Failed to cleanup temp directory: {str(e)}")
result.processing_time = time() - start_time
return result
def _process_page(self, page, image_dir: Optional[Path]) -> PDFPage:
pdf_page = PDFPage(
page_number=self.current_page_number,
)
# Text and font extraction
def visitor_text(text, cm, tm, font_dict, font_size):
pdf_page.raw_text += text
pdf_page.layout.append({
"type": "text",
"text": text,
"x": tm[4],
"y": tm[5],
})
page.extract_text(visitor_text=visitor_text)
# Image extraction
if self.extract_images:
pdf_page.images = self._extract_images(page, image_dir)
# Link extraction
pdf_page.links = self._extract_links(page)
# Add markdown content
pdf_page.markdown = clean_pdf_text(self.current_page_number, pdf_page.raw_text)
pdf_page.html = clean_pdf_text_to_html(self.current_page_number, pdf_page.raw_text)
return pdf_page
def _extract_images(self, page, image_dir: Optional[Path]) -> List[Dict]:
# Import PyPDF2 for type checking only when needed
try:
import PyPDF2
except ImportError:
raise ImportError("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")
if not self.extract_images:
return []
images = []
try:
resources = page.get("/Resources")
if resources: # Check if resources exist
resources = resources.get_object() # Resolve IndirectObject
if '/XObject' in resources:
xobjects = resources['/XObject'].get_object()
img_count = 0
for obj_name in xobjects:
xobj = xobjects[obj_name]
if hasattr(xobj, 'get_object') and callable(xobj.get_object):
xobj = xobj.get_object()
if xobj.get('/Subtype') == '/Image':
try:
img_count += 1
img_filename = f"page_{self.current_page_number}_img_{img_count}"
data = xobj.get_data()
filters = xobj.get('/Filter', [])
if not isinstance(filters, list):
filters = [filters]
# Resolve IndirectObjects in properties
width = xobj.get('/Width', 0)
height = xobj.get('/Height', 0)
color_space = xobj.get('/ColorSpace', '/DeviceRGB')
if isinstance(color_space, PyPDF2.generic.IndirectObject):
color_space = color_space.get_object()
# Handle different image encodings
success = False
image_format = 'bin'
image_data = None
if '/FlateDecode' in filters:
try:
decode_parms = xobj.get('/DecodeParms', {})
if isinstance(decode_parms, PyPDF2.generic.IndirectObject):
decode_parms = decode_parms.get_object()
predictor = decode_parms.get('/Predictor', 1)
bits = xobj.get('/BitsPerComponent', 8)
colors = 3 if color_space == '/DeviceRGB' else 1
if predictor >= 10:
data = apply_png_predictor(data, width, bits, colors)
# Create PIL Image
from PIL import Image
mode = 'RGB' if color_space == '/DeviceRGB' else 'L'
img = Image.frombytes(mode, (width, height), data)
if self.save_images_locally:
final_path = (image_dir / img_filename).with_suffix('.png')
img.save(final_path)
image_data = str(final_path)
else:
import io
img_byte_arr = io.BytesIO()
img.save(img_byte_arr, format='PNG')
image_data = base64.b64encode(img_byte_arr.getvalue()).decode('utf-8')
success = True
image_format = 'png'
except Exception as e:
logger.error(f"FlateDecode error: {str(e)}")
elif '/DCTDecode' in filters:
# JPEG image
try:
if self.save_images_locally:
final_path = (image_dir / img_filename).with_suffix('.jpg')
with open(final_path, 'wb') as f:
f.write(data)
image_data = str(final_path)
else:
image_data = base64.b64encode(data).decode('utf-8')
success = True
image_format = 'jpeg'
except Exception as e:
logger.error(f"JPEG save error: {str(e)}")
elif '/CCITTFaxDecode' in filters:
try:
if data[:4] != b'II*\x00':
# Add TIFF header if missing
tiff_header = b'II*\x00\x08\x00\x00\x00\x0e\x00\x00\x01\x03\x00\x01\x00\x00\x00' + \
width.to_bytes(4, 'little') + \
b'\x01\x03\x00\x01\x00\x00\x00' + \
height.to_bytes(4, 'little') + \
b'\x01\x12\x00\x03\x00\x00\x00\x01\x00\x01\x00\x00\x01\x17\x00\x04\x00\x00\x00\x01\x00\x00\x00J\x01\x1B\x00\x05\x00\x00\x00\x01\x00\x00\x00R\x01\x28\x00\x03\x00\x00\x00\x01\x00\x02\x00\x00'
data = tiff_header + data
if self.save_images_locally:
final_path = (image_dir / img_filename).with_suffix('.tiff')
with open(final_path, 'wb') as f:
f.write(data)
image_data = str(final_path)
else:
image_data = base64.b64encode(data).decode('utf-8')
success = True
image_format = 'tiff'
except Exception as e:
logger.error(f"CCITT save error: {str(e)}")
elif '/JPXDecode' in filters:
# JPEG 2000
try:
if self.save_images_locally:
final_path = (image_dir / img_filename).with_suffix('.jp2')
with open(final_path, 'wb') as f:
f.write(data)
image_data = str(final_path)
else:
image_data = base64.b64encode(data).decode('utf-8')
success = True
image_format = 'jpeg2000'
except Exception as e:
logger.error(f"JPEG2000 save error: {str(e)}")
if success and image_data:
image_info = {
"format": image_format,
"width": width,
"height": height,
"color_space": str(color_space),
"bits_per_component": xobj.get('/BitsPerComponent', 1)
}
if self.save_images_locally:
image_info["path"] = image_data
else:
image_info["data"] = image_data
images.append(image_info)
else:
# Fallback: Save raw data
if self.save_images_locally:
final_path = (image_dir / img_filename).with_suffix('.bin')
with open(final_path, 'wb') as f:
f.write(data)
logger.warning(f"Saved raw image data to {final_path}")
else:
image_data = base64.b64encode(data).decode('utf-8')
images.append({
"format": "bin",
"width": width,
"height": height,
"color_space": str(color_space),
"bits_per_component": xobj.get('/BitsPerComponent', 1),
"data": image_data
})
except Exception as e:
logger.error(f"Error processing image: {str(e)}")
except Exception as e:
logger.error(f"Image extraction error: {str(e)}")
return images
def _extract_links(self, page) -> List[str]:
links = []
if '/Annots' in page:
try:
for annot in page['/Annots']:
a = annot.get_object()
if '/A' in a and '/URI' in a['/A']:
links.append(a['/A']['/URI'])
except Exception as e:
print(f"Link error: {str(e)}")
return links
def _extract_metadata(self, pdf_path: Path, reader = None) -> PDFMetadata:
# Import inside method to allow dependency to be optional
if reader is None:
try:
from PyPDF2 import PdfReader
reader = PdfReader(pdf_path)
except ImportError:
raise ImportError("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")
meta = reader.metadata or {}
created = self._parse_pdf_date(meta.get('/CreationDate', ''))
modified = self._parse_pdf_date(meta.get('/ModDate', ''))
return PDFMetadata(
title=meta.get('/Title'),
author=meta.get('/Author'),
producer=meta.get('/Producer'),
created=created,
modified=modified,
pages=len(reader.pages),
encrypted=reader.is_encrypted,
file_size=pdf_path.stat().st_size
)
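# PDF date strings use the form "D:YYYYMMDDHHmmSS", optionally followed by a timezone
# offset (e.g. "D:20240101120000+00'00'"); the parser below reads the first 14 digits
# and ignores any trailing offset, returning None for anything it cannot match.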
def _parse_pdf_date(self, date_str: str) -> Optional[datetime]:
try:
match = re.match(r'D:(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})', date_str)
if not match:
return None
return datetime(
year=int(match[1]),
month=int(match[2]),
day=int(match[3]),
hour=int(match[4]),
minute=int(match[5]),
second=int(match[6])
)
except:
return None
# Usage example
if __name__ == "__main__":
import json
from pathlib import Path
try:
# Import PyPDF2 only when running the file directly
import PyPDF2
from PyPDF2 import PdfReader
except ImportError:
print("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")
exit(1)
current_dir = Path(__file__).resolve().parent
pdf_path = f'{current_dir}/test.pdf'
strategy = NaivePDFProcessorStrategy()
result = strategy.process(Path(pdf_path))
# Convert to JSON
json_output = asdict(result)
print(json.dumps(json_output, indent=2, default=str))
with open(f'{current_dir}/test.html', 'w') as f:
for page in result.pages:
f.write(f'<h1>Page {page.page_number}</h1>')
f.write(page.html)
with open(f'{current_dir}/test.md', 'w') as f:
for page in result.pages:
f.write(f'# Page {page.page_number}\n\n')
f.write(clean_pdf_text(page.page_number, page.raw_text))
f.write('\n\n')
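A minimal usage sketch (not part of the original file) of the batched variant with images kept in memory as base64 strings; "sample.pdf" is a placeholder path.

```python
# Hypothetical usage of NaivePDFProcessorStrategy.process_batch
from pathlib import Path

strategy = NaivePDFProcessorStrategy(
    extract_images=True,
    save_images_locally=False,  # images come back as base64 strings under "data"
    batch_size=4,
)
result = strategy.process_batch(Path("sample.pdf"))  # placeholder path
print(result.metadata.title, result.metadata.pages, f"{result.processing_time:.2f}s")
for page in result.pages:
    print(page.page_number, len(page.images), "images,", len(page.links), "links")
```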

View File

@@ -1,350 +0,0 @@
import re
def apply_png_predictor(data, width, bits, color_channels):
"""Decode PNG predictor (PDF 1.5+ filter)"""
bytes_per_pixel = (bits * color_channels) // 8
if (bits * color_channels) % 8 != 0:
bytes_per_pixel += 1
stride = width * bytes_per_pixel
scanline_length = stride + 1 # +1 for filter byte
if len(data) % scanline_length != 0:
raise ValueError("Invalid scanline structure")
num_lines = len(data) // scanline_length
output = bytearray()
prev_line = b'\x00' * stride
for i in range(num_lines):
line = data[i*scanline_length:(i+1)*scanline_length]
filter_type = line[0]
filtered = line[1:]
if filter_type == 0: # None
decoded = filtered
elif filter_type == 1: # Sub
decoded = bytearray(filtered)
for j in range(bytes_per_pixel, len(decoded)):
decoded[j] = (decoded[j] + decoded[j - bytes_per_pixel]) % 256
elif filter_type == 2: # Up
decoded = bytearray([(filtered[j] + prev_line[j]) % 256
for j in range(len(filtered))])
elif filter_type == 3: # Average
decoded = bytearray(filtered)
for j in range(len(decoded)):
left = decoded[j - bytes_per_pixel] if j >= bytes_per_pixel else 0
up = prev_line[j]
avg = (left + up) // 2
decoded[j] = (decoded[j] + avg) % 256
elif filter_type == 4: # Paeth
decoded = bytearray(filtered)
for j in range(len(decoded)):
left = decoded[j - bytes_per_pixel] if j >= bytes_per_pixel else 0
up = prev_line[j]
up_left = prev_line[j - bytes_per_pixel] if j >= bytes_per_pixel else 0
paeth = paeth_predictor(left, up, up_left)
decoded[j] = (decoded[j] + paeth) % 256
else:
raise ValueError(f"Unsupported filter type: {filter_type}")
output.extend(decoded)
prev_line = decoded
return bytes(output)
def paeth_predictor(a, b, c):
p = a + b - c
pa = abs(p - a)
pb = abs(p - b)
pc = abs(p - c)
if pa <= pb and pa <= pc:
return a
elif pb <= pc:
return b
else:
return c
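A quick sanity check (sample bytes invented for illustration) showing how the predictor reconstructs a two-scanline greyscale strip whose second line uses the Up filter:

```python
# Illustrative only: 4-pixel-wide, 8-bit, single-channel image, two scanlines.
width, bits, channels = 4, 8, 1
raw = bytes([0, 10, 20, 30, 40,   # filter byte 0 (None) + literal first scanline
             2, 5, 5, 5, 5])      # filter byte 2 (Up)   + deltas against the line above
assert apply_png_predictor(raw, width, bits, channels) == bytes([10, 20, 30, 40, 15, 25, 35, 45])
```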
import re
import html
def clean_pdf_text_to_html(page_number, text):
# Decode Unicode escapes and handle surrogate pairs
try:
decoded = text.encode('latin-1').decode('unicode-escape')
decoded = decoded.encode('utf-16', 'surrogatepass').decode('utf-16')
except Exception as e:
decoded = text # Fallback if decoding fails
article_title_detected = False
# decoded = re.sub(r'\.\n', '.\n\n', decoded)
# decoded = re.sub(r'\.\n', '<|break|>', decoded)
lines = decoded.split('\n')
output = []
current_paragraph = []
in_header = False
email_pattern = re.compile(r'\{.*?\}')
affiliation_pattern = re.compile(r'^†')
quote_pattern = re.compile(r'^["“]')
author_pattern = re.compile(
r'^\s*[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?'
r'(?:,\s*[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?)*'
r'(?:,\s*(?:and|&)\s+[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?)?\s*$'
)
def flush_paragraph():
if current_paragraph:
para = ' '.join(current_paragraph)
para = re.sub(r'\s+', ' ', para).strip()
if para:
# escaped_para = html.escape(para)
escaped_para = para
# escaped_para = re.sub(r'\.\n', '.\n\n', escaped_para)
# Split escaped_para by <|break|> to avoid HTML escaping
escaped_para = escaped_para.split('.\n\n')
# Wrap each part in <p> tag
escaped_para = [f'<p>{part}</p>' for part in escaped_para]
output.append(f'<div class="paragraph">{"".join(escaped_para)}</div><hr/>')
current_paragraph.clear()
for i, line in enumerate(lines):
line = line.strip()
# Handle empty lines
if not line:
flush_paragraph()
continue
# Detect article title (first line with reasonable length)
if not article_title_detected and i == 0 and 3 <= len(line.split()) <= 8 and len(lines) > 1:
flush_paragraph()
escaped_line = html.escape(line)
output.append(f'<h2>{escaped_line}</h2>')
article_title_detected = True
continue
# Detect numbered headers like "2.1 Background"
numbered_header = re.match(r'^(\d+(?:\.\d+)*)\s+(.+)$', line)
if i > 0 and not lines[i-1].strip() and numbered_header:
flush_paragraph()
level = numbered_header.group(1).count('.') + 1
header_text = numbered_header.group(2)
md_level = min(level + 1, 6)
escaped_header = html.escape(header_text)
output.append(f'<h{md_level}>{escaped_header}</h{md_level}>')
in_header = True
continue
# Detect authors
if page_number == 1 and author_pattern.match(line):
authors = re.sub(r'[†â€]', '', line)
authors = re.split(r', | and ', authors)
formatted_authors = []
for author in authors:
if author.strip():
parts = [p for p in author.strip().split() if p]
formatted = ' '.join(parts)
escaped_author = html.escape(formatted)
formatted_authors.append(f'<strong>{escaped_author}</strong>')
if len(formatted_authors) > 1:
joined = ', '.join(formatted_authors[:-1]) + ' and ' + formatted_authors[-1]
else:
joined = formatted_authors[0]
output.append(f'<p>{joined}</p>')
continue
# Detect affiliation
if affiliation_pattern.match(line):
escaped_line = html.escape(line)
output.append(f'<p><em>{escaped_line}</em></p>')
continue
# Detect emails
if email_pattern.match(line):
escaped_line = html.escape(line)
output.append(f'<p><code>{escaped_line}</code></p>')
continue
# Detect section headers
if re.match(r'^(Abstract|\d+\s+[A-Z]|References|Appendix|Figure|Table)', line):
flush_paragraph()
escaped_line = html.escape(line)
output.append(f'<h2 class="section-header"><em>{escaped_line}</em></h2>')
in_header = True
continue
# Handle quotes
if quote_pattern.match(line):
flush_paragraph()
escaped_line = html.escape(line)
output.append(f'<blockquote><p>{escaped_line}</p></blockquote>')
continue
# Handle hyphenated words
if line.endswith('-'):
current_paragraph.append(line[:-1].strip())
else:
current_paragraph.append(line)
# Handle paragraph breaks after headers
if in_header and not line.endswith(('.', '!', '?')):
flush_paragraph()
in_header = False
flush_paragraph()
# Post-process HTML
html_output = '\n'.join(output)
# Fix common citation patterns
html_output = re.sub(r'\(([A-Z][a-z]+ et al\. \d{4})\)', r'<cite>\1</cite>', html_output)
# Fix escaped characters
html_output = html_output.replace('\\ud835', '').replace('\\u2020', '')
# Remove leftover hyphens and fix spacing
html_output = re.sub(r'\s+-\s+', '', html_output)
html_output = re.sub(r'\s+([.,!?)])', r'\1', html_output)
return html_output
def clean_pdf_text(page_number, text):
# Decode Unicode escapes and handle surrogate pairs
try:
decoded = text.encode('latin-1').decode('unicode-escape')
decoded = decoded.encode('utf-16', 'surrogatepass').decode('utf-16')
except Exception as e:
decoded = text # Fallback if decoding fails
article_title_detected = False
decoded = re.sub(r'\.\n', '.\n\n', decoded)
lines = decoded.split('\n')
output = []
current_paragraph = []
in_header = False
email_pattern = re.compile(r'\{.*?\}')
affiliation_pattern = re.compile(r'^†')
quote_pattern = re.compile(r'^["“]')
author_pattern = re.compile(
r'^\s*[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?'
r'(?:,\s*[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?)*'
r'(?:,\s*(?:and|&)\s+[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?)?\s*$'
)
def flush_paragraph():
if current_paragraph:
para = ' '.join(current_paragraph)
para = re.sub(r'\s+', ' ', para).strip()
if para:
output.append(para)
current_paragraph.clear()
for i, line in enumerate(lines):
line = line.strip()
# Handle special patterns
if not line:
flush_paragraph()
continue
# Detect headline (first line, reasonable length, surrounded by empty lines)
if not article_title_detected and i == 0 and 3 <= len(line.split()) <= 8 and (len(lines) > 1):
flush_paragraph()
output.append(f'## {line}')
article_title_detected = True
continue
# Detect paragraph breaks for ALL paragraphs
if not line and current_paragraph:
flush_paragraph()
output.append('') # Add empty line between paragraphs
continue
# Detect numbered headers like "2.1 Background"
numbered_header = re.match(r'^(\d+(?:\.\d+)*)\s+(.+)$', line)
if i > 0 and not lines[i-1].strip() and numbered_header:
flush_paragraph()
level = numbered_header.group(1).count('.') + 1 # Convert 2.1 → level 2
header_text = numbered_header.group(2)
# Never go beyond ### for subsections
md_level = min(level + 1, 6) # 1 → ##, 2 → ###, 3 → #### etc
output.append(f'{"#" * md_level} {header_text}')
in_header = True
continue
# Detect authors
if page_number == 1 and author_pattern.match(line):
# Clean and format author names
authors = re.sub(r'[†â€]', '', line) # Remove affiliation markers
authors = re.split(r', | and ', authors)
formatted_authors = []
for author in authors:
if author.strip():
# Handle "First Last" formatting
parts = [p for p in author.strip().split() if p]
formatted = ' '.join(parts)
formatted_authors.append(f'**{formatted}**')
# Join with commas and "and"
if len(formatted_authors) > 1:
joined = ', '.join(formatted_authors[:-1]) + ' and ' + formatted_authors[-1]
else:
joined = formatted_authors[0]
output.append(joined)
continue
# Detect affiliation
if affiliation_pattern.match(line):
output.append(f'*{line}*')
continue
# Detect emails
if email_pattern.match(line):
output.append(f'`{line}`')
continue
# Detect section headers
if re.match(r'^(Abstract|\d+\s+[A-Z]|References|Appendix|Figure|Table)', line):
flush_paragraph()
output.append(f'_[{line}]_')
in_header = True
continue
# Handle quotes
if quote_pattern.match(line):
flush_paragraph()
output.append(f'> {line}')
continue
# Handle hyphenated words
if line.endswith('-'):
current_paragraph.append(line[:-1].strip())
else:
current_paragraph.append(line)
# Handle paragraph breaks after headers
if in_header and not line.endswith(('.', '!', '?')):
flush_paragraph()
in_header = False
flush_paragraph()
# Post-processing
markdown = '\n\n'.join(output)
# Fix common citation patterns
markdown = re.sub(r'\(([A-Z][a-z]+ et al\. \d{4})\)', r'[\1]', markdown)
# Fix escaped characters
markdown = markdown.replace('\\ud835', '').replace('\\u2020', '')
# Remove leftover hyphens and fix spacing
markdown = re.sub(r'\s+-\s+', '', markdown) # Join hyphenated words
markdown = re.sub(r'\s+([.,!?)])', r'\1', markdown) # Fix punctuation spacing
return markdown
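A minimal sketch of calling the two cleaners on a page's raw text; the sample string below is invented for illustration.

```python
sample = ("A Short Survey of PDF Parsing\n"
          "Extracting text from PDFs is surprisingly hard be-\n"
          "cause layout, fonts and encodings all interfere.\n")
print(clean_pdf_text(1, sample))          # markdown output
print(clean_pdf_text_to_html(1, sample))  # HTML output
```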

View File

@@ -198,70 +198,25 @@ Avoid Common Mistakes:
- Do NOT add any comments using "//" or "#" in the JSON output. It causes parsing errors.
- Make sure the JSON is properly formatted with curly braces, square brackets, and commas in the right places.
- Do not miss closing </blocks> tag at the end of the JSON output.
- Do not generate Python code showing how to do the task; this is your task to extract the information and return it in JSON format.
Result
Output the final list of JSON objects, wrapped in <blocks>...</blocks> XML tags. Make sure to close the tag properly."""
PROMPT_EXTRACT_INFERRED_SCHEMA = """Here is the content from the URL:
<url>{URL}</url>
<url_content>
{HTML}
</url_content>
Please carefully read the URL content and the user's request. Analyze the page structure and infer the most appropriate JSON schema based on the content and request.
Extraction Strategy:
1. First, determine if the page contains repetitive items (like multiple products, articles, etc.) or a single content item (like a single article or page).
2. For repetitive items: Identify the common pattern and extract each instance as a separate JSON object in an array.
3. For single content: Extract the key information into a comprehensive JSON object that captures the essential details.
Extraction instructions:
Return the extracted information as a list of JSON objects. For repetitive content, each object in the list should correspond to a distinct item. For single content, you may return just one detailed JSON object. Wrap the entire JSON list in <blocks>...</blocks> XML tags.
Schema Design Guidelines:
- Create meaningful property names that clearly describe the data they contain
- Use nested objects for hierarchical information
- Use arrays for lists of related items
- Include all information requested by the user
- Maintain consistency in property names and data structures
- Only include properties that are actually present in the content
- For dates, prefer ISO format (YYYY-MM-DD)
- For prices or numeric values, extract them without currency symbols when possible
Quality Reflection:
Before outputting your final answer, double check that:
1. The inferred schema makes logical sense for the type of content
2. All requested information is included
3. The JSON is valid and could be parsed without errors
4. Property names are consistent and descriptive
5. The structure is optimal for the type of data being represented
Avoid Common Mistakes:
- Do NOT add any comments using "//" or "#" in the JSON output. It causes parsing errors.
- Make sure the JSON is properly formatted with curly braces, square brackets, and commas in the right places.
- Do not miss closing </blocks> tag at the end of the JSON output.
- Do not generate Python code showing how to do the task; this is your task to extract the information and return it in JSON format.
- Ensure consistency in property names across all objects
- Don't include empty properties or null values unless they're meaningful
- For repetitive content, ensure all objects follow the same schema
Important: If user specific instruction is provided, then stress significantly on what user is requesting and describing about the schema of end result (if any). If user is requesting to extract specific information, then focus on that and ignore the rest of the content.
<user_request>
{REQUEST}
</user_request>
Result:
Output the final list of JSON objects, wrapped in <blocks>...</blocks> XML tags. Make sure to close the tag properly.
DO NOT ADD ANY PRE OR POST COMMENTS. JUST RETURN THE JSON OBJECTS INSIDE <blocks>...</blocks> TAGS.
CRITICAL: The content inside the <blocks> tags MUST be a direct array of JSON objects (starting with '[' and ending with ']'), not a dictionary/object containing an array. For example, use <blocks>[{...}, {...}]</blocks> instead of <blocks>{"items": [{...}, {...}]}</blocks>. This is essential for proper parsing.
"""
PROMPT_FILTER_CONTENT = """Your task is to filter and convert HTML content into clean, focused markdown that's optimized for use with LLMs and information retrieval systems.
INPUT HTML:
<|HTML_CONTENT_START|>
{HTML}
<|HTML_CONTENT_END|>
SPECIFIC INSTRUCTION:
<|USER_INSTRUCTION_START|>
{REQUEST}
<|USER_INSTRUCTION_END|>
TASK DETAILS:
1. Content Selection
- DO: Keep essential information, main content, key details
@@ -285,7 +240,15 @@ TASK DETAILS:
- DON'T: Fragment related content
- DON'T: Duplicate information
IMPORTANT: If user specific instruction is provided, ignore above guideline and prioritize those requirements over these general guidelines.
Example Input:
<div class="main-content"><h1>Setup Guide</h1><p>Follow these steps...</p></div>
<div class="sidebar">Related articles...</div>
Example Output:
# Setup Guide
Follow these steps...
IMPORTANT: If specific instruction is provided above, prioritize those requirements over these general guidelines.
OUTPUT FORMAT:
Wrap your response in <content> tags. Use proper markdown throughout.
@@ -293,18 +256,7 @@ Wrap your response in <content> tags. Use proper markdown throughout.
[Your markdown content here]
</content>
Begin filtering now.
--------------------------------------------
<|HTML_CONTENT_START|>
{HTML}
<|HTML_CONTENT_END|>
<|USER_INSTRUCTION_START|>
{REQUEST}
<|USER_INSTRUCTION_END|>
"""
Begin filtering now."""
JSON_SCHEMA_BUILDER= """
# HTML Schema Generation Instructions
@@ -1054,525 +1006,4 @@ Your output must:
5. Include all required fields
6. Use valid XPath selectors
</output_requirements>
"""
GENERATE_SCRIPT_PROMPT = r"""You are a world-class browser automation specialist. Your sole purpose is to convert a natural language objective and a snippet of HTML into the most **efficient, robust, and simple** script possible to prepare a web page for data extraction.
Your scripts run **before the crawl** to handle dynamic content, user interactions, and other obstacles. You are a master of two tools: raw **JavaScript** and the high-level **Crawl4ai Script (c4a)**.
────────────────────────────────────────────────────────
## Your Core Philosophy: "Efficiency, Robustness, Simplicity"
This is your mantra. Every line of code you write must adhere to it.
1. **Efficiency (Shortest Path):** Generate the absolute minimum number of steps to achieve the goal. Do not include redundant actions. If a `CLICK` on one button achieves the goal, don't also scroll and wait unnecessarily.
2. **Robustness (Will Not Break):** Prioritize selectors and methods that are resistant to cosmetic site changes. `data-*` attributes are gold. Dynamic, auto-generated class names (`.class-a8B_x3`) are poison. Always prefer waiting for a state change (`WAIT \`#results\``) over a blind delay (`WAIT 5`).
3. **Simplicity (Right Tool for the Job):** Use the simplest tool that works. Prefer a direct `c4a` command over `EVAL` with JavaScript. Only use `EVAL` when the task is impossible with standard commands (e.g., accessing Shadow DOM, complex array filtering).
────────────────────────────────────────────────────────
## Output Mode Selection Logic
Your choice of output mode is a critical strategic decision.
* **Use `crawl4ai_script` for:**
* Standard, sequential browser actions: login forms, clicking "next page," simple "load more" buttons, accepting cookie banners.
* When the user's goal maps clearly to the available `c4a` commands.
* When you need to define reusable macros with `PROC`.
* **Use `javascript` for:**
* Complex DOM manipulation that has no `c4a` equivalent (e.g., transforming data, complex filtering).
* Interacting with web components inside **Shadow DOM** or **iFrames**.
* Implementing sophisticated logic like custom scrolling patterns or handling non-standard events.
* When the goal is a fine-grained DOM tweak, not a full user journey.
**If the user specifies a mode, you MUST respect it.** If not, you must choose the mode that best embodies your core philosophy.
────────────────────────────────────────────────────────
## Available Crawl4ai Commands
| Command | Arguments / Notes |
|------------------------|--------------------------------------------------------------|
| GO `<url>` | Navigate to absolute URL |
| RELOAD | Hard refresh |
| BACK / FORWARD | Browser history nav |
| WAIT `<seconds>` | **Avoid!** Passive delay. Use only as a last resort. |
| WAIT \`<css>\` `<t>` | **Preferred wait.** Poll selector until found, timeout in seconds. |
| WAIT "<text>" `<t>` | Poll page text until found, timeout in seconds. |
| CLICK \`<css>\` | Single click on element |
| CLICK `<x>` `<y>` | Viewport click |
| DOUBLE_CLICK … | Two rapid clicks |
| RIGHT_CLICK … | Context-menu click |
| MOVE `<x>` `<y>` | Mouse move |
| DRAG `<x1>` `<y1>` `<x2>` `<y2>` | Click-drag gesture |
| SCROLL UP|DOWN|LEFT|RIGHT `[px]` | Viewport scroll |
| TYPE "<text>" | Type into focused element |
| CLEAR \`<css>\` | Empty input |
| SET \`<css>\` "<val>" | Set element value and dispatch events |
| PRESS `<Key>` | Keydown + keyup |
| KEY_DOWN `<Key>` / KEY_UP `<Key>` | Separate key events |
| EVAL \`<js>\` | **Your fallback.** Run JS when no direct command exists. |
| SETVAR $name = <val> | Store constant for reuse |
| PROC name … ENDPROC | Define macro |
| IF / ELSE / REPEAT | Flow control |
| USE "<file.c4a>" | Include another script, avoid circular includes |
────────────────────────────────────────────────────────
## Strategic Principles & Anti-Patterns
These are your commandments. Do not deviate.
1. **Selector Quality is Paramount:**
* **GOOD:** `[data-testid="submit-button"]`, `#main-content`, `[aria-label="Close dialog"]`
* **BAD:** `div > span:nth-child(3)`, `.button-gR3xY_s`, `//div[contains(@class, 'button')]`
2. **Wait for State, Not for Time:**
* **DO:** `CLICK \`#load-more\`` followed by `WAIT \`div.new-item\` 10`. This waits for the *result* of the action.
* **DON'T:** `CLICK \`#load-more\`` followed by `WAIT 5`. This is a guess and it will fail.
3. **Target the Action, Not the Artifact:** If you need to reveal content, click the button that reveals it. Don't try to manually change CSS `display` properties, as this can break the page's internal state.
4. **DOM-Awareness is Non-Negotiable:**
* **Shadow DOM:** `c4a` commands CANNOT pierce the Shadow DOM. If you see a `#shadow-root (open)` in the HTML, you MUST use `EVAL` and `element.shadowRoot.querySelector(...)`.
* **iFrames:** Likewise, you MUST use `EVAL` and `iframe.contentDocument.querySelector(...)` to interact with elements inside an iframe.
5. **Be Idempotent:** Your script must be harmless if run multiple times. Use `IF EXISTS` to check for states before acting (e.g., don't try to log in if already logged in).
6. **Forbidden Techniques:** Never use `document.write()`. It is destructive. Avoid overly complex JS in `EVAL` that could be simplified into a few `c4a` commands.
────────────────────────────────────────────────────────
## From Vague Goals to Robust Scripts: Your Duty to Infer and Ensure Reliability
This is your most important responsibility. Users are not automation experts. They will provide incomplete or vague instructions. Your job is to be the expert—to infer their true goal and build a script that is reliable by default. You must add the "invisible scaffolding" of checks and waits to ensure the page is stable and ready for the crawler. **A vague user prompt must still result in a robust, complete script.**
Study these examples. No matter which query is given, your output must be the single, robust solution.
### 1. Scenario: Basic Search Query
* **High Detail Query:** "Find the search box and search button. Wait for the search box to be visible, click it, clear it, type 'r2d2', click the search button, and then wait for the search results to appear."
* **Medium Detail Query:** "Find the search box and search for 'r2d2', click the search button until you get a list of items."
* **Low Detail Query:** "Search for r2d2."
**THE CORRECT, ROBUST OUTPUT (for all three queries):**
```
WAIT `input[type="search"]` 10
SET `input[type="search"]` "r2d2"
CLICK `button[aria-label="Search"]`
WAIT `div.search-results-container` 15
```
**Rationale:** You correctly infer the need to `WAIT` for the input first. You use the more efficient `SET` command. Most importantly, you **infer the crucial final step**: waiting for a results container to appear, confirming the search action was successful.
### 2. Scenario: Clicking a "Load More" Button
* **High Detail Query:** "Click the button with the text 'Load More'. Afterward, wait for a new item with the class '.product-tile' to show up on the page."
* **Medium Detail Query:** "Click the load more button to see more products."
* **Low Detail Query:** "Load more items."
**THE CORRECT, ROBUST OUTPUT:**
```
IF EXISTS `button.load-more` THEN
CLICK `button.load-more`
WAIT `div.new-item-indicator` 8
ENDIF
```
**Rationale:** You wrap the action in `IF EXISTS` to prevent errors if the button is not present (e.g., on the last page). You correctly infer the need to wait for the *consequence* of the click—a new item appearing—rather than a blind `WAIT 2`.
### 3. Scenario: Applying a Filter from a Dropdown
* **High Detail Query:** "First, click the dropdown with id 'color-filter'. Then, from the open menu, click the option that says 'Blue'. Finally, wait for the product grid to update."
* **Medium Detail Query:** "Filter the products by the color Blue."
* **Low Detail Query:** "Show blue products."
**THE CORRECT, ROBUST OUTPUT:**
```
CLICK `#color-filter`
WAIT `[data-value="blue"]` 3
CLICK `[data-value="blue"]`
WAIT `#product-grid[data-status="updated"]` 10
```
**Rationale:** You infer the need for two waits: one for the menu options to appear after the first click, and a second for the main content to update after the filter is applied. This prevents race conditions.
### 4. Scenario: User Authentication (Login)
* **High Detail Query:** "Fill username with 'USER_EMAIL', fill password with 'USER_PASS', click login, and wait for the dashboard to appear."
* **Medium Detail Query:** "Log in as USER_EMAIL with password USER_PASS."
* **Low Detail Query:** "Log in."
**THE CORRECT, ROBUST OUTPUT:**
```
IF EXISTS `[data-testid="logout-button"]` THEN
EVAL `console.log("Already logged in.")`
ELSE
WAIT `input[name="username"]` 10
SET `input[name="username"]` "USER_EMAIL"
SET `input[name="password"]` "USER_PASS"
CLICK `button[type="submit"]`
WAIT `[data-testid="user-dashboard"]` 15
ENDIF
```
**Rationale:** You build an **idempotent** script. You first check if the user is *already* logged in. If not, you proceed with the login and then, critically, `WAIT` for a post-login element to confirm success. You use placeholders when credentials are not provided in low-detail queries.
### 5. Scenario: Dismissing an Interstitial Modal
* **High Detail Query:** "Check if a popup with id '#promo-modal' exists. If it does, click the close button inside it with class '.close-x'."
* **Medium Detail Query:** "Close the promotional popup."
* **Low Detail Query:** "Get rid of the popup."
**THE CORRECT, ROBUST OUTPUT:**
```
IF EXISTS `div#promo-modal` THEN
CLICK `div#promo-modal button.close-x`
ENDIF
```
**Rationale:** You correctly identify this as a conditional action. The script must not fail if the popup doesn't appear. The `IF EXISTS` block is the perfect, robust way to handle this optional interaction.
────────────────────────────────────────────────────────
## Advanced Scenarios & Master-Level Examples
Study these solutions. Understand the *why* behind each choice.
### Scenario: Interacting with a Web Component (Shadow DOM)
**Goal:** Click a button inside a custom element `<user-card>`.
**HTML Snippet:** `<user-card><#shadow-root (open)><button>Details</button></#shadow-root></user-card>`
**Correct Mode:** `javascript` (or `c4a` with `EVAL`)
**Rationale:** Standard selectors can't cross the shadow boundary. JavaScript is mandatory.
```javascript
// Solution in pure JS mode
const card = document.querySelector('user-card');
if (card && card.shadowRoot) {
const button = card.shadowRoot.querySelector('button');
if (button) button.click();
}
```
```
# Solution in c4a mode (using EVAL as the weapon of choice)
EVAL `
const card = document.querySelector('user-card');
if (card && card.shadowRoot) {
const button = card.shadowRoot.querySelector('button');
if (button) button.click();
}
`
```
### Scenario: Handling a Cookie Banner
**Goal:** Accept the cookies to dismiss the modal.
**HTML Snippet:** `<div id="cookie-consent-modal"><button id="accept-cookies">Accept All</button></div>`
**Correct Mode:** `crawl4ai_script`
**Rationale:** A simple, direct action. `c4a` is cleaner and more declarative.
```
# The most efficient solution
IF EXISTS `#cookie-consent-modal` THEN
CLICK `#accept-cookies`
WAIT `div.content-loaded` 5
ENDIF
```
### Scenario: Infinite Scroll Page
**Goal:** Scroll down 5 times to load more content.
**HTML Snippet:** `(A page with a long body and no "load more" button)`
**Correct Mode:** `crawl4ai_script`
**Rationale:** `REPEAT` is designed for exactly this. It's more readable than a JS loop for this simple task.
```
REPEAT (
SCROLL DOWN 1000,
5
)
WAIT 2
```
### Scenario: Hover-to-Reveal Menu
**Goal:** Hover over "Products" to open the menu, then click "Laptops".
**HTML Snippet:** `<a href="/products" id="products-menu">Products</a> <div class="menu-dropdown"><a href="/laptops">Laptops</a></div>`
**Correct Mode:** `crawl4ai_script` (with `EVAL`)
**Rationale:** `c4a` has no `HOVER` command. `EVAL` is the perfect tool to dispatch the `mouseover` event.
```
EVAL `document.querySelector('#products-menu').dispatchEvent(new MouseEvent('mouseover', { bubbles: true }))`
WAIT `div.menu-dropdown a[href="/laptops"]` 3
CLICK `div.menu-dropdown a[href="/laptops"]`
```
### Scenario: Login Form
**Goal:** Fill and submit a login form.
**HTML Snippet:** `<form><input name="email"><input name="password" type="password"><button type="submit"></button></form>`
**Correct Mode:** `crawl4ai_script`
**Rationale:** This is the canonical use case for `c4a`. The commands map 1:1 to the user journey.
```
WAIT `form` 10
SET `input[name="email"]` "USER_EMAIL"
SET `input[name="password"]` "USER_PASS"
CLICK `button[type="submit"]`
WAIT `[data-testid="user-dashboard"]` 12
```
────────────────────────────────────────────────────────
## Final Output Mandate
1. **CODE ONLY.** Your entire response must be the script body.
2. **NO CHAT.** Do not say "Here is the script" or "This should work."
3. **NO MARKDOWN.** Do not wrap your code in ` ``` ` fences.
4. **NO COMMENTS.** Do not add comments to the final code output.
5. **SYNTACTICALLY PERFECT.** The script must be immediately executable.
6. **UTF-8, STANDARD QUOTES.** Use `"` for string literals, not `“` or `”`.
You are an engine of automation. Now, receive the user's request and produce the optimal script."""
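For orientation, a hedged sketch of how this system prompt might be paired with a user message carrying the objective and an HTML fragment; the message layout and values are illustrative, not the library's actual call.

```python
messages = [
    {"role": "system", "content": GENERATE_SCRIPT_PROMPT},
    {"role": "user", "content": (
        "Objective: accept the cookie banner, then load more products.\n\n"
        "HTML:\n<div id='cookie-consent-modal'><button id='accept-cookies'>Accept All</button></div>"
    )},
]
```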
GENERATE_JS_SCRIPT_PROMPT = """# The World-Class JavaScript Automation Scripter
You are a world-class browser automation specialist. Your sole purpose is to convert a natural language objective and a snippet of HTML into the most **efficient, robust, and simple** pure JavaScript script possible to prepare a web page for data extraction.
Your scripts will be executed directly in the browser (e.g., via Playwright's `page.evaluate()`) to handle dynamic content, user interactions, and other obstacles before the page is crawled. You are a master of browser-native JavaScript APIs.
────────────────────────────────────────────────────────
## Your Core Philosophy: "Efficiency, Robustness, Simplicity"
This is your mantra. Every line of JavaScript you write must adhere to it.
1. **Efficiency (Shortest Path):** Generate the absolute minimum number of steps to achieve the goal. Do not include redundant actions. Your code should be concise and direct.
2. **Robustness (Will Not Break):** Prioritize selectors that are resistant to cosmetic site changes. `data-*` attributes are gold. Dynamic, auto-generated class names (`.class-a8B_x3`) are poison. Always prefer waiting for a state change over a blind `setTimeout`.
3. **Simplicity (Right Tool for the Job):** Use simple, direct DOM methods (`.querySelector`, `.click()`) whenever possible. Avoid overly complex or fragile logic when a simpler approach exists.
────────────────────────────────────────────────────────
## Essential JavaScript Automation Patterns & Toolkit
All code should be wrapped in an `async` Immediately Invoked Function Expression `(async () => { ... })();` to allow for top-level `await` and to avoid polluting the global scope.
| Task | Best-Practice JavaScript Implementation |
| -------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| **Wait for Element** | Create and use a robust `waitForElement` helper function. This is your most important tool. <br> `const waitForElement = (selector, timeout = 10000) => new Promise((resolve, reject) => { const el = document.querySelector(selector); if (el) return resolve(el); const observer = new MutationObserver(() => { const el = document.querySelector(selector); if (el) { observer.disconnect(); resolve(el); } }); observer.observe(document.body, { childList: true, subtree: true }); setTimeout(() => { observer.disconnect(); reject(new Error(`Timeout waiting for ${selector}`)); }, timeout); });` |
| **Click Element** | `const el = await waitForElement('selector'); if (el) el.click();` |
| **Set Input Value** | `const input = await waitForElement('selector'); if (input) { input.value = 'new value'; input.dispatchEvent(new Event('input', { bubbles: true })); input.dispatchEvent(new Event('change', { bubbles: true })); }` <br> *Crucially, always dispatch `input` and `change` events to trigger framework reactivity.* |
| **Check Existence** | `const el = document.querySelector('selector'); if (el) { /* ... it exists */ }` |
| **Scroll** | `window.scrollBy(0, window.innerHeight);` |
| **Deal with Time** | Use `await new Promise(r => setTimeout(r, 500));` for short, unavoidable pauses after an action. **Avoid long, blind waits.** |
REMEMBER: Generate highly deterministic CSS selectors. If you refer to a specific button, be precise about which one; otherwise you may capture elements you do not need. Always be very specific about the element you want to interact with.
────────────────────────────────────────────────────────
## The Art of High-Specificity Selectors: Your Defense Against Ambiguity
This is your most critical skill for ensuring robustness. **You must assume the provided HTML is only a small fragment of the entire page.** A selector that looks unique in the fragment could be disastrously generic on the full page. Your primary defense is to **anchor your selectors to the most specific, stable parent element available in the given HTML context.**
Think of it as creating a "sandbox" for your selectors.
**Your Guiding Principle:** Start from a unique parent, then find the child.
### Scenario: Selecting a Submit Button within a Login Form
**HTML Snippet Provided:**
```html
<div class="user-auth-module" id="login-widget">
<h2>Member Login</h2>
<form action="/login">
<input name="email" type="email">
<input name="password" type="password">
<button type="submit">Sign In</button>
</form>
</div>
```
* **TERRIBLE (High Risk):** `button[type="submit"]`
* **Why it's bad:** There could be dozens of other forms on the full page (e.g., a newsletter signup, a search bar in the header). This selector is a shot in the dark.
* **BETTER (Lower Risk):** `#login-widget button[type="submit"]`
* **Why it's better:** It's anchored to a unique ID (`#login-widget`). This dramatically reduces the chance of ambiguity.
* **EXCELLENT (Minimal Risk):** `div[id="login-widget"] form button[type="submit"]`
* **Why it's best:** This is a highly specific, descriptive path. It says, "Find the login widget, then the form inside it, and then the submit button inside *that* form." It is virtually guaranteed to be unique and is resilient to minor layout changes within the form.
### Scenario: Selecting a "Add to Cart" Button
**HTML Snippet Provided:**
```html
<section data-testid="product-details-main">
<h1>Awesome T-Shirt</h1>
<div class="product-actions">
<button class="add-to-cart-btn">Add to Cart</button>
</div>
</section>
```
* **TERRIBLE (High Risk):** `.add-to-cart-btn`
* **Why it's bad:** A "related products" section outside this snippet might also use the same class name.
* **EXCELLENT (Minimal Risk):** `[data-testid="product-details-main"] .add-to-cart-btn`
* **Why it's best:** It uses the stable `data-testid` attribute of the parent section as an anchor. This is the most robust pattern.
**Your Mandate:** Always examine the provided HTML for a stable, unique parent (like an element with an `id`, a `data-testid`, or a highly specific combination of classes) and use it as the root of your selectors. **NEVER generate a generic, un-anchored selector if a better, more specific parent is available in the context.**
────────────────────────────────────────────────────────
## Strategic Principles & Anti-Patterns
These are your commandments. Do not deviate.
1. **Selector Quality is Paramount:**
* **GOOD:** `[data-testid="submit-button"]`, `#main-content`, `[aria-label="Close dialog"]`
* **BAD:** `div > span:nth-child(3)`, `.button-gR3xY_s`, `//div[contains(@class, 'button')]`
2. **Wait for State, Not for Time:**
* **DO:** `(await waitForElement('#load-more')).click(); await waitForElement('div.new-item');` This waits for the *result* of the action.
* **DON'T:** `document.querySelector('#load-more').click(); await new Promise(r => setTimeout(r, 5000));` This is a guess and it will fail.
3. **Target the Action, Not the Artifact:** If you need to reveal content, click the button that reveals it. Don't try to manually change CSS `display` properties, as this can break the page's internal state.
4. **DOM-Awareness is Non-Negotiable:**
* **Shadow DOM:** You MUST use `element.shadowRoot.querySelector(...)` to access elements inside a `#shadow-root (open)`.
* **iFrames:** You MUST use `iframe.contentDocument.querySelector(...)` to interact with elements inside an iframe.
5. **Be Idempotent:** Your script must be harmless if run multiple times. Use `if (document.querySelector(...))` checks to avoid re-doing actions unnecessarily.
6. **Forbidden Techniques:** Never use `document.write()`. It is destructive.
────────────────────────────────────────────────────────
## From Vague Goals to Robust Scripts: Your Duty to Infer and Ensure Reliability
This is your most important responsibility. Users are not automation experts. They will provide incomplete or vague instructions. Your job is to be the expert—to infer their true goal and build a script that is reliable by default. **A vague user prompt must still result in a robust, complete script.**
Study these examples. No matter which query is given, your output must be the single, robust solution.
### 1. Scenario: Basic Search Query
* **High Detail Query:** "Find the search box and search button. Wait for the search box to be visible, click it, clear it, type 'r2d2', click the search button, and then wait for the search results to appear."
* **Medium Detail Query:** "Find the search box and search for 'r2d2'."
* **Low Detail Query:** "Search for r2d2."
**THE CORRECT, ROBUST JAVASCRIPT OUTPUT (for all three queries):**
```javascript
(async () => {
const waitForElement = (selector, timeout = 10000) => new Promise((resolve, reject) => { const el = document.querySelector(selector); if (el) return resolve(el); const observer = new MutationObserver(() => { const el = document.querySelector(selector); if (el) { observer.disconnect(); resolve(el); } }); observer.observe(document.body, { childList: true, subtree: true }); setTimeout(() => { observer.disconnect(); reject(new Error(`Timeout waiting for ${selector}`)); }, timeout); });
try {
const searchInput = await waitForElement('input[type="search"], input[aria-label*="search"]');
searchInput.value = 'r2d2';
searchInput.dispatchEvent(new Event('input', { bubbles: true }));
const searchButton = await waitForElement('button[type="submit"], button[aria-label*="search"]');
searchButton.click();
await waitForElement('div.search-results-container, #search-results');
} catch (e) {
console.error('Search script failed:', e.message);
}
})();
```
### 2. Scenario: Clicking a "Load More" Button
* **High Detail Query:** "Click the button with the text 'Load More'. Afterward, wait for a new item with the class '.product-tile' to show up."
* **Medium Detail Query:** "Click the load more button."
* **Low Detail Query:** "Load more items."
**THE CORRECT, ROBUST JAVASCRIPT OUTPUT:**
```javascript
(async () => {
const loadMoreButton = document.querySelector('button.load-more, [data-testid="load-more"]');
if (loadMoreButton) {
const initialItemCount = document.querySelectorAll('.product-tile').length;
loadMoreButton.click();
const waitForNewItem = (timeout = 8000) => new Promise((resolve, reject) => { const t0 = Date.now(); const check = () => { if (document.querySelectorAll('.product-tile').length > initialItemCount) return resolve(); if (Date.now() - t0 > timeout) return reject(new Error('Timeout waiting for new items to load.')); setTimeout(check, 200); }; check(); });
await waitForNewItem();
}
})();
```
### 3. Scenario: User Authentication (Login)
* **High Detail Query:** "Fill username with 'USER_EMAIL', password with 'USER_PASS', click login, and wait for the dashboard."
* **Medium Detail Query:** "Log in as USER_EMAIL."
* **Low Detail Query:** "Log in."
**THE CORRECT, ROBUST JAVASCRIPT OUTPUT:**
```javascript
(async () => {
if (document.querySelector('[data-testid="logout-button"]')) {
console.log('Already logged in.');
return;
}
const waitForElement = (selector, timeout = 10000) => new Promise((resolve, reject) => { const el = document.querySelector(selector); if (el) return resolve(el); const observer = new MutationObserver(() => { const el = document.querySelector(selector); if (el) { observer.disconnect(); resolve(el); } }); observer.observe(document.body, { childList: true, subtree: true }); setTimeout(() => { observer.disconnect(); reject(new Error(`Timeout waiting for ${selector}`)); }, timeout); });
try {
const userInput = await waitForElement('input[name*="user"], input[name*="email"]');
userInput.value = 'USER_EMAIL';
userInput.dispatchEvent(new Event('input', { bubbles: true }));
const passInput = await waitForElement('input[name*="pass"], input[type="password"]');
passInput.value = 'USER_PASS';
passInput.dispatchEvent(new Event('input', { bubbles: true }));
const submitButton = await waitForElement('button[type="submit"]');
submitButton.click();
await waitForElement('[data-testid="user-dashboard"], #dashboard, .account-page');
} catch (e) {
console.error('Login script failed:', e.message);
}
})();
```
────────────────────────────────────────────────────────
## Final Output Mandate
1. **CODE ONLY.** Your entire response must be the script body.
2. **NO CHAT.** Do not say "Here is the script" or "This should work."
3. **NO MARKDOWN.** Do not wrap your code in ` ``` ` fences.
4. **NO COMMENTS.** Do not add comments to the final code output, except within the logic where it's a best practice.
5. **SYNTACTICALLY PERFECT.** The script must be a single, self-contained block, immediately executable. Wrap it in `(async () => { ... })();`.
6. **UTF-8, STANDARD QUOTES.** Use `'` for string literals, not `“` or `”`.
You are an engine of automation. Now, receive the user's request and produce the optimal JavaScript."""
"""

View File

@@ -1,158 +0,0 @@
from typing import List, Dict, Optional
from abc import ABC, abstractmethod
from itertools import cycle
import os
########### ATTENTION PEOPLE OF EARTH ###########
# I have moved this config to async_configs.py and kept it here in case someone is still importing it;
# however, be a dear and use `from crawl4ai import ProxyConfig` instead :)
class ProxyConfig:
def __init__(
self,
server: str,
username: Optional[str] = None,
password: Optional[str] = None,
ip: Optional[str] = None,
):
"""Configuration class for a single proxy.
Args:
server: Proxy server URL (e.g., "http://127.0.0.1:8080")
username: Optional username for proxy authentication
password: Optional password for proxy authentication
ip: Optional IP address for verification purposes
"""
self.server = server
self.username = username
self.password = password
# Extract IP from server if not explicitly provided
self.ip = ip or self._extract_ip_from_server()
def _extract_ip_from_server(self) -> Optional[str]:
"""Extract IP address from server URL."""
try:
# Simple extraction assuming http://ip:port format
if "://" in self.server:
parts = self.server.split("://")[1].split(":")
return parts[0]
else:
parts = self.server.split(":")
return parts[0]
except Exception:
return None
@staticmethod
def from_string(proxy_str: str) -> "ProxyConfig":
"""Create a ProxyConfig from a string in the format 'ip:port:username:password'."""
parts = proxy_str.split(":")
if len(parts) == 4: # ip:port:username:password
ip, port, username, password = parts
return ProxyConfig(
server=f"http://{ip}:{port}",
username=username,
password=password,
ip=ip
)
elif len(parts) == 2: # ip:port only
ip, port = parts
return ProxyConfig(
server=f"http://{ip}:{port}",
ip=ip
)
else:
raise ValueError(f"Invalid proxy string format: {proxy_str}")
@staticmethod
def from_dict(proxy_dict: Dict) -> "ProxyConfig":
"""Create a ProxyConfig from a dictionary."""
return ProxyConfig(
server=proxy_dict.get("server"),
username=proxy_dict.get("username"),
password=proxy_dict.get("password"),
ip=proxy_dict.get("ip")
)
@staticmethod
def from_env(env_var: str = "PROXIES") -> List["ProxyConfig"]:
"""Load proxies from environment variable.
Args:
env_var: Name of environment variable containing comma-separated proxy strings
Returns:
List of ProxyConfig objects
"""
proxies = []
try:
proxy_list = os.getenv(env_var, "").split(",")
for proxy in proxy_list:
if not proxy:
continue
proxies.append(ProxyConfig.from_string(proxy))
except Exception as e:
print(f"Error loading proxies from environment: {e}")
return proxies
def to_dict(self) -> Dict:
"""Convert to dictionary representation."""
return {
"server": self.server,
"username": self.username,
"password": self.password,
"ip": self.ip
}
def clone(self, **kwargs) -> "ProxyConfig":
"""Create a copy of this configuration with updated values.
Args:
**kwargs: Key-value pairs of configuration options to update
Returns:
ProxyConfig: A new instance with the specified updates
"""
config_dict = self.to_dict()
config_dict.update(kwargs)
return ProxyConfig.from_dict(config_dict)
class ProxyRotationStrategy(ABC):
"""Base abstract class for proxy rotation strategies"""
@abstractmethod
async def get_next_proxy(self) -> Optional[ProxyConfig]:
"""Get next proxy configuration from the strategy"""
pass
@abstractmethod
def add_proxies(self, proxies: List[ProxyConfig]):
"""Add proxy configurations to the strategy"""
pass
class RoundRobinProxyStrategy:
"""Simple round-robin proxy rotation strategy using ProxyConfig objects"""
def __init__(self, proxies: List[ProxyConfig] = None):
"""
Initialize with optional list of proxy configurations
Args:
proxies: List of ProxyConfig objects
"""
self._proxies = []
self._proxy_cycle = None
if proxies:
self.add_proxies(proxies)
def add_proxies(self, proxies: List[ProxyConfig]):
"""Add new proxies to the rotation pool"""
self._proxies.extend(proxies)
self._proxy_cycle = cycle(self._proxies)
async def get_next_proxy(self) -> Optional[ProxyConfig]:
"""Get next proxy in round-robin fashion"""
if not self._proxy_cycle:
return None
return next(self._proxy_cycle)
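
For reference, a minimal usage sketch of the proxy classes above. The import path for `RoundRobinProxyStrategy` and the proxy addresses are assumptions for illustration; `ProxyConfig` is imported as suggested in the note at the top of the class.

```python
import asyncio

from crawl4ai import ProxyConfig
# Assumed import path; adjust to wherever RoundRobinProxyStrategy lives in your install
from crawl4ai.proxy_strategy import RoundRobinProxyStrategy


async def main():
    # Proxy strings use the "ip:port:username:password" format handled by from_string()
    proxies = [
        ProxyConfig.from_string("203.0.113.10:8080:user:secret"),  # placeholder values
        ProxyConfig.from_string("203.0.113.11:8080"),              # no-auth variant
    ]

    strategy = RoundRobinProxyStrategy(proxies)

    # Round-robin: the third call wraps back to the first proxy
    for _ in range(3):
        proxy = await strategy.get_next_proxy()
        print(proxy.to_dict())


asyncio.run(main())
```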

View File

@@ -1,35 +0,0 @@
"""
C4A-Script: A domain-specific language for web automation in Crawl4AI
"""
from .c4a_compile import C4ACompiler, compile, validate, compile_file
from .c4a_result import (
CompilationResult,
ValidationResult,
ErrorDetail,
WarningDetail,
ErrorType,
Severity,
Suggestion
)
__all__ = [
# Main compiler
"C4ACompiler",
# Convenience functions
"compile",
"validate",
"compile_file",
# Result types
"CompilationResult",
"ValidationResult",
"ErrorDetail",
"WarningDetail",
# Enums
"ErrorType",
"Severity",
"Suggestion"
]

View File

@@ -1,398 +0,0 @@
"""
Clean C4A-Script API with Result pattern
No exceptions - always returns results
"""
from __future__ import annotations
import pathlib
import re
from typing import Union, List, Optional
# JSON_SCHEMA_BUILDER is still used elsewhere,
# but we now also need the new script-builder prompt.
from ..prompts import GENERATE_JS_SCRIPT_PROMPT, GENERATE_SCRIPT_PROMPT
import logging
import re
from .c4a_result import (
CompilationResult, ValidationResult, ErrorDetail, WarningDetail,
ErrorType, Severity, Suggestion
)
from .c4ai_script import Compiler
from lark.exceptions import UnexpectedToken, UnexpectedCharacters, VisitError
from ..async_configs import LLMConfig
from ..utils import perform_completion_with_backoff
class C4ACompiler:
"""Main compiler with result-based API"""
# Error code mapping
ERROR_CODES = {
"missing_then": "E001",
"missing_paren": "E002",
"missing_comma": "E003",
"missing_endproc": "E004",
"undefined_proc": "E005",
"missing_backticks": "E006",
"invalid_command": "E007",
"syntax_error": "E999"
}
@classmethod
def compile(cls, script: Union[str, List[str]], root: Optional[pathlib.Path] = None) -> CompilationResult:
"""
Compile C4A-Script to JavaScript
Args:
script: C4A-Script as string or list of lines
root: Root directory for includes
Returns:
CompilationResult with success status and JS code or errors
"""
# Normalize input
if isinstance(script, list):
script_text = '\n'.join(script)
script_lines = script
else:
script_text = script
script_lines = script.split('\n')
try:
# Try compilation
compiler = Compiler(root)
js_code = compiler.compile(script_text)
# Success!
result = CompilationResult(
success=True,
js_code=js_code,
metadata={
"lineCount": len(script_lines),
"statementCount": len(js_code)
}
)
# Add any warnings (future feature)
# result.warnings = cls._check_warnings(script_text)
return result
except Exception as e:
# Convert exception to ErrorDetail
error = cls._exception_to_error(e, script_lines)
return CompilationResult(
success=False,
errors=[error],
metadata={
"lineCount": len(script_lines)
}
)
@classmethod
def validate(cls, script: Union[str, List[str]]) -> ValidationResult:
"""
Validate script syntax without generating code
Args:
script: C4A-Script to validate
Returns:
ValidationResult with validity status and any errors
"""
result = cls.compile(script)
return ValidationResult(
valid=result.success,
errors=result.errors,
warnings=result.warnings
)
@classmethod
def compile_file(cls, path: Union[str, pathlib.Path]) -> CompilationResult:
"""
Compile a C4A-Script file
Args:
path: Path to the file
Returns:
CompilationResult
"""
path = pathlib.Path(path)
if not path.exists():
error = ErrorDetail(
type=ErrorType.RUNTIME,
code="E100",
severity=Severity.ERROR,
message=f"File not found: {path}",
line=0,
column=0,
source_line=""
)
return CompilationResult(success=False, errors=[error])
try:
script = path.read_text()
return cls.compile(script, root=path.parent)
except Exception as e:
error = ErrorDetail(
type=ErrorType.RUNTIME,
code="E101",
severity=Severity.ERROR,
message=f"Error reading file: {str(e)}",
line=0,
column=0,
source_line=""
)
return CompilationResult(success=False, errors=[error])
@classmethod
def _exception_to_error(cls, exc: Exception, script_lines: List[str]) -> ErrorDetail:
"""Convert an exception to ErrorDetail"""
if isinstance(exc, UnexpectedToken):
return cls._handle_unexpected_token(exc, script_lines)
elif isinstance(exc, UnexpectedCharacters):
return cls._handle_unexpected_chars(exc, script_lines)
elif isinstance(exc, ValueError):
return cls._handle_value_error(exc, script_lines)
else:
# Generic error
return ErrorDetail(
type=ErrorType.SYNTAX,
code=cls.ERROR_CODES["syntax_error"],
severity=Severity.ERROR,
message=str(exc),
line=1,
column=1,
source_line=script_lines[0] if script_lines else ""
)
@classmethod
def _handle_unexpected_token(cls, exc: UnexpectedToken, script_lines: List[str]) -> ErrorDetail:
"""Handle UnexpectedToken errors"""
line = exc.line
column = exc.column
# Get context lines
source_line = script_lines[line - 1] if 0 < line <= len(script_lines) else ""
line_before = script_lines[line - 2] if line > 1 and line <= len(script_lines) + 1 else None
line_after = script_lines[line] if 0 < line < len(script_lines) else None
# Determine error type and suggestions
if exc.token.type == 'CLICK' and 'THEN' in str(exc.expected):
code = cls.ERROR_CODES["missing_then"]
message = "Missing 'THEN' keyword after IF condition"
suggestions = [
Suggestion(
"Add 'THEN' after the condition",
source_line.replace("CLICK", "THEN CLICK") if source_line else None
)
]
elif exc.token.type == '$END':
code = cls.ERROR_CODES["missing_endproc"]
message = "Unexpected end of script"
suggestions = [
Suggestion("Check for missing ENDPROC"),
Suggestion("Ensure all procedures are properly closed")
]
elif 'RPAR' in str(exc.expected):
code = cls.ERROR_CODES["missing_paren"]
message = "Missing closing parenthesis ')'"
suggestions = [
Suggestion("Add closing parenthesis at the end of the condition")
]
elif 'COMMA' in str(exc.expected):
code = cls.ERROR_CODES["missing_comma"]
message = "Missing comma ',' in command"
suggestions = [
Suggestion("Add comma between arguments")
]
else:
# Check if this might be missing backticks
if exc.token.type == 'NAME' and 'BACKTICK_STRING' in str(exc.expected):
code = cls.ERROR_CODES["missing_backticks"]
message = "Selector must be wrapped in backticks"
suggestions = [
Suggestion(
"Wrap the selector in backticks",
f"`{exc.token.value}`"
)
]
else:
code = cls.ERROR_CODES["syntax_error"]
message = f"Unexpected '{exc.token.value}'"
if exc.expected:
expected_list = [str(e) for e in exc.expected if not str(e).startswith('_')][:3]
if expected_list:
message += f". Expected: {', '.join(expected_list)}"
suggestions = []
return ErrorDetail(
type=ErrorType.SYNTAX,
code=code,
severity=Severity.ERROR,
message=message,
line=line,
column=column,
source_line=source_line,
line_before=line_before,
line_after=line_after,
suggestions=suggestions
)
@classmethod
def _handle_unexpected_chars(cls, exc: UnexpectedCharacters, script_lines: List[str]) -> ErrorDetail:
"""Handle UnexpectedCharacters errors"""
line = exc.line
column = exc.column
source_line = script_lines[line - 1] if 0 < line <= len(script_lines) else ""
# Check for missing backticks
if "CLICK" in source_line and column > source_line.find("CLICK"):
code = cls.ERROR_CODES["missing_backticks"]
message = "Selector must be wrapped in backticks"
suggestions = [
Suggestion(
"Wrap the selector in backticks",
re.sub(r'CLICK\s+([^\s]+)', r'CLICK `\1`', source_line)
)
]
else:
code = cls.ERROR_CODES["syntax_error"]
message = f"Invalid character at position {column}"
suggestions = []
return ErrorDetail(
type=ErrorType.SYNTAX,
code=code,
severity=Severity.ERROR,
message=message,
line=line,
column=column,
source_line=source_line,
suggestions=suggestions
)
@classmethod
def _handle_value_error(cls, exc: ValueError, script_lines: List[str]) -> ErrorDetail:
"""Handle ValueError (runtime errors)"""
message = str(exc)
# Check for undefined procedure
if "Unknown procedure" in message:
proc_match = re.search(r"'([^']+)'", message)
if proc_match:
proc_name = proc_match.group(1)
# Find the line with the procedure call
for i, line in enumerate(script_lines):
if proc_name in line and not line.strip().startswith('PROC'):
return ErrorDetail(
type=ErrorType.RUNTIME,
code=cls.ERROR_CODES["undefined_proc"],
severity=Severity.ERROR,
message=f"Undefined procedure '{proc_name}'",
line=i + 1,
column=line.find(proc_name) + 1,
source_line=line,
suggestions=[
Suggestion(
f"Define the procedure before using it",
f"PROC {proc_name}\n # commands here\nENDPROC"
)
]
)
# Generic runtime error
return ErrorDetail(
type=ErrorType.RUNTIME,
code="E999",
severity=Severity.ERROR,
message=message,
line=1,
column=1,
source_line=script_lines[0] if script_lines else ""
)
@staticmethod
def generate_script(
html: str,
query: str | None = None,
mode: str = "c4a",
llm_config: LLMConfig | None = None,
**completion_kwargs,
) -> str:
"""
One-shot helper that calls the LLM exactly once to convert a
natural-language goal + HTML snippet into either:
1. raw JavaScript (`mode="js"`)
2. Crawl4ai DSL (`mode="c4a"`)
The returned string is guaranteed to be free of markdown wrappers
or explanatory text, ready for direct execution.
"""
if llm_config is None:
llm_config = LLMConfig() # falls back to env vars / defaults
# Build the user chunk
user_prompt = "\n".join(
[
"## GOAL",
"<<goael>>",
(query or "Prepare the page for crawling."),
"<</goal>>",
"",
"## HTML",
"<<html>>",
html[:100000], # guardrail against token blast
"<</html>>",
"",
"## MODE",
mode,
]
)
# Call the LLM with retry/back-off logic
full_prompt = f"{GENERATE_SCRIPT_PROMPT}\n\n{user_prompt}" if mode == "c4a" else f"{GENERATE_JS_SCRIPT_PROMPT}\n\n{user_prompt}"
response = perform_completion_with_backoff(
provider=llm_config.provider,
prompt_with_variables=full_prompt,
api_token=llm_config.api_token,
json_response=False,
base_url=getattr(llm_config, 'base_url', None),
**completion_kwargs,
)
# Extract content from the response
raw_response = response.choices[0].message.content.strip()
# Strip accidental markdown fences (```js … ```)
clean = re.sub(r"^```(?:[a-zA-Z0-9_-]+)?\s*|```$", "", raw_response, flags=re.MULTILINE).strip()
if not clean:
raise RuntimeError("LLM returned empty script.")
return clean
# Convenience functions for direct use
def compile(script: Union[str, List[str]], root: Optional[pathlib.Path] = None) -> CompilationResult:
"""Compile C4A-Script to JavaScript"""
return C4ACompiler.compile(script, root)
def validate(script: Union[str, List[str]]) -> ValidationResult:
"""Validate C4A-Script syntax"""
return C4ACompiler.validate(script)
def compile_file(path: Union[str, pathlib.Path]) -> CompilationResult:
"""Compile C4A-Script file"""
return C4ACompiler.compile_file(path)
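
A quick, hedged sketch of the result-based API defined in this module. The package import path is an assumption (adjust to wherever the c4a compiler package lives in your tree); the script content is illustrative.

```python
# Assumed import path for the module above
from crawl4ai.script.c4a_compile import compile, validate

script = """
GO https://example.com
WAIT `#content` 10
CLICK `button.load-more`
"""

result = compile(script)
if result.success:
    # js_code is a list of JavaScript statements ready for CrawlerRunConfig(js_code=...)
    for js in result.js_code:
        print(js)
else:
    # No exceptions are raised; inspect the ErrorDetail instead
    print(result.first_error.formatted_message)

# Validation only checks syntax, without emitting JS
check = validate(script)
print("valid:", check.valid)
```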

View File

@@ -1,219 +0,0 @@
"""
Result classes for C4A-Script compilation
Clean API design with no exceptions
"""
from __future__ import annotations
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Dict, Any, Optional
import json
class ErrorType(Enum):
SYNTAX = "syntax"
SEMANTIC = "semantic"
RUNTIME = "runtime"
class Severity(Enum):
ERROR = "error"
WARNING = "warning"
INFO = "info"
@dataclass
class Suggestion:
"""A suggestion for fixing an error"""
message: str
fix: Optional[str] = None
def to_dict(self) -> dict:
return {
"message": self.message,
"fix": self.fix
}
@dataclass
class ErrorDetail:
"""Detailed information about a compilation error"""
# Core info
type: ErrorType
code: str # E001, E002, etc.
severity: Severity
message: str
# Location
line: int
column: int
# Context
source_line: str
# Optional fields with defaults
end_line: Optional[int] = None
end_column: Optional[int] = None
line_before: Optional[str] = None
line_after: Optional[str] = None
# Help
suggestions: List[Suggestion] = field(default_factory=list)
documentation_url: Optional[str] = None
def to_dict(self) -> dict:
"""Convert to dictionary for JSON serialization"""
return {
"type": self.type.value,
"code": self.code,
"severity": self.severity.value,
"message": self.message,
"location": {
"line": self.line,
"column": self.column,
"endLine": self.end_line,
"endColumn": self.end_column
},
"context": {
"sourceLine": self.source_line,
"lineBefore": self.line_before,
"lineAfter": self.line_after,
"marker": {
"start": self.column - 1,
"length": (self.end_column - self.column) if self.end_column else 1
}
},
"suggestions": [s.to_dict() for s in self.suggestions],
"documentationUrl": self.documentation_url
}
def to_json(self) -> str:
"""Convert to JSON string"""
return json.dumps(self.to_dict(), indent=2)
@property
def formatted_message(self) -> str:
"""Returns the nice text format for terminals"""
lines = []
lines.append(f"\n{'='*60}")
lines.append(f"{self.type.value.title()} Error [{self.code}]")
lines.append(f"{'='*60}")
lines.append(f"Location: Line {self.line}, Column {self.column}")
lines.append(f"Error: {self.message}")
if self.source_line:
marker = " " * (self.column - 1) + "^"
if self.end_column:
marker += "~" * (self.end_column - self.column - 1)
lines.append(f"\nCode:")
if self.line_before:
lines.append(f" {self.line - 1: >3} | {self.line_before}")
lines.append(f" {self.line: >3} | {self.source_line}")
lines.append(f" | {marker}")
if self.line_after:
lines.append(f" {self.line + 1: >3} | {self.line_after}")
if self.suggestions:
lines.append("\nSuggestions:")
for i, suggestion in enumerate(self.suggestions, 1):
lines.append(f" {i}. {suggestion.message}")
if suggestion.fix:
lines.append(f" Fix: {suggestion.fix}")
lines.append("="*60)
return "\n".join(lines)
@property
def simple_message(self) -> str:
"""Returns just the error message without formatting"""
return f"Line {self.line}: {self.message}"
@dataclass
class WarningDetail:
"""Information about a compilation warning"""
code: str
message: str
line: int
column: int
def to_dict(self) -> dict:
return {
"code": self.code,
"message": self.message,
"line": self.line,
"column": self.column
}
@dataclass
class CompilationResult:
"""Result of C4A-Script compilation"""
success: bool
js_code: Optional[List[str]] = None
errors: List[ErrorDetail] = field(default_factory=list)
warnings: List[WarningDetail] = field(default_factory=list)
metadata: Dict[str, Any] = field(default_factory=dict)
def to_dict(self) -> dict:
"""Convert to dictionary for JSON serialization"""
return {
"success": self.success,
"jsCode": self.js_code,
"errors": [e.to_dict() for e in self.errors],
"warnings": [w.to_dict() for w in self.warnings],
"metadata": self.metadata
}
def to_json(self) -> str:
"""Convert to JSON string"""
return json.dumps(self.to_dict(), indent=2)
@property
def has_errors(self) -> bool:
"""Check if there are any errors"""
return len(self.errors) > 0
@property
def has_warnings(self) -> bool:
"""Check if there are any warnings"""
return len(self.warnings) > 0
@property
def first_error(self) -> Optional[ErrorDetail]:
"""Get the first error if any"""
return self.errors[0] if self.errors else None
def __str__(self) -> str:
"""String representation for debugging"""
if self.success:
msg = f"✓ Compilation successful"
if self.js_code:
msg += f" - {len(self.js_code)} statements generated"
if self.warnings:
msg += f" ({len(self.warnings)} warnings)"
return msg
else:
return f"✗ Compilation failed - {len(self.errors)} error(s)"
@dataclass
class ValidationResult:
"""Result of script validation"""
valid: bool
errors: List[ErrorDetail] = field(default_factory=list)
warnings: List[WarningDetail] = field(default_factory=list)
def to_dict(self) -> dict:
return {
"valid": self.valid,
"errors": [e.to_dict() for e in self.errors],
"warnings": [w.to_dict() for w in self.warnings]
}
def to_json(self) -> str:
return json.dumps(self.to_dict(), indent=2)
@property
def first_error(self) -> Optional[ErrorDetail]:
return self.errors[0] if self.errors else None
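
Since these dataclasses are built for JSON serialization, here is a small sketch of how a server endpoint might relay them to a client; the helper name is hypothetical.

```python
import json


def compilation_response(result: "CompilationResult") -> str:
    """Hypothetical helper: serialize a CompilationResult for an HTTP/JSON consumer."""
    payload = result.to_dict()  # {"success": ..., "jsCode": ..., "errors": [...], "warnings": [...], ...}
    if result.has_errors:
        # simple_message gives a compact one-line summary, handy for logs
        payload["summary"] = result.first_error.simple_message
    return json.dumps(payload)
```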

View File

@@ -1,690 +0,0 @@
"""
2025-06-03
By Unclecode:
C4A-Script Language Documentation
Feeds Crawl4AI via CrawlerRunConfig(js_code=[ ... ]); no core modifications.
"""
from __future__ import annotations
import pathlib, re, sys, textwrap
from dataclasses import dataclass
from typing import Any, Dict, List, Union
from lark import Lark, Transformer, v_args
from lark.exceptions import UnexpectedToken, UnexpectedCharacters, VisitError
# --------------------------------------------------------------------------- #
# Custom Error Classes
# --------------------------------------------------------------------------- #
class C4AScriptError(Exception):
"""Custom error class for C4A-Script compilation errors"""
def __init__(self, message: str, line: int = None, column: int = None,
error_type: str = "Syntax Error", details: str = None):
self.message = message
self.line = line
self.column = column
self.error_type = error_type
self.details = details
super().__init__(self._format_message())
def _format_message(self) -> str:
"""Format a clear error message"""
lines = [f"\n{'='*60}"]
lines.append(f"C4A-Script {self.error_type}")
lines.append(f"{'='*60}")
if self.line:
lines.append(f"Location: Line {self.line}" + (f", Column {self.column}" if self.column else ""))
lines.append(f"Error: {self.message}")
if self.details:
lines.append(f"\nDetails: {self.details}")
lines.append("="*60)
return "\n".join(lines)
@classmethod
def from_exception(cls, exc: Exception, script: Union[str, List[str]]) -> 'C4AScriptError':
"""Create C4AScriptError from another exception"""
script_text = script if isinstance(script, str) else '\n'.join(script)
script_lines = script_text.split('\n')
if isinstance(exc, UnexpectedToken):
# Extract line and column from UnexpectedToken
line = exc.line
column = exc.column
# Get the problematic line
if 0 < line <= len(script_lines):
problem_line = script_lines[line - 1]
marker = " " * (column - 1) + "^"
details = f"\nCode:\n {problem_line}\n {marker}\n"
# Improve error message based on context
if exc.token.type == 'CLICK' and 'THEN' in str(exc.expected):
message = "Missing 'THEN' keyword after IF condition"
elif exc.token.type == '$END':
message = "Unexpected end of script. Check for missing ENDPROC or incomplete commands"
elif 'RPAR' in str(exc.expected):
message = "Missing closing parenthesis ')'"
elif 'COMMA' in str(exc.expected):
message = "Missing comma ',' in command"
else:
message = f"Unexpected '{exc.token}'"
if exc.expected:
expected_list = [str(e) for e in exc.expected if not e.startswith('_')]
if expected_list:
message += f". Expected: {', '.join(expected_list[:3])}"
details += f"Token: {exc.token.type} ('{exc.token.value}')"
else:
message = str(exc)
details = None
return cls(message, line, column, "Syntax Error", details)
elif isinstance(exc, UnexpectedCharacters):
# Extract line and column
line = exc.line
column = exc.column
if 0 < line <= len(script_lines):
problem_line = script_lines[line - 1]
marker = " " * (column - 1) + "^"
details = f"\nCode:\n {problem_line}\n {marker}\n"
message = f"Invalid character or unexpected text at position {column}"
else:
message = str(exc)
details = None
return cls(message, line, column, "Syntax Error", details)
elif isinstance(exc, ValueError):
# Handle runtime errors like undefined procedures
message = str(exc)
# Try to find which line caused the error
if "Unknown procedure" in message:
proc_name = re.search(r"'([^']+)'", message)
if proc_name:
proc_name = proc_name.group(1)
for i, line in enumerate(script_lines, 1):
if proc_name in line and not line.strip().startswith('PROC'):
details = f"\nCode:\n {line.strip()}\n\nMake sure the procedure '{proc_name}' is defined with PROC...ENDPROC"
return cls(f"Undefined procedure '{proc_name}'", i, None, "Runtime Error", details)
return cls(message, None, None, "Runtime Error", None)
else:
# Generic error
return cls(str(exc), None, None, "Compilation Error", None)
# --------------------------------------------------------------------------- #
# 1. Grammar
# --------------------------------------------------------------------------- #
GRAMMAR = r"""
start : line*
?line : command | proc_def | include | comment
command : wait | nav | click_cmd | double_click | right_click | move | drag | scroll
| type | clear | set_input | press | key_down | key_up
| eval_cmd | setvar | proc_call | if_cmd | repeat_cmd
wait : "WAIT" (ESCAPED_STRING|BACKTICK_STRING|NUMBER) NUMBER? -> wait_cmd
nav : "GO" URL -> go
| "RELOAD" -> reload
| "BACK" -> back
| "FORWARD" -> forward
click_cmd : "CLICK" (BACKTICK_STRING|NUMBER NUMBER) -> click
double_click : "DOUBLE_CLICK" (BACKTICK_STRING|NUMBER NUMBER) -> double_click
right_click : "RIGHT_CLICK" (BACKTICK_STRING|NUMBER NUMBER) -> right_click
move : "MOVE" coords -> move
drag : "DRAG" coords coords -> drag
scroll : "SCROLL" DIR NUMBER? -> scroll
type : "TYPE" (ESCAPED_STRING | NAME) -> type
clear : "CLEAR" BACKTICK_STRING -> clear
set_input : "SET" BACKTICK_STRING (ESCAPED_STRING | BACKTICK_STRING | NAME) -> set_input
press : "PRESS" WORD -> press
key_down : "KEY_DOWN" WORD -> key_down
key_up : "KEY_UP" WORD -> key_up
eval_cmd : "EVAL" BACKTICK_STRING -> eval_cmd
setvar : "SETVAR" NAME "=" value -> setvar
proc_call : NAME -> proc_call
proc_def : "PROC" NAME line* "ENDPROC" -> proc_def
include : "USE" ESCAPED_STRING -> include
comment : /#.*/ -> comment
if_cmd : "IF" "(" condition ")" "THEN" command ("ELSE" command)? -> if_cmd
repeat_cmd : "REPEAT" "(" command "," repeat_count ")" -> repeat_cmd
condition : not_cond | exists_cond | js_cond
not_cond : "NOT" condition -> not_cond
exists_cond : "EXISTS" BACKTICK_STRING -> exists_cond
js_cond : BACKTICK_STRING -> js_cond
repeat_count : NUMBER | BACKTICK_STRING
coords : NUMBER NUMBER
value : ESCAPED_STRING | BACKTICK_STRING | NUMBER
DIR : /(UP|DOWN|LEFT|RIGHT)/i
REST : /[^\n]+/
URL : /(http|https):\/\/[^\s]+/
NAME : /\$?[A-Za-z_][A-Za-z0-9_]*/
WORD : /[A-Za-z0-9+]+/
BACKTICK_STRING : /`[^`]*`/
%import common.NUMBER
%import common.ESCAPED_STRING
%import common.WS_INLINE
%import common.NEWLINE
%ignore WS_INLINE
%ignore NEWLINE
"""
# --------------------------------------------------------------------------- #
# 2. IR dataclasses
# --------------------------------------------------------------------------- #
@dataclass
class Cmd:
op: str
args: List[Any]
@dataclass
class Proc:
name: str
body: List[Cmd]
# --------------------------------------------------------------------------- #
# 3. AST → IR
# --------------------------------------------------------------------------- #
@v_args(inline=True)
class ASTBuilder(Transformer):
# helpers
def _strip(self, s):
if s.startswith('"') and s.endswith('"'):
return s[1:-1]
elif s.startswith('`') and s.endswith('`'):
return s[1:-1]
return s
def start(self,*i): return list(i)
def line(self,i): return i
def command(self,i): return i
# WAIT
def wait_cmd(self, rest, timeout=None):
rest_str = str(rest)
# Check if it's a number (including floats)
try:
num_val = float(rest_str)
payload = (num_val, "seconds")
except ValueError:
if rest_str.startswith('"') and rest_str.endswith('"'):
payload = (self._strip(rest_str), "text")
elif rest_str.startswith('`') and rest_str.endswith('`'):
payload = (self._strip(rest_str), "selector")
else:
payload = (rest_str, "selector")
return Cmd("WAIT", [payload, int(timeout) if timeout else None])
# NAV
def go(self,u): return Cmd("GO",[str(u)])
def reload(self): return Cmd("RELOAD",[])
def back(self): return Cmd("BACK",[])
def forward(self): return Cmd("FORWARD",[])
# CLICK, DOUBLE_CLICK, RIGHT_CLICK
def click(self, *args):
return self._handle_click("CLICK", args)
def double_click(self, *args):
return self._handle_click("DBLCLICK", args)
def right_click(self, *args):
return self._handle_click("RIGHTCLICK", args)
def _handle_click(self, op, args):
if len(args) == 1:
# Single argument - backtick string
target = self._strip(str(args[0]))
return Cmd(op, [("selector", target)])
else:
# Two arguments - coordinates
x, y = args
return Cmd(op, [("coords", int(x), int(y))])
# MOVE / DRAG / SCROLL
def coords(self,x,y): return ("coords",int(x),int(y))
def move(self,c): return Cmd("MOVE",[c])
def drag(self,c1,c2): return Cmd("DRAG",[c1,c2])
def scroll(self,dir_tok,amt=None):
return Cmd("SCROLL",[dir_tok.upper(), int(amt) if amt else 500])
# KEYS
def type(self,tok): return Cmd("TYPE",[self._strip(str(tok))])
def clear(self,sel): return Cmd("CLEAR",[self._strip(str(sel))])
def set_input(self,sel,val): return Cmd("SET",[self._strip(str(sel)), self._strip(str(val))])
def press(self,w): return Cmd("PRESS",[str(w)])
def key_down(self,w): return Cmd("KEYDOWN",[str(w)])
def key_up(self,w): return Cmd("KEYUP",[str(w)])
# FLOW
def eval_cmd(self,txt): return Cmd("EVAL",[self._strip(str(txt))])
def setvar(self,n,v):
# v might be a Token or a Tree, extract value properly
if hasattr(v, 'value'):
value = v.value
elif hasattr(v, 'children') and len(v.children) > 0:
value = v.children[0].value
else:
value = str(v)
return Cmd("SETVAR",[str(n), self._strip(value)])
def proc_call(self,n): return Cmd("CALL",[str(n)])
def proc_def(self,n,*body): return Proc(str(n),[b for b in body if isinstance(b,Cmd)])
def include(self,p): return Cmd("INCLUDE",[self._strip(p)])
def comment(self,*_): return Cmd("NOP",[])
# IF-THEN-ELSE and EXISTS
def if_cmd(self, condition, then_cmd, else_cmd=None):
return Cmd("IF", [condition, then_cmd, else_cmd])
def condition(self, cond):
return cond
def not_cond(self, cond):
return ("NOT", cond)
def exists_cond(self, selector):
return ("EXISTS", self._strip(str(selector)))
def js_cond(self, expr):
return ("JS", self._strip(str(expr)))
# REPEAT
def repeat_cmd(self, cmd, count):
return Cmd("REPEAT", [cmd, count])
def repeat_count(self, value):
return str(value)
# --------------------------------------------------------------------------- #
# 4. Compiler
# --------------------------------------------------------------------------- #
class Compiler:
def __init__(self, root: pathlib.Path|None=None):
self.parser = Lark(GRAMMAR,start="start",parser="lalr")
self.root = pathlib.Path(root or ".").resolve()
self.vars: Dict[str,Any] = {}
self.procs: Dict[str,Proc]= {}
def compile(self, text: Union[str, List[str]]) -> List[str]:
# Handle list input by joining with newlines
if isinstance(text, list):
text = '\n'.join(text)
ir = self._parse_with_includes(text)
ir = self._collect_procs(ir)
ir = self._inline_calls(ir)
ir = self._apply_set_vars(ir)
return [self._emit_js(c) for c in ir if isinstance(c,Cmd) and c.op!="NOP"]
# passes
def _parse_with_includes(self,txt,seen=None):
seen=seen or set()
cmds=ASTBuilder().transform(self.parser.parse(txt))
out=[]
for c in cmds:
if isinstance(c,Cmd) and c.op=="INCLUDE":
p=(self.root/c.args[0]).resolve()
if p in seen: raise ValueError(f"Circular include {p}")
seen.add(p); out+=self._parse_with_includes(p.read_text(),seen)
else: out.append(c)
return out
def _collect_procs(self,ir):
out=[]
for i in ir:
if isinstance(i,Proc): self.procs[i.name]=i
else: out.append(i)
return out
def _inline_calls(self,ir):
out=[]
for c in ir:
if isinstance(c,Cmd) and c.op=="CALL":
if c.args[0] not in self.procs:
raise ValueError(f"Unknown procedure {c.args[0]!r}")
out+=self._inline_calls(self.procs[c.args[0]].body)
else: out.append(c)
return out
def _apply_set_vars(self,ir):
def sub(s): return re.sub(r"\$(\w+)",lambda m:str(self.vars.get(m.group(1),m.group(0))) ,s) if isinstance(s,str) else s
out=[]
for c in ir:
if isinstance(c,Cmd):
if c.op=="SETVAR":
# Store variable
self.vars[c.args[0].lstrip('$')]=c.args[1]
else:
# Apply variable substitution to commands that use them
if c.op in("TYPE","EVAL","SET"): c.args=[sub(a) for a in c.args]
out.append(c)
return out
# JS emitter
def _emit_js(self, cmd: Cmd) -> str:
op, a = cmd.op, cmd.args
if op == "GO": return f"window.location.href = '{a[0]}';"
if op == "RELOAD": return "window.location.reload();"
if op == "BACK": return "window.history.back();"
if op == "FORWARD": return "window.history.forward();"
if op == "WAIT":
arg, kind = a[0]
timeout = a[1] or 10
if kind == "seconds":
return f"await new Promise(r=>setTimeout(r,{arg}*1000));"
if kind == "selector":
sel = arg.replace("\\","\\\\").replace("'","\\'")
return textwrap.dedent(f"""
await new Promise((res,rej)=>{{
const max = {timeout*1000}, t0 = performance.now();
const id = setInterval(()=>{{
if(document.querySelector('{sel}')){{clearInterval(id);res();}}
else if(performance.now()-t0>max){{clearInterval(id);rej('WAIT selector timeout');}}
}},100);
}});
""").strip()
if kind == "text":
txt = arg.replace('`', '\\`')
return textwrap.dedent(f"""
await new Promise((res,rej)=>{{
const max={timeout*1000},t0=performance.now();
const id=setInterval(()=>{{
if(document.body.innerText.includes(`{txt}`)){{clearInterval(id);res();}}
else if(performance.now()-t0>max){{clearInterval(id);rej('WAIT text timeout');}}
}},100);
}});
""").strip()
# click-style helpers
def _js_click(sel, evt="click", button=0, detail=1):
sel = sel.replace("'", "\\'")
return textwrap.dedent(f"""
(()=>{{
const el=document.querySelector('{sel}');
if(el){{
el.focus&&el.focus();
el.dispatchEvent(new MouseEvent('{evt}',{{bubbles:true,button:{button},detail:{detail}}}));
}}
}})();
""").strip()
def _js_click_xy(x, y, evt="click", button=0, detail=1):
return textwrap.dedent(f"""
(()=>{{
const el=document.elementFromPoint({x},{y});
if(el){{
el.focus&&el.focus();
el.dispatchEvent(new MouseEvent('{evt}',{{bubbles:true,button:{button},detail:{detail}}}));
}}
}})();
""").strip()
if op in ("CLICK", "DBLCLICK", "RIGHTCLICK"):
evt = {"CLICK":"click","DBLCLICK":"dblclick","RIGHTCLICK":"contextmenu"}[op]
btn = 2 if op=="RIGHTCLICK" else 0
det = 2 if op=="DBLCLICK" else 1
kind,*rest = a[0]
return _js_click_xy(*rest) if kind=="coords" else _js_click(rest[0],evt,btn,det)
if op == "MOVE":
_, x, y = a[0]
return textwrap.dedent(f"""
document.dispatchEvent(new MouseEvent('mousemove',{{clientX:{x},clientY:{y},bubbles:true}}));
""").strip()
if op == "DRAG":
(_, x1, y1), (_, x2, y2) = a
return textwrap.dedent(f"""
(()=>{{
const s=document.elementFromPoint({x1},{y1});
if(!s) return;
s.dispatchEvent(new MouseEvent('mousedown',{{bubbles:true,clientX:{x1},clientY:{y1}}}));
document.dispatchEvent(new MouseEvent('mousemove',{{bubbles:true,clientX:{x2},clientY:{y2}}}));
document.dispatchEvent(new MouseEvent('mouseup', {{bubbles:true,clientX:{x2},clientY:{y2}}}));
}})();
""").strip()
if op == "SCROLL":
dir_, amt = a
dx, dy = {"UP":(0,-amt),"DOWN":(0,amt),"LEFT":(-amt,0),"RIGHT":(amt,0)}[dir_]
return f"window.scrollBy({dx},{dy});"
if op == "TYPE":
txt = a[0].replace("'", "\\'")
return textwrap.dedent(f"""
(()=>{{
const el=document.activeElement;
if(el){{
el.value += '{txt}';
el.dispatchEvent(new Event('input',{{bubbles:true}}));
}}
}})();
""").strip()
if op == "CLEAR":
sel = a[0].replace("'", "\\'")
return textwrap.dedent(f"""
(()=>{{
const el=document.querySelector('{sel}');
if(el && 'value' in el){{
el.value = '';
el.dispatchEvent(new Event('input',{{bubbles:true}}));
el.dispatchEvent(new Event('change',{{bubbles:true}}));
}}
}})();
""").strip()
if op == "SET" and len(a) == 2:
# This is SET for input fields (SET `#field` "value")
sel = a[0].replace("'", "\\'")
val = a[1].replace("'", "\\'")
return textwrap.dedent(f"""
(()=>{{
const el=document.querySelector('{sel}');
if(el && 'value' in el){{
el.value = '';
el.focus&&el.focus();
el.value = '{val}';
el.dispatchEvent(new Event('input',{{bubbles:true}}));
el.dispatchEvent(new Event('change',{{bubbles:true}}));
}}
}})();
""").strip()
if op in ("PRESS","KEYDOWN","KEYUP"):
key = a[0]
evs = {"PRESS":("keydown","keyup"),"KEYDOWN":("keydown",),"KEYUP":("keyup",)}[op]
return ";".join([f"document.dispatchEvent(new KeyboardEvent('{e}',{{key:'{key}',bubbles:true}}))" for e in evs]) + ";"
if op == "EVAL":
return textwrap.dedent(f"""
(()=>{{
try {{
{a[0]};
}} catch (e) {{
console.error('C4A-Script EVAL error:', e);
}}
}})();
""").strip()
if op == "IF":
condition, then_cmd, else_cmd = a
# Generate condition JavaScript
js_condition = self._emit_condition(condition)
# Generate commands - handle both regular commands and procedure calls
then_js = self._handle_cmd_or_proc(then_cmd)
else_js = self._handle_cmd_or_proc(else_cmd) if else_cmd else ""
if else_cmd:
return textwrap.dedent(f"""
if ({js_condition}) {{
{then_js}
}} else {{
{else_js}
}}
""").strip()
else:
return textwrap.dedent(f"""
if ({js_condition}) {{
{then_js}
}}
""").strip()
if op == "REPEAT":
cmd, count = a
# Handle the count - could be number or JS expression
if count.isdigit():
# Simple number
repeat_js = self._handle_cmd_or_proc(cmd)
return textwrap.dedent(f"""
for (let _i = 0; _i < {count}; _i++) {{
{repeat_js}
}}
""").strip()
else:
# JS expression (from backticks)
count_expr = count[1:-1] if count.startswith('`') and count.endswith('`') else count
repeat_js = self._handle_cmd_or_proc(cmd)
return textwrap.dedent(f"""
(()=>{{
const _count = {count_expr};
if (typeof _count === 'number') {{
for (let _i = 0; _i < _count; _i++) {{
{repeat_js}
}}
}} else if (_count) {{
{repeat_js}
}}
}})();
""").strip()
raise ValueError(f"Unhandled op {op}")
def _emit_condition(self, condition):
"""Convert a condition tuple to JavaScript"""
cond_type = condition[0]
if cond_type == "EXISTS":
return f"!!document.querySelector('{condition[1]}')"
elif cond_type == "NOT":
# Recursively handle the negated condition
inner_condition = self._emit_condition(condition[1])
return f"!({inner_condition})"
else: # JS condition
return condition[1]
def _handle_cmd_or_proc(self, cmd):
"""Handle a command that might be a regular command or a procedure call"""
if not cmd:
return ""
if isinstance(cmd, Cmd):
if cmd.op == "CALL":
# Inline the procedure
if cmd.args[0] not in self.procs:
raise ValueError(f"Unknown procedure {cmd.args[0]!r}")
proc_body = self.procs[cmd.args[0]].body
return "\n".join([self._emit_js(c) for c in proc_body if c.op != "NOP"])
else:
return self._emit_js(cmd)
return ""
# --------------------------------------------------------------------------- #
# 5. Helpers + demo
# --------------------------------------------------------------------------- #
def compile_string(script: Union[str, List[str]], *, root: Union[pathlib.Path, None] = None) -> List[str]:
"""Compile C4A-Script from string or list of strings to JavaScript.
Args:
script: C4A-Script as a string or list of command strings
root: Root directory for resolving includes (optional)
Returns:
List of JavaScript command strings
Raises:
C4AScriptError: When compilation fails with detailed error information
"""
try:
return Compiler(root).compile(script)
except Exception as e:
# Wrap the error with better formatting
raise C4AScriptError.from_exception(e, script)
def compile_file(path: pathlib.Path) -> List[str]:
"""Compile C4A-Script from file to JavaScript.
Args:
path: Path to C4A-Script file
Returns:
List of JavaScript command strings
"""
return compile_string(path.read_text(), root=path.parent)
def compile_lines(lines: List[str], *, root: Union[pathlib.Path, None] = None) -> List[str]:
"""Compile C4A-Script from list of lines to JavaScript.
Args:
lines: List of C4A-Script command lines
root: Root directory for resolving includes (optional)
Returns:
List of JavaScript command strings
"""
return compile_string(lines, root=root)
DEMO = """
# quick sanity demo
PROC login
SET `input[name="username"]` $user
SET `input[name="password"]` $pass
CLICK `button.submit`
ENDPROC
SETVAR user = "tom@crawl4ai.com"
SETVAR pass = "hunter2"
GO https://example.com/login
WAIT `input[name="username"]` 10
login
WAIT 3
EVAL `console.log('logged in')`
"""
if __name__ == "__main__":
if len(sys.argv) == 2:
for js in compile_file(pathlib.Path(sys.argv[1])):
print(js)
else:
print("=== DEMO ===")
for js in compile_string(DEMO):
print(js)

View File

@@ -9,44 +9,83 @@ from urllib.parse import urlparse
import OpenSSL.crypto
from pathlib import Path
# === Inherit from dict ===
class SSLCertificate(dict):
"""
A class representing an SSL certificate, behaving like a dictionary
for direct JSON serialization. It stores the certificate information internally
and provides methods for export and property access.
Inherits from dict, so instances are directly JSON serializable.
class SSLCertificate:
"""
A class representing an SSL certificate with methods to export in various formats.
# Use __slots__ for potential memory optimization if desired, though less common when inheriting dict
# __slots__ = ("_cert_info",) # If using slots, be careful with dict inheritance interaction
Attributes:
cert_info (Dict[str, Any]): The certificate information.
Methods:
from_url(url: str, timeout: int = 10) -> Optional['SSLCertificate']: Create SSLCertificate instance from a URL.
from_file(file_path: str) -> Optional['SSLCertificate']: Create SSLCertificate instance from a file.
from_binary(binary_data: bytes) -> Optional['SSLCertificate']: Create SSLCertificate instance from binary data.
export_as_pem() -> str: Export the certificate as PEM format.
export_as_der() -> bytes: Export the certificate as DER format.
export_as_json() -> Dict[str, Any]: Export the certificate as JSON format.
export_as_text() -> str: Export the certificate as text format.
"""
def __init__(self, cert_info: Dict[str, Any]):
self._cert_info = self._decode_cert_data(cert_info)
@staticmethod
def from_url(url: str, timeout: int = 10) -> Optional["SSLCertificate"]:
"""
Initializes the SSLCertificate object.
Create SSLCertificate instance from a URL.
Args:
cert_info (Dict[str, Any]): The raw certificate dictionary.
url (str): URL of the website.
timeout (int): Timeout for the connection (default: 10).
Returns:
Optional[SSLCertificate]: SSLCertificate instance if successful, None otherwise.
"""
# 1. Decode the data (handle bytes -> str)
decoded_info = self._decode_cert_data(cert_info)
try:
hostname = urlparse(url).netloc
if ":" in hostname:
hostname = hostname.split(":")[0]
# 2. Store the decoded info internally (optional but good practice)
# self._cert_info = decoded_info # You can keep this if methods rely on it
context = ssl.create_default_context()
with socket.create_connection((hostname, 443), timeout=timeout) as sock:
with context.wrap_socket(sock, server_hostname=hostname) as ssock:
cert_binary = ssock.getpeercert(binary_form=True)
x509 = OpenSSL.crypto.load_certificate(
OpenSSL.crypto.FILETYPE_ASN1, cert_binary
)
# 3. Initialize the dictionary part of the object with the decoded data
super().__init__(decoded_info)
cert_info = {
"subject": dict(x509.get_subject().get_components()),
"issuer": dict(x509.get_issuer().get_components()),
"version": x509.get_version(),
"serial_number": hex(x509.get_serial_number()),
"not_before": x509.get_notBefore(),
"not_after": x509.get_notAfter(),
"fingerprint": x509.digest("sha256").hex(),
"signature_algorithm": x509.get_signature_algorithm(),
"raw_cert": base64.b64encode(cert_binary),
}
# Add extensions
extensions = []
for i in range(x509.get_extension_count()):
ext = x509.get_extension(i)
extensions.append(
{"name": ext.get_short_name(), "value": str(ext)}
)
cert_info["extensions"] = extensions
return SSLCertificate(cert_info)
except Exception:
return None
@staticmethod
def _decode_cert_data(data: Any) -> Any:
"""Helper method to decode bytes in certificate data."""
if isinstance(data, bytes):
try:
# Try UTF-8 first, fallback to latin-1 for arbitrary bytes
return data.decode("utf-8")
except UnicodeDecodeError:
return data.decode("latin-1") # Or handle as needed, maybe hex representation
return data.decode("utf-8")
elif isinstance(data, dict):
return {
(
@@ -58,119 +97,36 @@ class SSLCertificate(dict):
return [SSLCertificate._decode_cert_data(item) for item in data]
return data
@staticmethod
def from_url(url: str, timeout: int = 10) -> Optional["SSLCertificate"]:
"""
Create SSLCertificate instance from a URL. Fetches cert info and initializes.
(Fetching logic remains the same)
"""
cert_info_raw = None # Variable to hold the fetched dict
try:
hostname = urlparse(url).netloc
if ":" in hostname:
hostname = hostname.split(":")[0]
context = ssl.create_default_context()
# Set check_hostname to False and verify_mode to CERT_NONE temporarily
# for potentially problematic certificates during fetch, but parse the result regardless.
# context.check_hostname = False
# context.verify_mode = ssl.CERT_NONE
with socket.create_connection((hostname, 443), timeout=timeout) as sock:
with context.wrap_socket(sock, server_hostname=hostname) as ssock:
cert_binary = ssock.getpeercert(binary_form=True)
if not cert_binary:
print(f"Warning: No certificate returned for {hostname}")
return None
x509 = OpenSSL.crypto.load_certificate(
OpenSSL.crypto.FILETYPE_ASN1, cert_binary
)
# Create the dictionary directly
cert_info_raw = {
"subject": dict(x509.get_subject().get_components()),
"issuer": dict(x509.get_issuer().get_components()),
"version": x509.get_version(),
"serial_number": hex(x509.get_serial_number()),
"not_before": x509.get_notBefore(), # Keep as bytes initially, _decode handles it
"not_after": x509.get_notAfter(), # Keep as bytes initially
"fingerprint": x509.digest("sha256").hex(), # hex() is already string
"signature_algorithm": x509.get_signature_algorithm(), # Keep as bytes
"raw_cert": base64.b64encode(cert_binary), # Base64 is bytes, _decode handles it
}
# Add extensions
extensions = []
for i in range(x509.get_extension_count()):
ext = x509.get_extension(i)
# get_short_name() returns bytes, str(ext) handles value conversion
extensions.append(
{"name": ext.get_short_name(), "value": str(ext)}
)
cert_info_raw["extensions"] = extensions
except ssl.SSLCertVerificationError as e:
print(f"SSL Verification Error for {url}: {e}")
# Decide if you want to proceed or return None based on your needs
# You might try fetching without verification here if needed, but be cautious.
return None
except socket.gaierror:
print(f"Could not resolve hostname: {hostname}")
return None
except socket.timeout:
print(f"Connection timed out for {url}")
return None
except Exception as e:
print(f"Error fetching/processing certificate for {url}: {e}")
# Log the full error details if needed: logging.exception("Cert fetch error")
return None
# If successful, create the SSLCertificate instance from the dictionary
if cert_info_raw:
return SSLCertificate(cert_info_raw)
else:
return None
# --- Properties now access the dictionary items directly via self[] ---
@property
def issuer(self) -> Dict[str, str]:
return self.get("issuer", {}) # Use self.get for safety
@property
def subject(self) -> Dict[str, str]:
return self.get("subject", {})
@property
def valid_from(self) -> str:
return self.get("not_before", "")
@property
def valid_until(self) -> str:
return self.get("not_after", "")
@property
def fingerprint(self) -> str:
return self.get("fingerprint", "")
# --- Export methods can use `self` directly as it is the dict ---
def to_json(self, filepath: Optional[str] = None) -> Optional[str]:
"""Export certificate as JSON."""
# `self` is already the dictionary we want to serialize
json_str = json.dumps(self, indent=2, ensure_ascii=False)
"""
Export certificate as JSON.
Args:
filepath (Optional[str]): Path to save the JSON file (default: None).
Returns:
Optional[str]: JSON string if successful, None otherwise.
"""
json_str = json.dumps(self._cert_info, indent=2, ensure_ascii=False)
if filepath:
Path(filepath).write_text(json_str, encoding="utf-8")
return None
return json_str
def to_pem(self, filepath: Optional[str] = None) -> Optional[str]:
"""Export certificate as PEM."""
"""
Export certificate as PEM.
Args:
filepath (Optional[str]): Path to save the PEM file (default: None).
Returns:
Optional[str]: PEM string if successful, None otherwise.
"""
try:
# Decode the raw_cert (which should be string due to _decode)
raw_cert_bytes = base64.b64decode(self.get("raw_cert", ""))
x509 = OpenSSL.crypto.load_certificate(
OpenSSL.crypto.FILETYPE_ASN1, raw_cert_bytes
OpenSSL.crypto.FILETYPE_ASN1,
base64.b64decode(self._cert_info["raw_cert"]),
)
pem_data = OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_PEM, x509
@@ -180,25 +136,49 @@ class SSLCertificate(dict):
Path(filepath).write_text(pem_data, encoding="utf-8")
return None
return pem_data
except Exception as e:
print(f"Error converting to PEM: {e}")
return None
except Exception:
return None
def to_der(self, filepath: Optional[str] = None) -> Optional[bytes]:
"""Export certificate as DER."""
"""
Export certificate as DER.
Args:
filepath (Optional[str]): Path to save the DER file (default: None).
Returns:
Optional[bytes]: DER bytes if successful, None otherwise.
"""
try:
# Decode the raw_cert (which should be string due to _decode)
der_data = base64.b64decode(self.get("raw_cert", ""))
der_data = base64.b64decode(self._cert_info["raw_cert"])
if filepath:
Path(filepath).write_bytes(der_data)
return None
return der_data
except Exception as e:
print(f"Error converting to DER: {e}")
return None
except Exception:
return None
# Optional: Add __repr__ for better debugging
def __repr__(self) -> str:
subject_cn = self.subject.get('CN', 'N/A')
issuer_cn = self.issuer.get('CN', 'N/A')
return f"<SSLCertificate Subject='{subject_cn}' Issuer='{issuer_cn}'>"
@property
def issuer(self) -> Dict[str, str]:
"""Get certificate issuer information."""
return self._cert_info.get("issuer", {})
@property
def subject(self) -> Dict[str, str]:
"""Get certificate subject information."""
return self._cert_info.get("subject", {})
@property
def valid_from(self) -> str:
"""Get certificate validity start date."""
return self._cert_info.get("not_before", "")
@property
def valid_until(self) -> str:
"""Get certificate validity end date."""
return self._cert_info.get("not_after", "")
@property
def fingerprint(self) -> str:
"""Get certificate fingerprint."""
return self._cert_info.get("fingerprint", "")

View File

@@ -1,195 +0,0 @@
from typing import TYPE_CHECKING, Union
# Logger types
AsyncLoggerBase = Union['AsyncLoggerBaseType']
AsyncLogger = Union['AsyncLoggerType']
# Crawler core types
AsyncWebCrawler = Union['AsyncWebCrawlerType']
CacheMode = Union['CacheModeType']
CrawlResult = Union['CrawlResultType']
CrawlerHub = Union['CrawlerHubType']
BrowserProfiler = Union['BrowserProfilerType']
# NEW: Add AsyncUrlSeederType
AsyncUrlSeeder = Union['AsyncUrlSeederType']
# Configuration types
BrowserConfig = Union['BrowserConfigType']
CrawlerRunConfig = Union['CrawlerRunConfigType']
HTTPCrawlerConfig = Union['HTTPCrawlerConfigType']
LLMConfig = Union['LLMConfigType']
# NEW: Add SeedingConfigType
SeedingConfig = Union['SeedingConfigType']
# Content scraping types
ContentScrapingStrategy = Union['ContentScrapingStrategyType']
LXMLWebScrapingStrategy = Union['LXMLWebScrapingStrategyType']
# Backward compatibility alias
WebScrapingStrategy = Union['LXMLWebScrapingStrategyType']
# Proxy types
ProxyRotationStrategy = Union['ProxyRotationStrategyType']
RoundRobinProxyStrategy = Union['RoundRobinProxyStrategyType']
# Extraction types
ExtractionStrategy = Union['ExtractionStrategyType']
LLMExtractionStrategy = Union['LLMExtractionStrategyType']
CosineStrategy = Union['CosineStrategyType']
JsonCssExtractionStrategy = Union['JsonCssExtractionStrategyType']
JsonXPathExtractionStrategy = Union['JsonXPathExtractionStrategyType']
# Chunking types
ChunkingStrategy = Union['ChunkingStrategyType']
RegexChunking = Union['RegexChunkingType']
# Markdown generation types
DefaultMarkdownGenerator = Union['DefaultMarkdownGeneratorType']
MarkdownGenerationResult = Union['MarkdownGenerationResultType']
# Content filter types
RelevantContentFilter = Union['RelevantContentFilterType']
PruningContentFilter = Union['PruningContentFilterType']
BM25ContentFilter = Union['BM25ContentFilterType']
LLMContentFilter = Union['LLMContentFilterType']
# Dispatcher types
BaseDispatcher = Union['BaseDispatcherType']
MemoryAdaptiveDispatcher = Union['MemoryAdaptiveDispatcherType']
SemaphoreDispatcher = Union['SemaphoreDispatcherType']
RateLimiter = Union['RateLimiterType']
CrawlerMonitor = Union['CrawlerMonitorType']
DisplayMode = Union['DisplayModeType']
RunManyReturn = Union['RunManyReturnType']
# Docker client
Crawl4aiDockerClient = Union['Crawl4aiDockerClientType']
# Deep crawling types
DeepCrawlStrategy = Union['DeepCrawlStrategyType']
BFSDeepCrawlStrategy = Union['BFSDeepCrawlStrategyType']
FilterChain = Union['FilterChainType']
ContentTypeFilter = Union['ContentTypeFilterType']
DomainFilter = Union['DomainFilterType']
URLFilter = Union['URLFilterType']
FilterStats = Union['FilterStatsType']
SEOFilter = Union['SEOFilterType']
KeywordRelevanceScorer = Union['KeywordRelevanceScorerType']
URLScorer = Union['URLScorerType']
CompositeScorer = Union['CompositeScorerType']
DomainAuthorityScorer = Union['DomainAuthorityScorerType']
FreshnessScorer = Union['FreshnessScorerType']
PathDepthScorer = Union['PathDepthScorerType']
BestFirstCrawlingStrategy = Union['BestFirstCrawlingStrategyType']
DFSDeepCrawlStrategy = Union['DFSDeepCrawlStrategyType']
DeepCrawlDecorator = Union['DeepCrawlDecoratorType']
# Only import types during type checking to avoid circular imports
if TYPE_CHECKING:
# Logger imports
from .async_logger import (
AsyncLoggerBase as AsyncLoggerBaseType,
AsyncLogger as AsyncLoggerType,
)
# Crawler core imports
from .async_webcrawler import (
AsyncWebCrawler as AsyncWebCrawlerType,
CacheMode as CacheModeType,
)
from .models import CrawlResult as CrawlResultType
from .hub import CrawlerHub as CrawlerHubType
from .browser_profiler import BrowserProfiler as BrowserProfilerType
# NEW: Import AsyncUrlSeeder for type checking
from .async_url_seeder import AsyncUrlSeeder as AsyncUrlSeederType
# Configuration imports
from .async_configs import (
BrowserConfig as BrowserConfigType,
CrawlerRunConfig as CrawlerRunConfigType,
HTTPCrawlerConfig as HTTPCrawlerConfigType,
LLMConfig as LLMConfigType,
# NEW: Import SeedingConfig for type checking
SeedingConfig as SeedingConfigType,
)
# Content scraping imports
from .content_scraping_strategy import (
ContentScrapingStrategy as ContentScrapingStrategyType,
LXMLWebScrapingStrategy as LXMLWebScrapingStrategyType,
)
# Proxy imports
from .proxy_strategy import (
ProxyRotationStrategy as ProxyRotationStrategyType,
RoundRobinProxyStrategy as RoundRobinProxyStrategyType,
)
# Extraction imports
from .extraction_strategy import (
ExtractionStrategy as ExtractionStrategyType,
LLMExtractionStrategy as LLMExtractionStrategyType,
CosineStrategy as CosineStrategyType,
JsonCssExtractionStrategy as JsonCssExtractionStrategyType,
JsonXPathExtractionStrategy as JsonXPathExtractionStrategyType,
)
# Chunking imports
from .chunking_strategy import (
ChunkingStrategy as ChunkingStrategyType,
RegexChunking as RegexChunkingType,
)
# Markdown generation imports
from .markdown_generation_strategy import (
DefaultMarkdownGenerator as DefaultMarkdownGeneratorType,
)
from .models import MarkdownGenerationResult as MarkdownGenerationResultType
# Content filter imports
from .content_filter_strategy import (
RelevantContentFilter as RelevantContentFilterType,
PruningContentFilter as PruningContentFilterType,
BM25ContentFilter as BM25ContentFilterType,
LLMContentFilter as LLMContentFilterType,
)
# Dispatcher imports
from .async_dispatcher import (
BaseDispatcher as BaseDispatcherType,
MemoryAdaptiveDispatcher as MemoryAdaptiveDispatcherType,
SemaphoreDispatcher as SemaphoreDispatcherType,
RateLimiter as RateLimiterType,
CrawlerMonitor as CrawlerMonitorType,
DisplayMode as DisplayModeType,
RunManyReturn as RunManyReturnType,
)
# Docker client
from .docker_client import Crawl4aiDockerClient as Crawl4aiDockerClientType
# Deep crawling imports
from .deep_crawling import (
DeepCrawlStrategy as DeepCrawlStrategyType,
BFSDeepCrawlStrategy as BFSDeepCrawlStrategyType,
FilterChain as FilterChainType,
ContentTypeFilter as ContentTypeFilterType,
DomainFilter as DomainFilterType,
URLFilter as URLFilterType,
FilterStats as FilterStatsType,
SEOFilter as SEOFilterType,
KeywordRelevanceScorer as KeywordRelevanceScorerType,
URLScorer as URLScorerType,
CompositeScorer as CompositeScorerType,
DomainAuthorityScorer as DomainAuthorityScorerType,
FreshnessScorer as FreshnessScorerType,
PathDepthScorer as PathDepthScorerType,
BestFirstCrawlingStrategy as BestFirstCrawlingStrategyType,
DFSDeepCrawlStrategy as DFSDeepCrawlStrategyType,
DeepCrawlDecorator as DeepCrawlDecoratorType,
)
def create_llm_config(*args, **kwargs) -> 'LLMConfigType':
from .async_configs import LLMConfig
return LLMConfig(*args, **kwargs)

View File

@@ -3,11 +3,12 @@ from typing import Optional, Literal, List, Dict, Tuple
import re
from abc import ABC, abstractmethod
import random
from fake_useragent import UserAgent
import requests
from lxml import html
import json
from typing import Union
from typing import Optional, List, Union, Dict
class UAGen(ABC):
@abstractmethod

File diff suppressed because it is too large

View File

@@ -11,7 +11,7 @@ from .extraction_strategy import *
from .crawler_strategy import *
from typing import List
from concurrent.futures import ThreadPoolExecutor
from ..content_scraping_strategy import LXMLWebScrapingStrategy as WebScrapingStrategy
from .content_scraping_strategy import WebScrapingStrategy
from .config import *
import warnings
import json

View File

@@ -1,31 +0,0 @@
# .dockerignore
*
# Allow specific files and directories when using local installation
!crawl4ai/
!docs/
!deploy/docker/
!setup.py
!pyproject.toml
!README.md
!LICENSE
!MANIFEST.in
!setup.cfg
!mkdocs.yml
.git/
__pycache__/
*.pyc
*.pyo
*.pyd
.DS_Store
.env
.venv
venv/
tests/
coverage.xml
*.log
*.swp
*.egg-info/
dist/
build/

View File

@@ -1,13 +0,0 @@
# LLM Provider Keys
OPENAI_API_KEY=your_openai_key_here
DEEPSEEK_API_KEY=your_deepseek_key_here
ANTHROPIC_API_KEY=your_anthropic_key_here
GROQ_API_KEY=your_groq_key_here
TOGETHER_API_KEY=your_together_key_here
MISTRAL_API_KEY=your_mistral_key_here
GEMINI_API_TOKEN=your_gemini_key_here
# Optional: Override the default LLM provider
# Examples: "openai/gpt-4", "anthropic/claude-3-opus", "deepseek/chat", etc.
# If not set, uses the provider specified in config.yml (default: openai/gpt-4o-mini)
# LLM_PROVIDER=anthropic/claude-3-opus

View File

@@ -1,846 +0,0 @@
# Crawl4AI Docker Guide 🐳
## Table of Contents
- [Prerequisites](#prerequisites)
- [Installation](#installation)
- [Option 1: Using Pre-built Docker Hub Images (Recommended)](#option-1-using-pre-built-docker-hub-images-recommended)
- [Option 2: Using Docker Compose](#option-2-using-docker-compose)
- [Option 3: Manual Local Build & Run](#option-3-manual-local-build--run)
- [Dockerfile Parameters](#dockerfile-parameters)
- [Using the API](#using-the-api)
- [Playground Interface](#playground-interface)
- [Python SDK](#python-sdk)
- [Understanding Request Schema](#understanding-request-schema)
- [REST API Examples](#rest-api-examples)
- [Additional API Endpoints](#additional-api-endpoints)
- [HTML Extraction Endpoint](#html-extraction-endpoint)
- [Screenshot Endpoint](#screenshot-endpoint)
- [PDF Export Endpoint](#pdf-export-endpoint)
- [JavaScript Execution Endpoint](#javascript-execution-endpoint)
- [Library Context Endpoint](#library-context-endpoint)
- [MCP (Model Context Protocol) Support](#mcp-model-context-protocol-support)
- [What is MCP?](#what-is-mcp)
- [Connecting via MCP](#connecting-via-mcp)
- [Using with Claude Code](#using-with-claude-code)
- [Available MCP Tools](#available-mcp-tools)
- [Testing MCP Connections](#testing-mcp-connections)
- [MCP Schemas](#mcp-schemas)
- [Metrics & Monitoring](#metrics--monitoring)
- [Deployment Scenarios](#deployment-scenarios)
- [Complete Examples](#complete-examples)
- [Server Configuration](#server-configuration)
- [Understanding config.yml](#understanding-configyml)
- [JWT Authentication](#jwt-authentication)
- [Configuration Tips and Best Practices](#configuration-tips-and-best-practices)
- [Customizing Your Configuration](#customizing-your-configuration)
- [Configuration Recommendations](#configuration-recommendations)
- [Getting Help](#getting-help)
- [Summary](#summary)
## Prerequisites
Before we dive in, make sure you have:
- Docker installed and running (version 20.10.0 or higher), including `docker compose` (usually bundled with Docker Desktop).
- `git` for cloning the repository.
- At least 4GB of RAM available for the container (more recommended for heavy use).
- Python 3.10+ (if using the Python SDK).
- Node.js 16+ (if using the Node.js examples).
> 💡 **Pro tip**: Run `docker info` to check your Docker installation and available resources.
## Installation
We offer several ways to get the Crawl4AI server running. The quickest way is to use our pre-built Docker Hub images.
### Option 1: Using Pre-built Docker Hub Images (Recommended)
Pull and run images directly from Docker Hub without building locally.
#### 1. Pull the Image
Our latest release candidate is `0.7.0-r1`. Images are built with multi-arch manifests, so Docker automatically pulls the correct version for your system.
> ⚠️ **Important Note**: The `latest` tag currently points to the stable `0.6.0` version. After testing and validation, `0.7.0` (without -r1) will be released and `latest` will be updated. For now, please use `0.7.0-r1` to test the new features.
```bash
# Pull the release candidate (for testing new features)
docker pull unclecode/crawl4ai:0.7.0-r1
# Or pull the current stable version (0.6.0)
docker pull unclecode/crawl4ai:latest
```
#### 2. Setup Environment (API Keys)
If you plan to use LLMs, create a `.llm.env` file in your working directory:
```bash
# Create a .llm.env file with your API keys
cat > .llm.env << EOL
# OpenAI
OPENAI_API_KEY=sk-your-key
# Anthropic
ANTHROPIC_API_KEY=your-anthropic-key
# Other providers as needed
# DEEPSEEK_API_KEY=your-deepseek-key
# GROQ_API_KEY=your-groq-key
# TOGETHER_API_KEY=your-together-key
# MISTRAL_API_KEY=your-mistral-key
# GEMINI_API_TOKEN=your-gemini-token
EOL
```
> 🔑 **Note**: Keep your API keys secure! Never commit `.llm.env` to version control.
#### 3. Run the Container
* **Basic run:**
```bash
docker run -d \
-p 11235:11235 \
--name crawl4ai \
--shm-size=1g \
unclecode/crawl4ai:0.7.0-r1
```
* **With LLM support:**
```bash
# Make sure .llm.env is in the current directory
docker run -d \
-p 11235:11235 \
--name crawl4ai \
--env-file .llm.env \
--shm-size=1g \
unclecode/crawl4ai:0.7.0-r1
```
> The server will be available at `http://localhost:11235`. Visit `/playground` to access the interactive testing interface.
#### 4. Stopping the Container
```bash
docker stop crawl4ai && docker rm crawl4ai
```
#### Docker Hub Versioning Explained
* **Image Name:** `unclecode/crawl4ai`
* **Tag Format:** `LIBRARY_VERSION[-SUFFIX]` (e.g., `0.7.0-r1`)
* `LIBRARY_VERSION`: The semantic version of the core `crawl4ai` Python library
* `SUFFIX`: Optional tag used for release candidates and revisions (e.g., `r1`)
* **`latest` Tag:** Points to the most recent stable version
* **Multi-Architecture Support:** All images support both `linux/amd64` and `linux/arm64` architectures through a single tag
### Option 2: Using Docker Compose
Docker Compose simplifies building and running the service, especially for local development and testing.
#### 1. Clone Repository
```bash
git clone https://github.com/unclecode/crawl4ai.git
cd crawl4ai
```
#### 2. Environment Setup (API Keys)
If you plan to use LLMs, copy the example environment file and add your API keys. This file should be in the **project root directory**.
```bash
# Make sure you are in the 'crawl4ai' root directory
cp deploy/docker/.llm.env.example .llm.env
# Now edit .llm.env and add your API keys
```
**Flexible LLM Provider Configuration:**
The Docker setup now supports flexible LLM provider configuration through three methods:
1. **Environment Variable** (Highest Priority): Set `LLM_PROVIDER` to override the default
```bash
export LLM_PROVIDER="anthropic/claude-3-opus"
# Or in your .llm.env file:
# LLM_PROVIDER=anthropic/claude-3-opus
```
2. **API Request Parameter**: Specify provider per request
```json
{
"url": "https://example.com",
"provider": "groq/mixtral-8x7b"
}
```
3. **Config File Default**: Falls back to `config.yml` (default: `openai/gpt-4o-mini`)
The system automatically selects the appropriate API key based on the provider.
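Conceptually, the selection boils down to mapping the provider prefix to the matching environment variable. The following is a minimal illustrative sketch only; the helper name and exact mapping are assumptions, and the server's real logic lives in its `get_llm_api_key` utility and may differ:
```python
import os
from typing import Optional

# Illustrative mapping from provider prefix to the env var holding its key.
# These variable names match the .llm.env example above.
PROVIDER_KEY_ENV = {
    "openai": "OPENAI_API_KEY",
    "anthropic": "ANTHROPIC_API_KEY",
    "deepseek": "DEEPSEEK_API_KEY",
    "groq": "GROQ_API_KEY",
    "together": "TOGETHER_API_KEY",
    "mistral": "MISTRAL_API_KEY",
    "gemini": "GEMINI_API_TOKEN",
}

def resolve_api_key(provider: str) -> Optional[str]:
    """Return the API key for a provider string such as 'groq/mixtral-8x7b'."""
    vendor = provider.split("/", 1)[0].lower()
    env_var = PROVIDER_KEY_ENV.get(vendor)
    return os.environ.get(env_var) if env_var else None

# Example: resolve_api_key("anthropic/claude-3-opus") reads ANTHROPIC_API_KEY.
```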
#### 3. Build and Run with Compose
The `docker-compose.yml` file in the project root provides a simplified approach that automatically handles architecture detection using buildx.
* **Run Pre-built Image from Docker Hub:**
```bash
# Pulls and runs the release candidate from Docker Hub
# Automatically selects the correct architecture
IMAGE=unclecode/crawl4ai:0.7.0-r1 docker compose up -d
```
* **Build and Run Locally:**
```bash
# Builds the image locally using Dockerfile and runs it
# Automatically uses the correct architecture for your machine
docker compose up --build -d
```
* **Customize the Build:**
```bash
# Build with all features (includes torch and transformers)
INSTALL_TYPE=all docker compose up --build -d
# Build with GPU support (for AMD64 platforms)
ENABLE_GPU=true docker compose up --build -d
```
> The server will be available at `http://localhost:11235`.
#### 4. Stopping the Service
```bash
# Stop the service
docker compose down
```
### Option 3: Manual Local Build & Run
Use this option if you want direct control over the build and run process without Docker Compose.
#### 1. Clone Repository & Setup Environment
Follow steps 1 and 2 from the Docker Compose section above (clone repo, `cd crawl4ai`, create `.llm.env` in the root).
#### 2. Build the Image (Multi-Arch)
Use `docker buildx` to build the image. Crawl4AI now uses buildx to handle multi-architecture builds automatically.
```bash
# Make sure you are in the 'crawl4ai' root directory
# Build for the current architecture and load it into Docker
docker buildx build -t crawl4ai-local:latest --load .
# Or build for multiple architectures (useful for publishing)
docker buildx build --platform linux/amd64,linux/arm64 -t crawl4ai-local:latest --load .
# Build with additional options
docker buildx build \
--build-arg INSTALL_TYPE=all \
--build-arg ENABLE_GPU=false \
-t crawl4ai-local:latest --load .
```
#### 3. Run the Container
* **Basic run (no LLM support):**
```bash
docker run -d \
-p 11235:11235 \
--name crawl4ai-standalone \
--shm-size=1g \
crawl4ai-local:latest
```
* **With LLM support:**
```bash
# Make sure .llm.env is in the current directory (project root)
docker run -d \
-p 11235:11235 \
--name crawl4ai-standalone \
--env-file .llm.env \
--shm-size=1g \
crawl4ai-local:latest
```
> The server will be available at `http://localhost:11235`.
#### 4. Stopping the Manual Container
```bash
docker stop crawl4ai-standalone && docker rm crawl4ai-standalone
```
---
## MCP (Model Context Protocol) Support
Crawl4AI server includes support for the Model Context Protocol (MCP), allowing you to connect the server's capabilities directly to MCP-compatible clients like Claude Code.
### What is MCP?
MCP is an open protocol that standardizes how applications provide context to LLMs. It allows AI models to access external tools, data sources, and services through a standardized interface.
### Connecting via MCP
The Crawl4AI server exposes two MCP endpoints:
- **Server-Sent Events (SSE)**: `http://localhost:11235/mcp/sse`
- **WebSocket**: `ws://localhost:11235/mcp/ws`
### Using with Claude Code
You can add Crawl4AI as an MCP tool provider in Claude Code with a simple command:
```bash
# Add the Crawl4AI server as an MCP provider
claude mcp add --transport sse c4ai-sse http://localhost:11235/mcp/sse
# List all MCP providers to verify it was added
claude mcp list
```
Once connected, Claude Code can directly use Crawl4AI's capabilities like screenshot capture, PDF generation, and HTML processing without having to make separate API calls.
### Available MCP Tools
When connected via MCP, the following tools are available (a minimal client sketch follows the list):
- `md` - Generate markdown from web content
- `html` - Extract preprocessed HTML
- `screenshot` - Capture webpage screenshots
- `pdf` - Generate PDF documents
- `execute_js` - Run JavaScript on web pages
- `crawl` - Perform multi-URL crawling
- `ask` - Query the Crawl4AI library context
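Beyond Claude Code, any MCP client can talk to these tools. Below is a minimal sketch using the `mcp` Python package (the same dependency pinned in the server's requirements); the URL assumes the default port from this guide, and exact SDK details may vary between versions:
```python
import asyncio

from mcp import ClientSession
from mcp.client.sse import sse_client

async def list_crawl4ai_tools() -> None:
    # Connect to the Crawl4AI MCP SSE endpoint and list the exposed tools
    async with sse_client("http://localhost:11235/mcp/sse") as (read_stream, write_stream):
        async with ClientSession(read_stream, write_stream) as session:
            await session.initialize()
            tools = await session.list_tools()
            for tool in tools.tools:
                print(f"{tool.name}: {tool.description}")

if __name__ == "__main__":
    asyncio.run(list_crawl4ai_tools())
```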
### Testing MCP Connections
You can test the MCP WebSocket connection using the test file included in the repository:
```bash
# From the repository root
python tests/mcp/test_mcp_socket.py
```
### MCP Schemas
Access the MCP tool schemas at `http://localhost:11235/mcp/schema` for detailed information on each tool's parameters and capabilities.
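For example, the schema can be fetched and inspected with a few lines of Python (assuming the default port):
```python
import requests

# The /mcp/schema endpoint returns "tools", "resources" and "resource_templates"
schema = requests.get("http://localhost:11235/mcp/schema", timeout=10).json()
for tool in schema.get("tools", []):
    print(tool["name"], "-", tool.get("description", ""))
```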
---
## Additional API Endpoints
In addition to the core `/crawl` and `/crawl/stream` endpoints, the server provides several specialized endpoints (a combined Python example follows the endpoint descriptions):
### HTML Extraction Endpoint
```
POST /html
```
Crawls the URL and returns preprocessed HTML optimized for schema extraction.
```json
{
"url": "https://example.com"
}
```
### Screenshot Endpoint
```
POST /screenshot
```
Captures a full-page PNG screenshot of the specified URL.
```json
{
"url": "https://example.com",
"screenshot_wait_for": 2,
"output_path": "/path/to/save/screenshot.png"
}
```
- `screenshot_wait_for`: Optional delay in seconds before capture (default: 2)
- `output_path`: Optional path to save the screenshot (recommended)
### PDF Export Endpoint
```
POST /pdf
```
Generates a PDF document of the specified URL.
```json
{
"url": "https://example.com",
"output_path": "/path/to/save/document.pdf"
}
```
- `output_path`: Optional path to save the PDF (recommended)
### JavaScript Execution Endpoint
```
POST /execute_js
```
Executes JavaScript snippets on the specified URL and returns the full crawl result.
```json
{
"url": "https://example.com",
"scripts": [
"return document.title",
"return Array.from(document.querySelectorAll('a')).map(a => a.href)"
]
}
```
- `scripts`: List of JavaScript snippets to execute sequentially
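The combined Python sketch mentioned above, exercising each of these endpoints with `requests`. The payload shapes mirror the JSON examples, and the output paths are placeholders:
```python
import requests

BASE = "http://localhost:11235"  # default port used throughout this guide

# Preprocessed HTML optimized for schema extraction
html_resp = requests.post(f"{BASE}/html", json={"url": "https://example.com"})
print("HTML:", html_resp.status_code)

# Full-page PNG screenshot, saved to output_path (placeholder path below)
shot_resp = requests.post(f"{BASE}/screenshot", json={
    "url": "https://example.com",
    "screenshot_wait_for": 2,
    "output_path": "/tmp/example.png",
})
print("Screenshot:", shot_resp.status_code)

# PDF export (placeholder path below)
pdf_resp = requests.post(f"{BASE}/pdf", json={
    "url": "https://example.com",
    "output_path": "/tmp/example.pdf",
})
print("PDF:", pdf_resp.status_code)

# Execute JavaScript snippets and get the full crawl result back
js_resp = requests.post(f"{BASE}/execute_js", json={
    "url": "https://example.com",
    "scripts": ["return document.title"],
})
print("JS execution:", js_resp.status_code)
```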
---
## Dockerfile Parameters
You can customize the image build process using build arguments (`--build-arg`). These are typically used via `docker buildx build` or within the `docker-compose.yml` file.
```bash
# Example: Build with 'all' features using buildx
docker buildx build \
--platform linux/amd64,linux/arm64 \
--build-arg INSTALL_TYPE=all \
-t yourname/crawl4ai-all:latest \
--load \
. # Build from root context
```
### Build Arguments Explained
| Argument | Description | Default | Options |
| :----------- | :--------------------------------------- | :-------- | :--------------------------------- |
| INSTALL_TYPE | Feature set | `default` | `default`, `all`, `torch`, `transformer` |
| ENABLE_GPU | GPU support (CUDA for AMD64) | `false` | `true`, `false` |
| APP_HOME | Install path inside container (advanced) | `/app` | any valid path |
| USE_LOCAL | Install library from local source | `true` | `true`, `false` |
| GITHUB_REPO | Git repo to clone if USE_LOCAL=false | *(see Dockerfile)* | any git URL |
| GITHUB_BRANCH| Git branch to clone if USE_LOCAL=false | `main` | any branch name |
*(Note: PYTHON_VERSION is fixed by the `FROM` instruction in the Dockerfile)*
### Build Best Practices
1. **Choose the Right Install Type**
* `default`: Basic installation, smallest image size. Suitable for most standard web scraping and markdown generation.
* `all`: Full features including `torch` and `transformers` for advanced extraction strategies (e.g., CosineStrategy, certain LLM filters). Significantly larger image. Ensure you need these extras.
2. **Platform Considerations**
* Use `buildx` for building multi-architecture images, especially for pushing to registries.
* Use `docker compose` profiles (`local-amd64`, `local-arm64`) for easy platform-specific local builds.
3. **Performance Optimization**
* The image automatically includes platform-specific optimizations (OpenMP for AMD64, OpenBLAS for ARM64).
---
## Using the API
Communicate with the running Docker server via its REST API (defaulting to `http://localhost:11235`). You can use the Python SDK or make direct HTTP requests.
### Playground Interface
A built-in web playground is available at `http://localhost:11235/playground` for testing and generating API requests. The playground allows you to:
1. Configure `CrawlerRunConfig` and `BrowserConfig` using the main library's Python syntax
2. Test crawling operations directly from the interface
3. Generate corresponding JSON for REST API requests based on your configuration
This is the easiest way to translate Python configuration to JSON requests when building integrations.
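For example, the Python configuration you would build in the playground or the SDK corresponds to the wrapped JSON payload the REST API expects (the JSON structure is explained in detail in the next section):
```python
from crawl4ai import BrowserConfig, CrawlerRunConfig, CacheMode

# What you configure in Python (playground / SDK)...
browser_cfg = BrowserConfig(headless=True)
crawler_cfg = CrawlerRunConfig(stream=False, cache_mode=CacheMode.BYPASS)

# ...corresponds to this JSON for direct REST calls
# (note the {"type": ..., "params": ...} wrapping):
payload = {
    "urls": ["https://example.com"],
    "browser_config": {"type": "BrowserConfig", "params": {"headless": True}},
    "crawler_config": {
        "type": "CrawlerRunConfig",
        "params": {"stream": False, "cache_mode": "bypass"},
    },
}
```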
### Python SDK
Install the SDK: `pip install crawl4ai`
```python
import asyncio
from crawl4ai.docker_client import Crawl4aiDockerClient
from crawl4ai import BrowserConfig, CrawlerRunConfig, CacheMode # Assuming you have crawl4ai installed
async def main():
# Point to the correct server port
async with Crawl4aiDockerClient(base_url="http://localhost:11235", verbose=True) as client:
# If JWT is enabled on the server, authenticate first:
# await client.authenticate("user@example.com") # See Server Configuration section
# Example Non-streaming crawl
print("--- Running Non-Streaming Crawl ---")
results = await client.crawl(
["https://httpbin.org/html"],
browser_config=BrowserConfig(headless=True), # Use library classes for config aid
crawler_config=CrawlerRunConfig(cache_mode=CacheMode.BYPASS)
)
if results: # client.crawl returns None on failure
print(f"Non-streaming results success: {results.success}")
if results.success:
for result in results: # Iterate through the CrawlResultContainer
print(f"URL: {result.url}, Success: {result.success}")
else:
print("Non-streaming crawl failed.")
# Example Streaming crawl
print("\n--- Running Streaming Crawl ---")
stream_config = CrawlerRunConfig(stream=True, cache_mode=CacheMode.BYPASS)
try:
async for result in await client.crawl( # client.crawl returns an async generator for streaming
["https://httpbin.org/html", "https://httpbin.org/links/5/0"],
browser_config=BrowserConfig(headless=True),
crawler_config=stream_config
):
print(f"Streamed result: URL: {result.url}, Success: {result.success}")
except Exception as e:
print(f"Streaming crawl failed: {e}")
# Example Get schema
print("\n--- Getting Schema ---")
schema = await client.get_schema()
print(f"Schema received: {bool(schema)}") # Print whether schema was received
if __name__ == "__main__":
asyncio.run(main())
```
*(SDK parameters like timeout, verify_ssl etc. remain the same)*
### Second Approach: Direct API Calls
Crucially, when sending configurations directly via JSON, they **must** follow the `{"type": "ClassName", "params": {...}}` structure for any non-primitive value (like config objects or strategies). Dictionaries must be wrapped as `{"type": "dict", "value": {...}}`.
*(Keep the detailed explanation of Configuration Structure, Basic Pattern, Simple vs Complex, Strategy Pattern, Complex Nested Example, Quick Grammar Overview, Important Rules, Pro Tip)*
#### More Examples *(Ensure Schema example uses type/value wrapper)*
**Advanced Crawler Configuration**
*(Keep example, ensure cache_mode uses valid enum value like "bypass")*
**Extraction Strategy**
```json
{
"crawler_config": {
"type": "CrawlerRunConfig",
"params": {
"extraction_strategy": {
"type": "JsonCssExtractionStrategy",
"params": {
"schema": {
"type": "dict",
"value": {
"baseSelector": "article.post",
"fields": [
{"name": "title", "selector": "h1", "type": "text"},
{"name": "content", "selector": ".content", "type": "html"}
]
}
}
}
}
}
}
}
```
**LLM Extraction Strategy** *(Keep example, ensure schema uses type/value wrapper)*
*(Keep Deep Crawler Example)*
### REST API Examples
The examples below use port `11235`.
#### Simple Crawl
```python
import requests
# Configuration objects converted to the required JSON structure
browser_config_payload = {
"type": "BrowserConfig",
"params": {"headless": True}
}
crawler_config_payload = {
"type": "CrawlerRunConfig",
"params": {"stream": False, "cache_mode": "bypass"} # Use string value of enum
}
crawl_payload = {
"urls": ["https://httpbin.org/html"],
"browser_config": browser_config_payload,
"crawler_config": crawler_config_payload
}
response = requests.post(
"http://localhost:11235/crawl", # Updated port
# headers={"Authorization": f"Bearer {token}"}, # If JWT is enabled
json=crawl_payload
)
print(f"Status Code: {response.status_code}")
if response.ok:
print(response.json())
else:
print(f"Error: {response.text}")
```
#### Streaming Results
```python
import json
import httpx # Use httpx for async streaming example
async def test_stream_crawl(token: str = None): # Made token optional
"""Test the /crawl/stream endpoint with multiple URLs."""
url = "http://localhost:11235/crawl/stream" # Updated port
payload = {
"urls": [
"https://httpbin.org/html",
"https://httpbin.org/links/5/0",
],
"browser_config": {
"type": "BrowserConfig",
"params": {"headless": True, "viewport": {"type": "dict", "value": {"width": 1200, "height": 800}}} # Viewport needs type:dict
},
"crawler_config": {
"type": "CrawlerRunConfig",
"params": {"stream": True, "cache_mode": "bypass"}
}
}
headers = {}
# if token:
# headers = {"Authorization": f"Bearer {token}"} # If JWT is enabled
try:
async with httpx.AsyncClient() as client:
async with client.stream("POST", url, json=payload, headers=headers, timeout=120.0) as response:
print(f"Status: {response.status_code} (Expected: 200)")
response.raise_for_status() # Raise exception for bad status codes
# Read streaming response line-by-line (NDJSON)
async for line in response.aiter_lines():
if line:
try:
data = json.loads(line)
# Check for completion marker
if data.get("status") == "completed":
print("Stream completed.")
break
print(f"Streamed Result: {json.dumps(data, indent=2)}")
except json.JSONDecodeError:
print(f"Warning: Could not decode JSON line: {line}")
except httpx.HTTPStatusError as e:
print(f"HTTP error occurred: {e.response.status_code} - {e.response.text}")
except Exception as e:
print(f"Error in streaming crawl test: {str(e)}")
# To run this example:
# import asyncio
# asyncio.run(test_stream_crawl())
```
---
## Metrics & Monitoring
Keep an eye on your crawler with these endpoints:
- `/health` - Quick health check
- `/metrics` - Detailed Prometheus metrics
- `/schema` - Full API schema
Example health check:
```bash
curl http://localhost:11235/health
```
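The same checks from Python, for example inside your own monitoring scripts (only the HTTP status and raw response text are assumed here):
```python
import requests

BASE = "http://localhost:11235"

# Quick health check - a 200 response means the server is up
health = requests.get(f"{BASE}/health", timeout=5)
print("Health:", health.status_code, health.text)

# Prometheus metrics are plain text and can be scraped or inspected directly
metrics = requests.get(f"{BASE}/metrics", timeout=5)
print("Metrics sample:", metrics.text[:200])
```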
---
*(Deployment Scenarios and Complete Examples sections remain the same, maybe update links if examples moved)*
---
## Server Configuration
The server's behavior can be customized through the `config.yml` file.
### Understanding config.yml
The configuration file is loaded from `/app/config.yml` inside the container. By default, the file from `deploy/docker/config.yml` in the repository is copied there during the build.
Here's a detailed breakdown of the configuration options (using defaults from `deploy/docker/config.yml`):
```yaml
# Application Configuration
app:
title: "Crawl4AI API"
version: "1.0.0" # Consider setting this to match library version, e.g., "0.5.1"
host: "0.0.0.0"
port: 8020 # NOTE: This port is used ONLY when running server.py directly. Gunicorn overrides this (see supervisord.conf).
reload: False # Default set to False - suitable for production
timeout_keep_alive: 300
# Default LLM Configuration
llm:
provider: "openai/gpt-4o-mini" # Can be overridden by LLM_PROVIDER env var
api_key_env: "OPENAI_API_KEY"
# api_key: sk-... # If you pass the API key directly then api_key_env will be ignored
# Redis Configuration (Used by internal Redis server managed by supervisord)
redis:
host: "localhost"
port: 6379
db: 0
password: ""
# ... other redis options ...
# Rate Limiting Configuration
rate_limiting:
enabled: True
default_limit: "1000/minute"
trusted_proxies: []
storage_uri: "memory://" # Use "redis://localhost:6379" if you need persistent/shared limits
# Security Configuration
security:
enabled: false # Master toggle for security features
jwt_enabled: false # Enable JWT authentication (requires security.enabled=true)
https_redirect: false # Force HTTPS (requires security.enabled=true)
trusted_hosts: ["*"] # Allowed hosts (use specific domains in production)
headers: # Security headers (applied if security.enabled=true)
x_content_type_options: "nosniff"
x_frame_options: "DENY"
content_security_policy: "default-src 'self'"
strict_transport_security: "max-age=63072000; includeSubDomains"
# Crawler Configuration
crawler:
memory_threshold_percent: 95.0
rate_limiter:
base_delay: [1.0, 2.0] # Min/max delay between requests in seconds for dispatcher
timeouts:
stream_init: 30.0 # Timeout for stream initialization
batch_process: 300.0 # Timeout for non-streaming /crawl processing
# Logging Configuration
logging:
level: "INFO"
format: "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
# Observability Configuration
observability:
prometheus:
enabled: True
endpoint: "/metrics"
health_check:
endpoint: "/health"
```
*(JWT Authentication section remains the same, just note the default port is now 11235 for requests)*
*(Configuration Tips and Best Practices remain the same)*
### Customizing Your Configuration
You can override the default `config.yml`.
#### Method 1: Modify Before Build
1. Edit the `deploy/docker/config.yml` file in your local repository clone.
2. Build the image using `docker buildx` or `docker compose --profile local-... up --build`. The modified file will be copied into the image.
#### Method 2: Runtime Mount (Recommended for Custom Deploys)
1. Create your custom configuration file, e.g., `my-custom-config.yml` locally. Ensure it contains all necessary sections.
2. Mount it when running the container:
* **Using `docker run`:**
```bash
# Assumes my-custom-config.yml is in the current directory
docker run -d -p 11235:11235 \
--name crawl4ai-custom-config \
--env-file .llm.env \
--shm-size=1g \
-v $(pwd)/my-custom-config.yml:/app/config.yml \
unclecode/crawl4ai:latest # Or your specific tag
```
* **Using `docker-compose.yml`:** Add a `volumes` section to the service definition:
```yaml
services:
crawl4ai-hub-amd64: # Or your chosen service
image: unclecode/crawl4ai:latest
profiles: ["hub-amd64"]
<<: *base-config
volumes:
# Mount local custom config over the default one in the container
- ./my-custom-config.yml:/app/config.yml
# Keep the shared memory volume from base-config
- /dev/shm:/dev/shm
```
*(Note: Ensure `my-custom-config.yml` is in the same directory as `docker-compose.yml`)*
> 💡 When mounting, your custom file *completely replaces* the default one. Ensure it's a valid and complete configuration.
### Configuration Recommendations
1. **Security First** 🔒
- Always enable security in production
- Use specific trusted_hosts instead of wildcards
- Set up proper rate limiting to protect your server
- Consider your environment before enabling HTTPS redirect
2. **Resource Management** 💻
- Adjust memory_threshold_percent based on available RAM
- Set timeouts according to your content size and network conditions
- Use Redis for rate limiting in multi-container setups
3. **Monitoring** 📊
- Enable Prometheus if you need metrics
- Set DEBUG logging in development, INFO in production
- Regular health check monitoring is crucial
4. **Performance Tuning** ⚡
- Start with conservative rate limiter delays
- Increase batch_process timeout for large content
- Adjust stream_init timeout based on initial response times
## Getting Help
We're here to help you succeed with Crawl4AI! Here's how to get support:
- 📖 Check our [full documentation](https://docs.crawl4ai.com)
- 🐛 Found a bug? [Open an issue](https://github.com/unclecode/crawl4ai/issues)
- 💬 Join our [Discord community](https://discord.gg/crawl4ai)
- ⭐ Star us on GitHub to show support!
## Summary
In this guide, we've covered everything you need to get started with Crawl4AI's Docker deployment:
- Building and running the Docker container
- Configuring the environment
- Using the interactive playground for testing
- Making API requests with proper typing
- Using the Python SDK
- Leveraging specialized endpoints for screenshots, PDFs, and JavaScript execution
- Connecting via the Model Context Protocol (MCP)
- Monitoring your deployment
The new playground interface at `http://localhost:11235/playground` makes it much easier to test configurations and generate the corresponding JSON for API requests.
For AI application developers, the MCP integration allows tools like Claude Code to directly access Crawl4AI's capabilities without complex API handling.
Remember, the examples in the `examples` folder are your friends - they show real-world usage patterns that you can adapt for your needs.
Keep exploring, and don't hesitate to reach out if you need help! We're building something amazing together. 🚀
Happy crawling! 🕷️

View File

@@ -1,699 +0,0 @@
import os
import json
import orjson
import asyncio
from typing import List, Tuple, Dict
from functools import partial
from uuid import uuid4
from datetime import datetime
from base64 import b64encode
import logging
from typing import Optional, AsyncGenerator
from urllib.parse import unquote
from fastapi import HTTPException, Request, status
from fastapi.background import BackgroundTasks
from fastapi.responses import JSONResponse
from redis import asyncio as aioredis
from crawl4ai import (
AsyncWebCrawler,
CrawlerRunConfig,
LLMExtractionStrategy,
CacheMode,
BrowserConfig,
MemoryAdaptiveDispatcher,
RateLimiter,
LLMConfig
)
from crawl4ai.utils import perform_completion_with_backoff
from crawl4ai.content_filter_strategy import (
PruningContentFilter,
BM25ContentFilter,
LLMContentFilter
)
from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator
from crawl4ai.content_scraping_strategy import LXMLWebScrapingStrategy
from utils import (
TaskStatus,
FilterType,
get_base_url,
is_task_id,
should_cleanup_task,
decode_redis_hash,
get_llm_api_key,
validate_llm_provider
)
import psutil, time
logger = logging.getLogger(__name__)
# --- Helper to get memory ---
def _get_memory_mb():
try:
return psutil.Process().memory_info().rss / (1024 * 1024)
except Exception as e:
logger.warning(f"Could not get memory info: {e}")
return None
async def handle_llm_qa(
url: str,
query: str,
config: dict
) -> str:
"""Process QA using LLM with crawled content as context."""
try:
if not url.startswith(('http://', 'https://')) and not url.startswith(("raw:", "raw://")):
url = 'https://' + url
# Extract base URL by finding last '?q=' occurrence
last_q_index = url.rfind('?q=')
if last_q_index != -1:
url = url[:last_q_index]
# Get markdown content
async with AsyncWebCrawler() as crawler:
result = await crawler.arun(url)
if not result.success:
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=result.error_message
)
content = result.markdown.fit_markdown or result.markdown.raw_markdown
# Create prompt and get LLM response
prompt = f"""Use the following content as context to answer the question.
Content:
{content}
Question: {query}
Answer:"""
# api_token=os.environ.get(config["llm"].get("api_key_env", ""))
response = perform_completion_with_backoff(
provider=config["llm"]["provider"],
prompt_with_variables=prompt,
api_token=get_llm_api_key(config)
)
return response.choices[0].message.content
except Exception as e:
logger.error(f"QA processing error: {str(e)}", exc_info=True)
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=str(e)
)
async def process_llm_extraction(
redis: aioredis.Redis,
config: dict,
task_id: str,
url: str,
instruction: str,
schema: Optional[str] = None,
cache: str = "0",
provider: Optional[str] = None
) -> None:
"""Process LLM extraction in background."""
try:
# Validate provider
is_valid, error_msg = validate_llm_provider(config, provider)
if not is_valid:
await redis.hset(f"task:{task_id}", mapping={
"status": TaskStatus.FAILED,
"error": error_msg
})
return
api_key = get_llm_api_key(config, provider)
llm_strategy = LLMExtractionStrategy(
llm_config=LLMConfig(
provider=provider or config["llm"]["provider"],
api_token=api_key
),
instruction=instruction,
schema=json.loads(schema) if schema else None,
)
cache_mode = CacheMode.ENABLED if cache == "1" else CacheMode.WRITE_ONLY
async with AsyncWebCrawler() as crawler:
result = await crawler.arun(
url=url,
config=CrawlerRunConfig(
extraction_strategy=llm_strategy,
scraping_strategy=LXMLWebScrapingStrategy(),
cache_mode=cache_mode
)
)
if not result.success:
await redis.hset(f"task:{task_id}", mapping={
"status": TaskStatus.FAILED,
"error": result.error_message
})
return
try:
content = json.loads(result.extracted_content)
except json.JSONDecodeError:
content = result.extracted_content
await redis.hset(f"task:{task_id}", mapping={
"status": TaskStatus.COMPLETED,
"result": json.dumps(content)
})
except Exception as e:
logger.error(f"LLM extraction error: {str(e)}", exc_info=True)
await redis.hset(f"task:{task_id}", mapping={
"status": TaskStatus.FAILED,
"error": str(e)
})
async def handle_markdown_request(
url: str,
filter_type: FilterType,
query: Optional[str] = None,
cache: str = "0",
config: Optional[dict] = None,
provider: Optional[str] = None
) -> str:
"""Handle markdown generation requests."""
try:
# Validate provider if using LLM filter
if filter_type == FilterType.LLM:
is_valid, error_msg = validate_llm_provider(config, provider)
if not is_valid:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=error_msg
)
decoded_url = unquote(url)
if not decoded_url.startswith(('http://', 'https://')) and not decoded_url.startswith(("raw:", "raw://")):
decoded_url = 'https://' + decoded_url
if filter_type == FilterType.RAW:
md_generator = DefaultMarkdownGenerator()
else:
content_filter = {
FilterType.FIT: PruningContentFilter(),
FilterType.BM25: BM25ContentFilter(user_query=query or ""),
FilterType.LLM: LLMContentFilter(
llm_config=LLMConfig(
provider=provider or config["llm"]["provider"],
api_token=get_llm_api_key(config, provider),
),
instruction=query or "Extract main content"
)
}[filter_type]
md_generator = DefaultMarkdownGenerator(content_filter=content_filter)
cache_mode = CacheMode.ENABLED if cache == "1" else CacheMode.WRITE_ONLY
async with AsyncWebCrawler() as crawler:
result = await crawler.arun(
url=decoded_url,
config=CrawlerRunConfig(
markdown_generator=md_generator,
scraping_strategy=LXMLWebScrapingStrategy(),
cache_mode=cache_mode
)
)
if not result.success:
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=result.error_message
)
return (result.markdown.raw_markdown
if filter_type == FilterType.RAW
else result.markdown.fit_markdown)
except Exception as e:
logger.error(f"Markdown error: {str(e)}", exc_info=True)
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=str(e)
)
async def handle_llm_request(
redis: aioredis.Redis,
background_tasks: BackgroundTasks,
request: Request,
input_path: str,
query: Optional[str] = None,
schema: Optional[str] = None,
cache: str = "0",
config: Optional[dict] = None,
provider: Optional[str] = None
) -> JSONResponse:
"""Handle LLM extraction requests."""
base_url = get_base_url(request)
try:
if is_task_id(input_path):
return await handle_task_status(
redis, input_path, base_url
)
if not query:
return JSONResponse({
"message": "Please provide an instruction",
"_links": {
"example": {
"href": f"{base_url}/llm/{input_path}?q=Extract+main+content",
"title": "Try this example"
}
}
})
return await create_new_task(
redis,
background_tasks,
input_path,
query,
schema,
cache,
base_url,
config,
provider
)
except Exception as e:
logger.error(f"LLM endpoint error: {str(e)}", exc_info=True)
return JSONResponse({
"error": str(e),
"_links": {
"retry": {"href": str(request.url)}
}
}, status_code=status.HTTP_500_INTERNAL_SERVER_ERROR)
async def handle_task_status(
redis: aioredis.Redis,
task_id: str,
base_url: str,
*,
keep: bool = False
) -> JSONResponse:
"""Handle task status check requests."""
task = await redis.hgetall(f"task:{task_id}")
if not task:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="Task not found"
)
task = decode_redis_hash(task)
response = create_task_response(task, task_id, base_url)
if task["status"] in [TaskStatus.COMPLETED, TaskStatus.FAILED]:
if not keep and should_cleanup_task(task["created_at"]):
await redis.delete(f"task:{task_id}")
return JSONResponse(response)
async def create_new_task(
redis: aioredis.Redis,
background_tasks: BackgroundTasks,
input_path: str,
query: str,
schema: Optional[str],
cache: str,
base_url: str,
config: dict,
provider: Optional[str] = None
) -> JSONResponse:
"""Create and initialize a new task."""
decoded_url = unquote(input_path)
if not decoded_url.startswith(('http://', 'https://')) and not decoded_url.startswith(("raw:", "raw://")):
decoded_url = 'https://' + decoded_url
from datetime import datetime
task_id = f"llm_{int(datetime.now().timestamp())}_{id(background_tasks)}"
await redis.hset(f"task:{task_id}", mapping={
"status": TaskStatus.PROCESSING,
"created_at": datetime.now().isoformat(),
"url": decoded_url
})
background_tasks.add_task(
process_llm_extraction,
redis,
config,
task_id,
decoded_url,
query,
schema,
cache,
provider
)
return JSONResponse({
"task_id": task_id,
"status": TaskStatus.PROCESSING,
"url": decoded_url,
"_links": {
"self": {"href": f"{base_url}/llm/{task_id}"},
"status": {"href": f"{base_url}/llm/{task_id}"}
}
})
def create_task_response(task: dict, task_id: str, base_url: str) -> dict:
"""Create response for task status check."""
response = {
"task_id": task_id,
"status": task["status"],
"created_at": task["created_at"],
"url": task["url"],
"_links": {
"self": {"href": f"{base_url}/llm/{task_id}"},
"refresh": {"href": f"{base_url}/llm/{task_id}"}
}
}
if task["status"] == TaskStatus.COMPLETED:
response["result"] = json.loads(task["result"])
elif task["status"] == TaskStatus.FAILED:
response["error"] = task["error"]
return response
async def stream_results(crawler: AsyncWebCrawler, results_gen: AsyncGenerator) -> AsyncGenerator[bytes, None]:
"""Stream results with heartbeats and completion markers."""
import orjson
from datetime import datetime
import inspect
def orjson_default(obj):
# Handle datetime (if not already handled by orjson)
if isinstance(obj, datetime):
return obj.isoformat()
# Handle property objects (convert to string or something else)
if isinstance(obj, property):
return str(obj)
# Last resort: convert to string
return str(obj)
try:
logger.info(f"Starting streaming with results_gen type: {type(results_gen)}")
logger.info(f"Is results_gen async generator: {inspect.isasyncgen(results_gen)}")
# Check if results_gen is actually an async generator vs another type
if inspect.isasyncgen(results_gen):
logger.info("Processing as async generator")
async for result in results_gen:
try:
logger.info(f"Processing streaming result of type: {type(result)}")
# Check if this result is actually a CrawlResult
if hasattr(result, 'model_dump_json'):
server_memory_mb = _get_memory_mb()
result_json = result.model_dump_json()
result_dict = orjson.loads(result_json)
result_dict['server_memory_mb'] = server_memory_mb
if result_dict.get('pdf') is not None:
result_dict['pdf'] = b64encode(result_dict['pdf']).decode('utf-8')
logger.info(f"Streaming result for {result_dict.get('url', 'unknown')}")
data = orjson.dumps(result_dict, default=orjson_default).decode('utf-8') + "\n"
yield data.encode('utf-8')
else:
logger.error(f"Result doesn't have model_dump_json method: {type(result)}")
error_response = {"error": f"Invalid result type: {type(result)}", "url": "unknown"}
yield (orjson.dumps(error_response).decode('utf-8') + "\n").encode('utf-8')
except Exception as e:
logger.error(f"Serialization error: {e}")
logger.error(f"Result type was: {type(result)}")
error_response = {"error": str(e), "url": getattr(result, 'url', 'unknown')}
yield (orjson.dumps(error_response).decode('utf-8') + "\n").encode('utf-8')
else:
logger.error(f"results_gen is not an async generator: {type(results_gen)}")
error_response = {"error": f"Invalid results_gen type: {type(results_gen)}"}
yield (orjson.dumps(error_response).decode('utf-8') + "\n").encode('utf-8')
yield orjson.dumps({"status": "completed"}).decode('utf-8').encode('utf-8')
except asyncio.CancelledError:
logger.warning("Client disconnected during streaming")
finally:
# try:
# await crawler.close()
# except Exception as e:
# logger.error(f"Crawler cleanup error: {e}")
pass
async def handle_crawl_request(
urls: List[str],
browser_config: dict,
crawler_config: dict,
config: dict
) -> dict:
"""Handle non-streaming crawl requests."""
start_mem_mb = _get_memory_mb() # <--- Get memory before
start_time = time.time()
mem_delta_mb = None
peak_mem_mb = start_mem_mb
try:
urls = [('https://' + url) if not url.startswith(('http://', 'https://')) and not url.startswith(("raw:", "raw://")) else url for url in urls]
browser_config = BrowserConfig.load(browser_config)
crawler_config = CrawlerRunConfig.load(crawler_config)
dispatcher = MemoryAdaptiveDispatcher(
memory_threshold_percent=config["crawler"]["memory_threshold_percent"],
rate_limiter=RateLimiter(
base_delay=tuple(config["crawler"]["rate_limiter"]["base_delay"])
) if config["crawler"]["rate_limiter"]["enabled"] else None
)
from crawler_pool import get_crawler
crawler = await get_crawler(browser_config)
# crawler: AsyncWebCrawler = AsyncWebCrawler(config=browser_config)
# await crawler.start()
base_config = config["crawler"]["base_config"]
# Iterate over key-value pairs in base_config and use hasattr to set matching attributes on crawler_config
for key, value in base_config.items():
if hasattr(crawler_config, key):
setattr(crawler_config, key, value)
results = []
func = getattr(crawler, "arun" if len(urls) == 1 else "arun_many")
partial_func = partial(func,
urls[0] if len(urls) == 1 else urls,
config=crawler_config,
dispatcher=dispatcher)
results = await partial_func()
# await crawler.close()
end_mem_mb = _get_memory_mb() # <--- Get memory after
end_time = time.time()
if start_mem_mb is not None and end_mem_mb is not None:
mem_delta_mb = end_mem_mb - start_mem_mb # <--- Calculate delta
peak_mem_mb = max(peak_mem_mb if peak_mem_mb else 0, end_mem_mb) # <--- Get peak memory
logger.info(f"Memory usage: Start: {start_mem_mb} MB, End: {end_mem_mb} MB, Delta: {mem_delta_mb} MB, Peak: {peak_mem_mb} MB")
# Process results to handle PDF bytes
processed_results = []
for result in results:
# Use ORJSON serialization to handle property objects properly
result_json = result.model_dump_json()
result_dict = orjson.loads(result_json)
# If PDF exists, encode it to base64
if result_dict.get('pdf') is not None:
result_dict['pdf'] = b64encode(result_dict['pdf']).decode('utf-8')
processed_results.append(result_dict)
return {
"success": True,
"results": processed_results,
"server_processing_time_s": end_time - start_time,
"server_memory_delta_mb": mem_delta_mb,
"server_peak_memory_mb": peak_mem_mb
}
except Exception as e:
logger.error(f"Crawl error: {str(e)}", exc_info=True)
if 'crawler' in locals() and crawler.ready: # Check if crawler was initialized and started
# The pooled crawler is deliberately left open for reuse; only log the failure here.
# try:
# await crawler.close()
# except Exception as close_e:
# logger.error(f"Error closing crawler during exception handling: {close_e}")
logger.error(f"Crawl failed after crawler was initialized: {str(e)}")
# Measure memory even on error if possible
end_mem_mb_error = _get_memory_mb()
if start_mem_mb is not None and end_mem_mb_error is not None:
mem_delta_mb = end_mem_mb_error - start_mem_mb
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=json.dumps({ # Send structured error
"error": str(e),
"server_memory_delta_mb": mem_delta_mb,
"server_peak_memory_mb": max(peak_mem_mb if peak_mem_mb else 0, end_mem_mb_error or 0)
})
)
async def handle_stream_crawl_request(
urls: List[str],
browser_config: dict,
crawler_config: dict,
config: dict
) -> Tuple[AsyncWebCrawler, AsyncGenerator]:
"""Handle streaming crawl requests."""
try:
browser_config = BrowserConfig.load(browser_config)
# browser_config.verbose = True # Set to False or remove for production stress testing
browser_config.verbose = False
crawler_config = CrawlerRunConfig.load(crawler_config)
crawler_config.scraping_strategy = LXMLWebScrapingStrategy()
# Don't force stream=True here - let the deep crawl strategy control its own streaming behavior
# Apply global base config (this was missing!)
base_config = config["crawler"]["base_config"]
for key, value in base_config.items():
if hasattr(crawler_config, key):
print(f"[DEBUG] Applying base_config: {key} = {value}")
setattr(crawler_config, key, value)
print(f"[DEBUG] Deep crawl strategy: {type(crawler_config.deep_crawl_strategy).__name__ if crawler_config.deep_crawl_strategy else 'None'}")
print(f"[DEBUG] Stream mode: {crawler_config.stream}")
print(f"[DEBUG] Simulate user: {getattr(crawler_config, 'simulate_user', 'Not set')}")
dispatcher = MemoryAdaptiveDispatcher(
memory_threshold_percent=config["crawler"]["memory_threshold_percent"],
rate_limiter=RateLimiter(
base_delay=tuple(config["crawler"]["rate_limiter"]["base_delay"])
)
)
from crawler_pool import get_crawler
crawler = await get_crawler(browser_config)
# crawler = AsyncWebCrawler(config=browser_config)
# await crawler.start()
# Use correct method based on URL count (same as regular endpoint)
if len(urls) == 1:
# For single URL, use arun to get CrawlResult, then wrap in async generator
single_result_container = await crawler.arun(
url=urls[0],
config=crawler_config,
dispatcher=dispatcher
)
async def single_result_generator():
# Handle CrawlResultContainer - extract the actual results
if hasattr(single_result_container, '_results'):
# It's a CrawlResultContainer - iterate over the internal results
for result in single_result_container._results:
# Check if the result is an async generator (from deep crawl)
if hasattr(result, '__aiter__'):
async for sub_result in result:
yield sub_result
else:
yield result
elif hasattr(single_result_container, '__aiter__'):
# It's an async generator (from streaming deep crawl)
async for result in single_result_container:
yield result
elif hasattr(single_result_container, '__iter__') and not hasattr(single_result_container, 'url'):
# It's iterable but not a CrawlResult itself
for result in single_result_container:
# Check if each result is an async generator
if hasattr(result, '__aiter__'):
async for sub_result in result:
yield sub_result
else:
yield result
else:
# It's a single CrawlResult
yield single_result_container
results_gen = single_result_generator()
else:
# For multiple URLs, use arun_many
results_gen = await crawler.arun_many(
urls=urls,
config=crawler_config,
dispatcher=dispatcher
)
# If results_gen is a list (e.g., from deep crawl), convert to async generator
if isinstance(results_gen, list):
async def convert_list_to_generator():
for result in results_gen:
yield result
results_gen = convert_list_to_generator()
return crawler, results_gen
except Exception as e:
# Make sure to close crawler if started during an error here
if 'crawler' in locals() and crawler.ready:
# The pooled crawler is deliberately left open for reuse; only log the failure here.
# try:
# await crawler.close()
# except Exception as close_e:
# logger.error(f"Error closing crawler during stream setup exception: {close_e}")
logger.error(f"Stream setup failed after crawler was initialized: {str(e)}")
logger.error(f"Stream crawl error: {str(e)}", exc_info=True)
# Raising HTTPException here will prevent streaming response
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=str(e)
)
async def handle_crawl_job(
redis,
background_tasks: BackgroundTasks,
urls: List[str],
browser_config: Dict,
crawler_config: Dict,
config: Dict,
) -> Dict:
"""
Fire-and-forget version of handle_crawl_request.
Creates a task in Redis, runs the heavy work in a background task,
lets /crawl/job/{task_id} polling fetch the result.
"""
task_id = f"crawl_{uuid4().hex[:8]}"
await redis.hset(f"task:{task_id}", mapping={
"status": TaskStatus.PROCESSING, # <-- keep enum values consistent
"created_at": datetime.utcnow().isoformat(),
"url": json.dumps(urls), # store list as JSON string
"result": "",
"error": "",
})
async def _runner():
try:
result = await handle_crawl_request(
urls=urls,
browser_config=browser_config,
crawler_config=crawler_config,
config=config,
)
await redis.hset(f"task:{task_id}", mapping={
"status": TaskStatus.COMPLETED,
"result": json.dumps(result),
})
await asyncio.sleep(5) # Give Redis time to process the update
except Exception as exc:
await redis.hset(f"task:{task_id}", mapping={
"status": TaskStatus.FAILED,
"error": str(exc),
})
background_tasks.add_task(_runner)
return {"task_id": task_id}

View File

@@ -1,55 +0,0 @@
import os
from datetime import datetime, timedelta, timezone
from typing import Dict, Optional
from jwt import JWT, jwk_from_dict
from jwt.utils import get_int_from_datetime
from fastapi import Depends, HTTPException
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
from pydantic import BaseModel, EmailStr
import base64
instance = JWT()
security = HTTPBearer(auto_error=False)
SECRET_KEY = os.environ.get("SECRET_KEY", "mysecret")
ACCESS_TOKEN_EXPIRE_MINUTES = 60
def get_jwk_from_secret(secret: str):
"""Convert a secret string into a JWK object."""
secret_bytes = secret.encode('utf-8')
b64_secret = base64.urlsafe_b64encode(secret_bytes).rstrip(b'=').decode('utf-8')
return jwk_from_dict({"kty": "oct", "k": b64_secret})
def create_access_token(data: dict, expires_delta: Optional[timedelta] = None) -> str:
"""Create a JWT access token with an expiration."""
to_encode = data.copy()
expire = datetime.now(timezone.utc) + (expires_delta or timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES))
to_encode.update({"exp": get_int_from_datetime(expire)})
signing_key = get_jwk_from_secret(SECRET_KEY)
return instance.encode(to_encode, signing_key, alg='HS256')
def verify_token(credentials: HTTPAuthorizationCredentials = Depends(security)) -> Dict:
"""Verify the JWT token from the Authorization header."""
if credentials is None:
return None
token = credentials.credentials
verifying_key = get_jwk_from_secret(SECRET_KEY)
try:
payload = instance.decode(token, verifying_key, do_time_check=True, algorithms='HS256')
return payload
except Exception:
raise HTTPException(status_code=401, detail="Invalid or expired token")
def get_token_dependency(config: Dict):
"""Return the token dependency if JWT is enabled, else a function that returns None."""
if config.get("security", {}).get("jwt_enabled", False):
return verify_token
else:
return lambda: None
class TokenRequest(BaseModel):
email: EmailStr

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,91 +0,0 @@
# Application Configuration
app:
title: "Crawl4AI API"
version: "1.0.0"
host: "0.0.0.0"
port: 11234
reload: False
workers: 1
timeout_keep_alive: 300
# Default LLM Configuration
llm:
provider: "openai/gpt-4o-mini"
api_key_env: "OPENAI_API_KEY"
# api_key: sk-... # If you pass the API key directly then api_key_env will be ignored
# Redis Configuration
redis:
host: "localhost"
port: 6379
db: 0
password: ""
ssl: False
ssl_cert_reqs: None
ssl_ca_certs: None
ssl_certfile: None
ssl_keyfile: None
# Rate Limiting Configuration
rate_limiting:
enabled: True
default_limit: "1000/minute"
trusted_proxies: []
storage_uri: "memory://" # Use "redis://localhost:6379" for production
# Security Configuration
security:
enabled: false
jwt_enabled: false
https_redirect: false
trusted_hosts: ["*"]
headers:
x_content_type_options: "nosniff"
x_frame_options: "DENY"
content_security_policy: "default-src 'self'"
strict_transport_security: "max-age=63072000; includeSubDomains"
# Crawler Configuration
crawler:
base_config:
simulate_user: true
memory_threshold_percent: 95.0
rate_limiter:
enabled: true
base_delay: [1.0, 2.0]
timeouts:
stream_init: 30.0 # Timeout for stream initialization
batch_process: 300.0 # Timeout for batch processing
pool:
max_pages: 40 # ← GLOBAL_SEM permits
idle_ttl_sec: 1800 # ← 30 min janitor cutoff
browser:
kwargs:
headless: true
text_mode: true
extra_args:
# - "--single-process"
- "--no-sandbox"
- "--disable-dev-shm-usage"
- "--disable-gpu"
- "--disable-software-rasterizer"
- "--disable-web-security"
- "--allow-insecure-localhost"
- "--ignore-certificate-errors"
# Logging Configuration
logging:
level: "INFO"
format: "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
# Observability Configuration
observability:
prometheus:
enabled: True
endpoint: "/metrics"
health_check:
endpoint: "/health"

View File

@@ -1,60 +0,0 @@
# crawler_pool.py (new file)
import asyncio, json, hashlib, time, psutil
from contextlib import suppress
from typing import Dict
from crawl4ai import AsyncWebCrawler, BrowserConfig
from utils import load_config
CONFIG = load_config()
POOL: Dict[str, AsyncWebCrawler] = {}
LAST_USED: Dict[str, float] = {}
LOCK = asyncio.Lock()
MEM_LIMIT = CONFIG.get("crawler", {}).get("memory_threshold_percent", 95.0) # % RAM refuse new browsers above this
IDLE_TTL = CONFIG.get("crawler", {}).get("pool", {}).get("idle_ttl_sec", 1800) # close if unused for 30min
def _sig(cfg: BrowserConfig) -> str:
payload = json.dumps(cfg.to_dict(), sort_keys=True, separators=(",",":"))
return hashlib.sha1(payload.encode()).hexdigest()
async def get_crawler(cfg: BrowserConfig) -> AsyncWebCrawler:
try:
sig = _sig(cfg)
async with LOCK:
if sig in POOL:
LAST_USED[sig] = time.time();
return POOL[sig]
if psutil.virtual_memory().percent >= MEM_LIMIT:
raise MemoryError("RAM pressure new browser denied")
crawler = AsyncWebCrawler(config=cfg, thread_safe=False)
await crawler.start()
POOL[sig] = crawler; LAST_USED[sig] = time.time()
return crawler
except MemoryError as e:
raise MemoryError(f"RAM pressure new browser denied: {e}")
except Exception as e:
raise RuntimeError(f"Failed to start browser: {e}")
finally:
# Refresh the last-used timestamp for a healthy entry; if startup failed,
# make sure a half-initialized browser is not left in the pool.
if 'sig' in locals() and sig in POOL:
LAST_USED[sig] = time.time()
elif 'sig' in locals():
POOL.pop(sig, None)
LAST_USED.pop(sig, None)
async def close_all():
async with LOCK:
await asyncio.gather(*(c.close() for c in POOL.values()), return_exceptions=True)
POOL.clear(); LAST_USED.clear()
async def janitor():
while True:
await asyncio.sleep(60)
now = time.time()
async with LOCK:
for sig, crawler in list(POOL.items()):
if now - LAST_USED[sig] > IDLE_TTL:
with suppress(Exception): await crawler.close()
POOL.pop(sig, None); LAST_USED.pop(sig, None)

View File

@@ -1,101 +0,0 @@
"""
Job endpoints (enqueue + poll) for long-running LLM extraction and raw crawl.
Relies on the existing Redis task helpers in api.py
"""
from typing import Dict, Optional, Callable
from fastapi import APIRouter, BackgroundTasks, Depends, Request
from pydantic import BaseModel, HttpUrl
from api import (
handle_llm_request,
handle_crawl_job,
handle_task_status,
)
# ------------- dependency placeholders -------------
_redis = None # will be injected from server.py
_config = None
_token_dep: Callable = lambda: None # dummy until injected
# public router
router = APIRouter()
# === init hook called by server.py =========================================
def init_job_router(redis, config, token_dep) -> APIRouter:
"""Inject shared singletons and return the router for mounting."""
global _redis, _config, _token_dep
_redis, _config, _token_dep = redis, config, token_dep
return router
# ---------- payload models --------------------------------------------------
class LlmJobPayload(BaseModel):
url: HttpUrl
q: str
schema: Optional[str] = None
cache: bool = False
provider: Optional[str] = None
class CrawlJobPayload(BaseModel):
urls: list[HttpUrl]
browser_config: Dict = {}
crawler_config: Dict = {}
# ---------- LLM job ---------------------------------------------------------
@router.post("/llm/job", status_code=202)
async def llm_job_enqueue(
payload: LlmJobPayload,
background_tasks: BackgroundTasks,
request: Request,
_td: Dict = Depends(lambda: _token_dep()), # late-bound dep
):
return await handle_llm_request(
_redis,
background_tasks,
request,
str(payload.url),
query=payload.q,
schema=payload.schema,
cache=payload.cache,
config=_config,
provider=payload.provider,
)
@router.get("/llm/job/{task_id}")
async def llm_job_status(
request: Request,
task_id: str,
_td: Dict = Depends(lambda: _token_dep())
):
return await handle_task_status(_redis, task_id)
# ---------- CRAWL job -------------------------------------------------------
@router.post("/crawl/job", status_code=202)
async def crawl_job_enqueue(
payload: CrawlJobPayload,
background_tasks: BackgroundTasks,
_td: Dict = Depends(lambda: _token_dep()),
):
return await handle_crawl_job(
_redis,
background_tasks,
[str(u) for u in payload.urls],
payload.browser_config,
payload.crawler_config,
config=_config,
)
@router.get("/crawl/job/{task_id}")
async def crawl_job_status(
request: Request,
task_id: str,
_td: Dict = Depends(lambda: _token_dep())
):
return await handle_task_status(_redis, task_id, base_url=str(request.base_url))

View File

@@ -1,252 +0,0 @@
# deploy/docker/mcp_bridge.py
from __future__ import annotations
import inspect, json, re, anyio
from contextlib import suppress
from typing import Any, Callable, Dict, List, Tuple
import httpx
from fastapi import FastAPI, WebSocket, WebSocketDisconnect, HTTPException
from fastapi.responses import JSONResponse
from fastapi import Request
from sse_starlette.sse import EventSourceResponse
from pydantic import BaseModel
from mcp.server.sse import SseServerTransport
import mcp.types as t
from mcp.server.lowlevel.server import Server, NotificationOptions
from mcp.server.models import InitializationOptions
# ── optin decorators ───────────────────────────────────────────
def mcp_resource(name: str | None = None):
def deco(fn):
fn.__mcp_kind__, fn.__mcp_name__ = "resource", name
return fn
return deco
def mcp_template(name: str | None = None):
def deco(fn):
fn.__mcp_kind__, fn.__mcp_name__ = "template", name
return fn
return deco
def mcp_tool(name: str | None = None):
def deco(fn):
fn.__mcp_kind__, fn.__mcp_name__ = "tool", name
return fn
return deco
# ── HTTPproxy helper for FastAPI endpoints ─────────────────────
def _make_http_proxy(base_url: str, route):
method = list(route.methods - {"HEAD", "OPTIONS"})[0]
async def proxy(**kwargs):
# replace `/items/{id}` style params first
path = route.path
for k, v in list(kwargs.items()):
placeholder = "{" + k + "}"
if placeholder in path:
path = path.replace(placeholder, str(v))
kwargs.pop(k)
url = base_url.rstrip("/") + path
async with httpx.AsyncClient() as client:
try:
r = (
await client.get(url, params=kwargs)
if method == "GET"
else await client.request(method, url, json=kwargs)
)
r.raise_for_status()
return r.text if method == "GET" else r.json()
except httpx.HTTPStatusError as e:
# surface FastAPI error details instead of plain 500
raise HTTPException(e.response.status_code, e.response.text)
return proxy
# ── main entry point ────────────────────────────────────────────
def attach_mcp(
app: FastAPI,
*, # keywordonly
base: str = "/mcp",
name: str | None = None,
base_url: str, # eg. "http://127.0.0.1:8020"
) -> None:
"""Call once after all routes are declared to expose WS+SSE MCP endpoints."""
server_name = name or app.title or "FastAPI-MCP"
mcp = Server(server_name)
# tools: Dict[str, Callable] = {}
tools: Dict[str, Tuple[Callable, Callable]] = {}
resources: Dict[str, Callable] = {}
templates: Dict[str, Callable] = {}
# register decorated FastAPI routes
for route in app.routes:
fn = getattr(route, "endpoint", None)
kind = getattr(fn, "__mcp_kind__", None)
if not kind:
continue
key = fn.__mcp_name__ or re.sub(r"[/{}}]", "_", route.path).strip("_")
# if kind == "tool":
# tools[key] = _make_http_proxy(base_url, route)
if kind == "tool":
proxy = _make_http_proxy(base_url, route)
tools[key] = (proxy, fn)
continue
if kind == "resource":
resources[key] = fn
if kind == "template":
templates[key] = fn
# helpers for JSONSchema
def _schema(model: type[BaseModel] | None) -> dict:
return {"type": "object"} if model is None else model.model_json_schema()
def _body_model(fn: Callable) -> type[BaseModel] | None:
for p in inspect.signature(fn).parameters.values():
a = p.annotation
if inspect.isclass(a) and issubclass(a, BaseModel):
return a
return None
# MCP handlers
@mcp.list_tools()
async def _list_tools() -> List[t.Tool]:
out = []
for k, (proxy, orig_fn) in tools.items():
desc = getattr(orig_fn, "__mcp_description__", None) or inspect.getdoc(orig_fn) or ""
schema = getattr(orig_fn, "__mcp_schema__", None) or _schema(_body_model(orig_fn))
out.append(
t.Tool(name=k, description=desc, inputSchema=schema)
)
return out
@mcp.call_tool()
async def _call_tool(name: str, arguments: Dict | None) -> List[t.TextContent]:
if name not in tools:
raise HTTPException(404, "tool not found")
proxy, _ = tools[name]
try:
res = await proxy(**(arguments or {}))
except HTTPException as exc:
# map serverside errors into MCP "text/error" payloads
err = {"error": exc.status_code, "detail": exc.detail}
return [t.TextContent(type = "text", text=json.dumps(err))]
return [t.TextContent(type = "text", text=json.dumps(res, default=str))]
@mcp.list_resources()
async def _list_resources() -> List[t.Resource]:
return [
t.Resource(name=k, description=inspect.getdoc(f) or "", mime_type="application/json")
for k, f in resources.items()
]
@mcp.read_resource()
async def _read_resource(name: str) -> List[t.TextContent]:
if name not in resources:
raise HTTPException(404, "resource not found")
res = resources[name]()
return [t.TextContent(type = "text", text=json.dumps(res, default=str))]
@mcp.list_resource_templates()
async def _list_templates() -> List[t.ResourceTemplate]:
return [
t.ResourceTemplate(
name=k,
description=inspect.getdoc(f) or "",
parameters={
p: {"type": "string"} for p in _path_params(app, f)
},
)
for k, f in templates.items()
]
init_opts = InitializationOptions(
server_name=server_name,
server_version="0.1.0",
capabilities=mcp.get_capabilities(
notification_options=NotificationOptions(),
experimental_capabilities={},
),
)
# ── WebSocket transport ────────────────────────────────────
@app.websocket_route(f"{base}/ws")
async def _ws(ws: WebSocket):
await ws.accept()
c2s_send, c2s_recv = anyio.create_memory_object_stream(100)
s2c_send, s2c_recv = anyio.create_memory_object_stream(100)
from pydantic import TypeAdapter
from mcp.types import JSONRPCMessage
adapter = TypeAdapter(JSONRPCMessage)
init_done = anyio.Event()
async def srv_to_ws():
first = True
try:
async for msg in s2c_recv:
await ws.send_json(msg.model_dump())
if first:
init_done.set()
first = False
finally:
# make sure cleanup survives TaskGroup cancellation
with anyio.CancelScope(shield=True):
with suppress(RuntimeError): # idempotent close
await ws.close()
async def ws_to_srv():
try:
# 1st frame is always "initialize"
first = adapter.validate_python(await ws.receive_json())
await c2s_send.send(first)
await init_done.wait() # block until server ready
while True:
data = await ws.receive_json()
await c2s_send.send(adapter.validate_python(data))
except WebSocketDisconnect:
await c2s_send.aclose()
async with anyio.create_task_group() as tg:
tg.start_soon(mcp.run, c2s_recv, s2c_send, init_opts)
tg.start_soon(ws_to_srv)
tg.start_soon(srv_to_ws)
# ── SSE transport (official) ─────────────────────────────
sse = SseServerTransport(f"{base}/messages/")
@app.get(f"{base}/sse")
async def _mcp_sse(request: Request):
async with sse.connect_sse(
request.scope, request.receive, request._send # starlette ASGI primitives
) as (read_stream, write_stream):
await mcp.run(read_stream, write_stream, init_opts)
# client → server frames are POSTed here
app.mount(f"{base}/messages", app=sse.handle_post_message)
# ── schema endpoint ───────────────────────────────────────
@app.get(f"{base}/schema")
async def _schema_endpoint():
return JSONResponse({
"tools": [x.model_dump() for x in await _list_tools()],
"resources": [x.model_dump() for x in await _list_resources()],
"resource_templates": [x.model_dump() for x in await _list_templates()],
})
# ── helpers ────────────────────────────────────────────────────
def _route_name(path: str) -> str:
return re.sub(r"[/{}}]", "_", path).strip("_")
def _path_params(app: FastAPI, fn: Callable) -> List[str]:
for r in app.routes:
if r.endpoint is fn:
return list(r.param_convertors.keys())
return []

View File

@@ -1,17 +0,0 @@
fastapi>=0.115.12
uvicorn>=0.34.2
gunicorn>=23.0.0
slowapi==0.1.9
prometheus-fastapi-instrumentator>=7.1.0
redis>=5.2.1
jwt>=1.3.1
dnspython>=2.7.0
email-validator==2.2.0
sse-starlette==2.2.1
pydantic>=2.11
rank-bm25==0.2.2
anyio==4.9.0
PyJWT==2.10.1
mcp>=1.6.0
websockets>=15.0.1
httpx[http2]>=0.27.2

Some files were not shown because too many files have changed in this diff