Compare commits

190 commits — unclecode … v0.6.0rc1

Commit SHA1s:

b0aa8bc9f7, c98ffe2130, 4812f08a73, f3ebb38edf, 0007aea204, b5c25731e6, 5297e362f3, a58c8000aa, b27bb367e8, d2648eaa39,
c2902fd200, 16b2318242, 907cba194f, 3bf78ff47a, 921e0c46b6, fd899f66aa, 30ec4f571f, 7db6b468d9, eed7f88f29, 94d486579c,
5206c6f2d6, 230f22da86, 793668a413, 82aa53aa59, cd7ff6f9c1, c56974cf59, dcc265458c, ecec53a8c1, 7d8e81fb2e, 9fc5d315af,
d84508b4d5, 022f5c9e25, 3179d6ad0c, 18e8227dfb, 7c358a1aee, 108b2a8bfb, 66ac07b4f3, a2061bf31e, 6f7ab9c927, 9038e9acbd,
02e627e0bd, 5b66208a7e, 591f55edc7, e1d9e2489c, b1693b1c21, 49d904ca0a, ca9351252a, 935d9d39f8, f8213c32b9, 14894b4d70,
7155778eac, 4133e5460d, 73fda8a6ec, 86df20234b, 179921a131, 9e16a4bb26, c5cac2b459, 555455d710, 765f856ed4, 757e3177ed,
d8357e80d2, ef1f0c4102, 1119f2f5b5, bb02398086, 3ff7eec8f3, d8cbeff386, 64f20ab44a, 57e0423b3a, c635f6b9a2, 7be5427283,
7f93e88379, 40d4dd36c9, d8f38f2298, 5c88d1310d, 4a20d7f7c2, 585e5e5973, e3111d0a32, 2f0e217751, 6405cf0a6f, 6eed4adc65,
bdd9db579a, 1107fa1d62, efa73257c5, 8c08521301, 462d5765e2, 6eeb2e4076, 0094cac675, 4ab0893ffb, e01d1e73e1, 471d110c5e,
f89113377a, 6740e87b4d, 8b761f232b, e0c2a7c284, ac2f9ae533, eedda1ae5c, 8cecbec7a7, 6432ff1257, 4359b12003, 5358ac0fc2,
529a79725e, 9109ecd8fc, 84883be513, 79328e4292, a24799918c, a31d7b86be, 7884a98be7, c190ba816d, a3954dd4c6, 6e3c048328,
b750542e6d, cbb8755972, dc36997a08, 1630fbdafe, 341b7a5f2a, 9547bada3a, 9d69fce834, c6a605ccce, 4aeb7ef9ad, a68cbb232b,
e1b3bfe6fb, f78c46446b, 1b72880007, 29f7915b79, 2327db6fdc, fd02dc782d, 3a234ec950, 9e89d27fcd, b3ec7ce960, baee4949d3,
14fe5ef873, fc425023f5, 9c58e4ce2e, df6a6d5f4f, e896c08f9c, 56bc3c6e45, cbef406f9b, 8a76563018, 415c1c5bee, f334daa979,
504207faa6, d024749633, f14e4a4b67, 1e819cdb26, 5edfea279d, c612f9a852, 95175cb394, cba4a466e5, 7c1705712d, a9e24307cc,
3a87b4e43b, 4bcd4cbda1, 71ce01c9e1, c6d48080a4, 46d2f12851, 367cd71db9, 2af958e12c, 3cb28875c3, dad592c801, c171891999,
3b1025abbb, f00dcc276f, 392c923980, 2864015469, 8bb799068e, 063df572b0, 966fb47e64, 43e09da694, 69705df0b3, 91a5fea11f,
467be9ac76, 19df96ed56, b957ff2ecd, 91073c1244, 926beee832, a9415aaaf6, c308a794e8, bc7559586f, 04bc643cec, 33a21d6a7a,
7b1ef07c41, 2f15976b34, 20920fa17b, 53ac3ec0b4, ce4f04dad2, f81712eb91, 31938fb922, f8fd9d9eff, dde14eba7d, 54c84079c4

.github/workflows/main.yml (vendored, new file, +35 lines)

```yaml
name: Discord GitHub Notifications

on:
  issues:
    types: [opened]
  issue_comment:
    types: [created]
  pull_request:
    types: [opened]
  discussion:
    types: [created]

jobs:
  notify-discord:
    runs-on: ubuntu-latest
    steps:
      - name: Set webhook based on event type
        id: set-webhook
        run: |
          if [ "${{ github.event_name }}" == "discussion" ]; then
            echo "webhook=${{ secrets.DISCORD_DISCUSSIONS_WEBHOOK }}" >> $GITHUB_OUTPUT
          else
            echo "webhook=${{ secrets.DISCORD_WEBHOOK }}" >> $GITHUB_OUTPUT
          fi

      - name: Discord Notification
        uses: Ilshidur/action-discord@master
        env:
          DISCORD_WEBHOOK: ${{ steps.set-webhook.outputs.webhook }}
        with:
          args: |
            ${{ github.event_name == 'issues' && format('📣 New issue created: **{0}** by {1} - {2}', github.event.issue.title, github.event.issue.user.login, github.event.issue.html_url) ||
            github.event_name == 'issue_comment' && format('💬 New comment on issue **{0}** by {1} - {2}', github.event.issue.title, github.event.comment.user.login, github.event.comment.html_url) ||
            github.event_name == 'pull_request' && format('🔄 New PR opened: **{0}** by {1} - {2}', github.event.pull_request.title, github.event.pull_request.user.login, github.event.pull_request.html_url) ||
            format('💬 New discussion started: **{0}** by {1} - {2}', github.event.discussion.title, github.event.discussion.user.login, github.event.discussion.html_url) }}
```

.gitignore (vendored, 32 lines changed)

```
@@ -232,5 +232,33 @@ plans/
.codeiumignore
todo/

# windsurf rules
.windsurfrules
# Continue development files
.continue/
.continuerc.json
continue.lock
continue_core.log
contextProviders/
continue_workspace/
.continue-cache/
continue_config.json

# Continue temporary files
.continue-temp/
.continue-logs/
.continue-downloads/

# Continue VS Code specific
.vscode-continue/
.vscode-continue-cache/

.prompts/

.llm.env
.private/

CLAUDE_MONITOR.md
CLAUDE.md

tests/**/test_site
tests/**/reports
tests/**/benchmark_reports
```

CHANGELOG.md (250 lines changed)

@@ -5,10 +5,250 @@ All notable changes to Crawl4AI will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

---

## [0.6.0rc1] - 2025-04-22

### Added
- Browser pooling with page pre-warming and fine-grained **geolocation, locale, and timezone** controls (see the sketch below)
- Crawler pool manager (SDK + Docker API) for smarter resource allocation
- Network & console log capture plus MHTML snapshot export
- **Table extractor**: turn HTML `<table>`s into DataFrames or CSV with one flag
- High-volume stress-test framework in `tests/memory` and API load scripts
- MCP protocol endpoints with socket & SSE support; playground UI scaffold
- Docs v2 revamp: TOC, GitHub badge, copy-code buttons, Docker API demo
- "Ask AI" helper button *(work-in-progress, shipping soon)*
- New examples: geolocation usage, network/console capture, Docker API, markdown source selection, crypto analysis
- Expanded automated test suites for browser, Docker, MCP and memory benchmarks
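
The new identity controls are configured per run. A minimal sketch of the geolocation/locale/timezone flow, assuming `GeolocationConfig` is exported from the top-level package and that `CrawlerRunConfig` accepts `geolocation`, `locale`, and `timezone_id` keyword arguments (names inferred from the 2025-04-21 notes below; the coordinates are placeholders):

```python
import asyncio

from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, GeolocationConfig

async def main():
    # Placeholder coordinates (Paris); accuracy is assumed to be in meters.
    geo = GeolocationConfig(latitude=48.8566, longitude=2.3522, accuracy=100.0)
    config = CrawlerRunConfig(
        geolocation=geo,             # GPS position reported to the page
        locale="fr-FR",              # Accept-Language / JS locale
        timezone_id="Europe/Paris",  # JS Date()/Intl timezone
    )
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun("https://example.com", config=config)
        print(result.markdown[:300])

asyncio.run(main())
```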

### Changed
- Consolidated and renamed browser strategies; legacy docker strategy modules removed
- `ProxyConfig` moved to `async_configs`
- Server migrated to pool-based crawler management
- FastAPI validators replace custom query validation
- Docker build now uses Chromium base image
- Large-scale repo tidy-up (≈36k insertions, ≈5k deletions)

### Fixed
- Async crawler session leak, duplicate-visit handling, URL normalisation
- Target-element regressions in scraping strategies
- Logged-URL readability, encoded-URL decoding, middle truncation for long URLs
- Closed issues: #701, #733, #756, #774, #804, #822, #839, #841, #842, #843, #867, #902, #911

### Removed
- Obsolete modules under `crawl4ai/browser/*` superseded by the new pooled browser layer

### Deprecated
- Old markdown generator names now alias `DefaultMarkdownGenerator` and emit warnings

---

#### Upgrade notes
1. Update any direct imports from `crawl4ai/browser/*` to the new pooled browser modules
2. If you override `AsyncPlaywrightCrawlerStrategy.get_page`, adopt the new signature
3. Rebuild Docker images to pull the new Chromium layer
4. Switch to `DefaultMarkdownGenerator` (or silence the deprecation warning)

---

`121 files changed, ≈36,223 insertions, ≈4,975 deletions`

### [Feature] 2025-04-21
- Implemented MCP protocol for machine-to-machine communication
- Added WebSocket and SSE transport for MCP server
- Exposed server endpoints via MCP protocol
- Created tests for MCP socket and SSE communication
- Enhanced Docker server with file handling and intelligent search
- Added PDF and screenshot endpoints with file saving capability
- Added JavaScript execution endpoint for page interaction
- Implemented advanced context search with BM25 and code chunking
- Added file path output support for generated assets
- Improved server endpoints and API surface
- Added intelligent context search with query filtering
- Added syntax-aware code function chunking
- Implemented efficient HTML processing pipeline
- Added support for controlling browser geolocation via new GeolocationConfig class
- Added locale and timezone configuration options to CrawlerRunConfig
- Added example script demonstrating geolocation and locale usage
- Added documentation for location-based identity features

### [Refactor] 2025-04-20
- Replaced crawler_manager.py with simpler crawler_pool.py implementation
- Added global page semaphore for hard concurrency cap
- Implemented browser pool with idle cleanup
- Added playground UI for testing and stress testing
- Updated API handlers to use pooled crawlers
- Enhanced logging levels and symbols
- Added memory tests and stress test utilities

### [Added] 2025-04-17
- Added content source selection feature for markdown generation
- New `content_source` parameter allows choosing between `cleaned_html`, `raw_html`, and `fit_html`
- Provides flexibility in how HTML content is processed before markdown conversion
- Added examples and documentation for the new feature (see the sketch below)
- Includes backward compatibility with default `cleaned_html` behavior
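
A minimal sketch of the content-source selection, assuming the generator is wired in through `CrawlerRunConfig`'s `markdown_generator` option (the import path matches the `crawl4ai/markdown_generation_strategy.py` module named in the development journal below):

```python
import asyncio

from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator

async def main():
    # Build markdown from the original page HTML instead of the cleaned version;
    # the other options are "cleaned_html" (default) and "fit_html".
    md_generator = DefaultMarkdownGenerator(content_source="raw_html")
    config = CrawlerRunConfig(markdown_generator=md_generator)
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun("https://example.com", config=config)
        print(result.markdown[:300])

asyncio.run(main())
```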

## Version 0.5.0.post5 (2025-03-14)

### Added

- *(crawler)* Add experimental parameters dictionary to CrawlerRunConfig to support beta features
- *(tables)* Add comprehensive table detection and extraction functionality with scoring system (see the sketch below)
- *(monitor)* Add real-time crawler monitoring system with memory management
- *(content)* Add target_elements parameter for selective content extraction
- *(browser)* Add standalone CDP browser launch capability
- *(schema)* Add preprocess_html_for_schema utility for better HTML cleaning
- *(api)* Add special handling for single URL requests in Docker API
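
A sketch of the table-extraction flow; the `table_score_threshold` knob and the `result.media["tables"]` layout (lists of `headers` and `rows`) are assumptions about the scoring-based extractor, not confirmed by this diff:

```python
import asyncio

import pandas as pd

from crawl4ai import AsyncWebCrawler, CrawlerRunConfig

async def main():
    # Only tables scoring at or above the threshold are kept (assumed knob).
    config = CrawlerRunConfig(table_score_threshold=7)
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun("https://example.com/stats", config=config)
        for table in result.media.get("tables", []):  # assumed result shape
            df = pd.DataFrame(table["rows"], columns=table["headers"])
            print(df.head())

asyncio.run(main())
```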

### Changed

- *(filters)* Add reverse option to URLPatternFilter for inverting filter logic
- *(browser)* Make CSP nonce headers optional via experimental config
- *(browser)* Remove default cookie injection from page initialization
- *(crawler)* Optimize response handling for single-URL processing
- *(api)* Refactor crawl request handling to streamline processing
- *(config)* Update default provider to gpt-4o
- *(cache)* Change default cache_mode from aggressive to bypass in examples

### Fixed

- *(browser)* Clean up browser context creation code
- *(api)* Improve code formatting in API handler

### Breaking Changes

- WebScrapingStrategy no longer returns 'scraped_html' in its output dictionary
- Table extraction logic has been modified to better handle thead/tbody structures
- Default cookie injection has been removed from page initialization

## Version 0.5.0 (2025-03-02)

### Added

- *(profiles)* Add BrowserProfiler class for dedicated browser profile management
- *(cli)* Add interactive profile management to CLI with rich UI
- *(profiles)* Add ability to crawl directly from profile management interface
- *(browser)* Support identity-based browsing with persistent profiles
- *(deep-crawling)* Add max_pages parameter to limit the number of pages crawled in all deep crawling strategies
- *(deep-crawling)* Add score_threshold parameter to BFS and DFS strategies to filter URLs by score (see the sketch below)
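
A sketch of a bounded deep crawl; `BFSDeepCrawlStrategy` and the `crawl4ai.deep_crawling` import path are assumed names for the BFS strategy, while `max_pages` and `score_threshold` come straight from the entries above:

```python
import asyncio

from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
from crawl4ai.deep_crawling import BFSDeepCrawlStrategy  # assumed module path

async def main():
    strategy = BFSDeepCrawlStrategy(
        max_depth=2,
        max_pages=10,         # stop after 10 pages across the whole crawl
        score_threshold=0.3,  # skip links whose score falls below 0.3
    )
    config = CrawlerRunConfig(deep_crawl_strategy=strategy)
    async with AsyncWebCrawler() as crawler:
        results = await crawler.arun("https://docs.crawl4ai.com", config=config)
        print(f"Crawled {len(results)} pages")

asyncio.run(main())
```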

### Changed

- *(browser)* Refactor profile management from ManagedBrowser to BrowserProfiler class
- *(cli)* Enhance CLI with profile selection and status display for crawling
- *(examples)* Update identity-based browsing example to use BrowserProfiler class
- *(docs)* Update identity-based crawling documentation
- *(docs)* Update deep crawling documentation with max_pages and score_threshold parameters
- *(examples)* Add example demonstrating the use of max_pages and score_threshold parameters

### Fixed

- *(browser)* Fix profile detection and management on different platforms
- *(cli)* Fix CLI command structure for better user experience
- *(deep-crawling)* Improve BFS and DFS strategies to handle page count limits more efficiently

## Version 0.5.0 (2025-02-21)

### Added

- *(crawler)* [**breaking**] Add memory-adaptive dispatcher with rate limiting
- *(scraping)* [**breaking**] Add LXML-based scraping mode for improved performance
- *(content-filter)* Add LLMContentFilter for intelligent markdown generation
- *(dispatcher)* [**breaking**] Add streaming support for URL processing
- *(browser)* [**breaking**] Improve browser context management and add shared data support
- *(config)* [**breaking**] Add streaming support and config cloning
- *(crawler)* Add URL redirection tracking
- *(extraction)* Add LLM-powered schema generation utility
- *(proxy)* Add proxy configuration support to CrawlerRunConfig
- *(robots)* Add robots.txt compliance support (see the sketch below)
- *(release)* [**breaking**] Prepare v0.4.3 beta release
- *(proxy)* Add proxy rotation support and documentation
- *(browser)* Add CDP URL configuration support
- *(demo)* Uncomment feature demos and add fake-useragent dependency
- *(pdf)* Add PDF processing capabilities
- *(crawler)* [**breaking**] Enhance JavaScript execution and PDF processing
- *(docker)* Add Docker deployment configuration and API server
- *(docker)* Add Docker service integration and config serialization
- *(docker)* [**breaking**] Enhance Docker deployment setup and configuration
- *(api)* Improve cache handling and add API tests
- *(crawler)* [**breaking**] Add deep crawling capabilities with BFS strategy
- *(proxy)* [**breaking**] Add proxy rotation strategy
- *(deep-crawling)* Add DFS strategy and update exports; refactor CLI entry point
- *(cli)* Add command line interface with comprehensive features
- *(config)* Enhance serialization and add deep crawling exports
- *(crawler)* Add HTTP crawler strategy for lightweight web scraping
- *(docker)* [**breaking**] Implement supervisor and secure API endpoints
- *(docker)* [**breaking**] Add JWT authentication and improve server architecture
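
A sketch combining two of the additions above; `check_robots_txt` and `stream` are assumed flag names on `CrawlerRunConfig` for the robots.txt compliance and streaming features:

```python
import asyncio

from crawl4ai import AsyncWebCrawler, CrawlerRunConfig

async def main():
    config = CrawlerRunConfig(
        check_robots_txt=True,  # honor robots.txt (assumed flag name)
        stream=True,            # yield results as they finish
    )
    urls = ["https://example.com", "https://example.org"]
    async with AsyncWebCrawler() as crawler:
        async for result in await crawler.arun_many(urls, config=config):
            print(result.url, "ok" if result.success else "skipped/failed")

asyncio.run(main())
```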

### Changed

- *(browser)* Update browser channel default to 'chromium' in BrowserConfig.from_args method
- *(crawler)* Optimize response handling and default settings
- *(crawler)* Update hello_world example with proper content filtering
- Update hello_world.py example
- *(docs)* [**breaking**] Reorganize documentation structure and update styles
- *(dispatcher)* [**breaking**] Migrate to modular dispatcher system with enhanced monitoring
- *(scraping)* [**breaking**] Replace ScrapingMode enum with strategy pattern
- *(browser)* Improve browser path management
- *(models)* Rename final_url to redirected_url for consistency
- *(core)* [**breaking**] Improve type hints and remove unused file
- *(docs)* Improve code formatting in features demo
- *(user-agent)* Improve user agent generation system
- *(core)* [**breaking**] Reorganize project structure and remove legacy code
- *(docker)* Clean up import statements in server.py
- *(docker)* Remove unused models and utilities for cleaner codebase
- *(docker)* [**breaking**] Improve server architecture and configuration
- *(deep-crawl)* [**breaking**] Reorganize deep crawling functionality into dedicated module
- *(deep-crawling)* [**breaking**] Reorganize deep crawling strategies and add new implementations
- *(crawling)* [**breaking**] Improve type hints and code cleanup
- *(crawler)* [**breaking**] Improve HTML handling and cleanup codebase
- *(crawler)* [**breaking**] Remove content filter functionality
- *(examples)* Update API usage in features demo
- *(config)* [**breaking**] Enhance serialization and config handling

### Docs

- Add Code of Conduct for the project (#410)

### Documentation

- *(extraction)* Add clarifying comments for CSS selector behavior
- *(readme)* Update personal story and project vision
- *(urls)* [**breaking**] Update documentation URLs to new domain
- *(api)* Add streaming mode documentation and examples
- *(readme)* Update version and feature announcements for v0.4.3b1
- *(examples)* Update demo scripts and fix output formats
- *(examples)* Update v0.4.3 features demo to v0.4.3b2
- *(readme)* Update version references and fix links
- *(multi-url)* [**breaking**] Improve documentation clarity and update examples
- *(examples)* Update proxy rotation demo and disable other demos
- *(api)* Improve formatting and readability of API documentation
- *(examples)* Add SERP API project example
- *(urls)* Update documentation URLs to new domain
- *(readme)* Resolve merge conflict and update version info

### Fixed

- *(browser)* Update default browser channel to chromium and simplify channel selection logic
- *(browser)* [**breaking**] Default to Chromium channel for new headless mode (#387)
- *(browser)* Resolve merge conflicts in browser channel configuration
- Prevent memory leaks by ensuring proper closure of Playwright pages
- Fix broken long-page screenshot capture (#403)
- *(extraction)* JsonCss selector and crawler improvements
- *(models)* [**breaking**] Make model fields optional with default values
- *(dispatcher)* Adjust memory threshold and fix dispatcher initialization
- *(install)* Ensure proper exit after running doctor command

### Miscellaneous Tasks

- *(cleanup)* Remove unused files and improve type hints
- Add .gitattributes file

## License Update

Crawl4AI v0.5.0 updates the license to Apache 2.0 *with a required attribution clause*. This means you are free to use, modify, and distribute Crawl4AI (even commercially), but you *must* clearly attribute the project in any public use or distribution. See the updated `LICENSE` file for the full legal text and specific requirements.

---

## Version 0.4.3b2 (2025-01-21)

@@ -286,12 +526,6 @@ This release introduces several powerful new features, including robots.txt compliance
- Fixed potential viewport mismatches by ensuring consistent use of `self.viewport_width` and `self.viewport_height` throughout the code.
- Improved robustness of dynamic content loading to avoid timeouts and failed evaluations.

## [0.3.75] December 1, 2024

### PruningContentFilter

@@ -24,6 +24,14 @@ We would like to thank the following people for their contributions to Crawl4AI:
- [NanmiCoder](https://github.com/NanmiCoder) - fix: crawler strategy exception handling and fixes [#271](https://github.com/unclecode/crawl4ai/pull/271)
- [paulokuong](https://github.com/paulokuong) - fix: RAWL4_AI_BASE_DIRECTORY should be Path object instead of string [#298](https://github.com/unclecode/crawl4ai/pull/298)

#### Feb-Alpha-1
- [sufianuddin](https://github.com/sufianuddin) - fix: [Documentation for JsonCssExtractionStrategy](https://github.com/unclecode/crawl4ai/issues/651)
- [tautikAg](https://github.com/tautikAg) - fix: [Markdown output has incorrect spacing](https://github.com/unclecode/crawl4ai/issues/599)
- [cardit1](https://github.com/cardit1) - fix: ['AsyncPlaywrightCrawlerStrategy' object has no attribute 'downloads_path'](https://github.com/unclecode/crawl4ai/issues/585)
- [dmurat](https://github.com/dmurat) - fix: [Incorrect rendering of inline code inside of links](https://github.com/unclecode/crawl4ai/issues/583)
- [Sparshsing](https://github.com/Sparshsing) - fix: [Relative Urls in the webpage not extracted properly](https://github.com/unclecode/crawl4ai/issues/570)

## Other Contributors

@@ -31,6 +39,11 @@ We would like to thank the following people for their contributions to Crawl4AI:
- [Shiv Kumar](https://github.com/shivkumar0757)
- [QIN2DIM](https://github.com/QIN2DIM)

#### Typo fixes
- [ssoydan](https://github.com/ssoydan)
- [Darshan](https://github.com/Darshan2104)
- [tuhinmallick](https://github.com/tuhinmallick)

## Acknowledgements

We also want to thank all the users who have reported bugs, suggested features, or helped in any other way to make Crawl4AI better.

Dockerfile (173 lines changed)

```
@@ -1,32 +1,36 @@
# syntax=docker/dockerfile:1.4
FROM python:3.10-slim

ARG TARGETPLATFORM
ARG BUILDPLATFORM
# C4ai version
ARG C4AI_VER=0.6.0
ENV C4AI_VERSION=$C4AI_VER
LABEL c4ai.version=$C4AI_VER

# Set build arguments
ARG APP_HOME=/app
ARG GITHUB_REPO=https://github.com/unclecode/crawl4ai.git
ARG GITHUB_BRANCH=main
ARG USE_LOCAL=true

ENV PYTHONFAULTHANDLER=1 \
    PYTHONHASHSEED=random \
    PYTHONUNBUFFERED=1 \
    PIP_NO_CACHE_DIR=1 \
    PYTHONDONTWRITEBYTECODE=1 \
    PIP_DISABLE_PIP_VERSION_CHECK=1 \
    PIP_DEFAULT_TIMEOUT=100 \
    DEBIAN_FRONTEND=noninteractive \
    REDIS_HOST=localhost \
    REDIS_PORT=6379

# Other build arguments
ARG PYTHON_VERSION=3.10

# Base stage with system dependencies
FROM python:${PYTHON_VERSION}-slim as base

# Declare ARG variables again within the build stage
ARG INSTALL_TYPE=all
ARG INSTALL_TYPE=default
ARG ENABLE_GPU=false
ARG TARGETARCH

# Platform-specific labels
LABEL maintainer="unclecode"
LABEL description="🔥🕷️ Crawl4AI: Open-source LLM Friendly Web Crawler & scraper"
LABEL version="1.0"

# Environment setup
ENV PYTHONUNBUFFERED=1 \
    PYTHONDONTWRITEBYTECODE=1 \
    PIP_NO_CACHE_DIR=1 \
    PIP_DISABLE_PIP_VERSION_CHECK=1 \
    PIP_DEFAULT_TIMEOUT=100 \
    DEBIAN_FRONTEND=noninteractive

# Install system dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    curl \
@@ -37,10 +41,11 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
    pkg-config \
    python3-dev \
    libjpeg-dev \
    libpng-dev \
    redis-server \
    supervisor \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# Playwright system dependencies for Linux
RUN apt-get update && apt-get install -y --no-install-recommends \
    libglib2.0-0 \
    libnss3 \
@@ -63,30 +68,63 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
    libcairo2 \
    libasound2 \
    libatspi2.0-0 \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# GPU support if enabled and architecture is supported
RUN if [ "$ENABLE_GPU" = "true" ] && [ "$TARGETPLATFORM" = "linux/amd64" ] ; then \
RUN if [ "$ENABLE_GPU" = "true" ] && [ "$TARGETARCH" = "amd64" ] ; then \
    apt-get update && apt-get install -y --no-install-recommends \
    nvidia-cuda-toolkit \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* ; \
    else \
    echo "Skipping NVIDIA CUDA Toolkit installation (unsupported platform or GPU disabled)"; \
    fi

# Create and set working directory
WORKDIR /app
RUN if [ "$TARGETARCH" = "arm64" ]; then \
    echo "🦾 Installing ARM-specific optimizations"; \
    apt-get update && apt-get install -y --no-install-recommends \
    libopenblas-dev \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*; \
    elif [ "$TARGETARCH" = "amd64" ]; then \
    echo "🖥️ Installing AMD64-specific optimizations"; \
    apt-get update && apt-get install -y --no-install-recommends \
    libomp-dev \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*; \
    else \
    echo "Skipping platform-specific optimizations (unsupported platform)"; \
    fi

# Copy the entire project
COPY . .
# Create a non-root user and group
RUN groupadd -r appuser && useradd --no-log-init -r -g appuser appuser

# Install base requirements
# Create and set permissions for appuser home directory
RUN mkdir -p /home/appuser && chown -R appuser:appuser /home/appuser

WORKDIR ${APP_HOME}

RUN echo '#!/bin/bash\n\
    if [ "$USE_LOCAL" = "true" ]; then\n\
    echo "📦 Installing from local source..."\n\
    pip install --no-cache-dir /tmp/project/\n\
    else\n\
    echo "🌐 Installing from GitHub..."\n\
    for i in {1..3}; do \n\
    git clone --branch ${GITHUB_BRANCH} ${GITHUB_REPO} /tmp/crawl4ai && break || \n\
    { echo "Attempt $i/3 failed! Taking a short break... ☕"; sleep 5; }; \n\
    done\n\
    pip install --no-cache-dir /tmp/crawl4ai\n\
    fi' > /tmp/install.sh && chmod +x /tmp/install.sh

COPY . /tmp/project/

# Copy supervisor config first (might need root later, but okay for now)
COPY deploy/docker/supervisord.conf .

COPY deploy/docker/requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Install required library for FastAPI
RUN pip install fastapi uvicorn psutil

# Install ML dependencies first for better layer caching
RUN if [ "$INSTALL_TYPE" = "all" ] ; then \
    pip install --no-cache-dir \
    torch \
@@ -99,38 +137,61 @@ RUN if [ "$INSTALL_TYPE" = "all" ] ; then \
    python -m nltk.downloader punkt stopwords ; \
    fi

# Install the package
RUN if [ "$INSTALL_TYPE" = "all" ] ; then \
    pip install ".[all]" && \
    pip install "/tmp/project/[all]" && \
    python -m crawl4ai.model_loader ; \
    elif [ "$INSTALL_TYPE" = "torch" ] ; then \
    pip install ".[torch]" ; \
    pip install "/tmp/project/[torch]" ; \
    elif [ "$INSTALL_TYPE" = "transformer" ] ; then \
    pip install ".[transformer]" && \
    pip install "/tmp/project/[transformer]" && \
    python -m crawl4ai.model_loader ; \
    else \
    pip install "." ; \
    pip install "/tmp/project" ; \
    fi

# Install MkDocs and required plugins
RUN pip install --no-cache-dir \
    mkdocs \
    mkdocs-material \
    mkdocs-terminal \
    pymdown-extensions
RUN pip install --no-cache-dir --upgrade pip && \
    /tmp/install.sh && \
    python -c "import crawl4ai; print('✅ crawl4ai is ready to rock!')" && \
    python -c "from playwright.sync_api import sync_playwright; print('✅ Playwright is feeling dramatic!')"

# Build MkDocs documentation
RUN mkdocs build
RUN crawl4ai-setup

# Install Playwright and browsers
RUN if [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
    playwright install chromium; \
    elif [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
    playwright install chromium; \
    fi
RUN playwright install --with-deps

# Expose port
EXPOSE 8000 11235 9222 8080
RUN mkdir -p /home/appuser/.cache/ms-playwright \
    && cp -r /root/.cache/ms-playwright/chromium-* /home/appuser/.cache/ms-playwright/ \
    && chown -R appuser:appuser /home/appuser/.cache/ms-playwright

# Start the FastAPI server
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "11235"]
RUN crawl4ai-doctor

# Copy application code
COPY deploy/docker/* ${APP_HOME}/

# copy the playground + any future static assets
COPY deploy/docker/static ${APP_HOME}/static

# Change ownership of the application directory to the non-root user
RUN chown -R appuser:appuser ${APP_HOME}

# give permissions to redis persistence dirs if used
RUN mkdir -p /var/lib/redis /var/log/redis && chown -R appuser:appuser /var/lib/redis /var/log/redis

HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD bash -c '\
    MEM=$(free -m | awk "/^Mem:/{print \$2}"); \
    if [ $MEM -lt 2048 ]; then \
    echo "⚠️ Warning: Less than 2GB RAM available! Your container might need a memory boost! 🚀"; \
    exit 1; \
    fi && \
    redis-cli ping > /dev/null && \
    curl -f http://localhost:11235/health || exit 1'

EXPOSE 6379
# Switch to the non-root user before starting the application
USER appuser

# Set environment variables to production
ENV PYTHON_ENV=production

# Start the application using supervisord
CMD ["supervisord", "-c", "supervisord.conf"]
```
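
The image above serves on port 11235 and exposes a `/health` endpoint (both visible in the `HEALTHCHECK`). A hypothetical Python client sketch — the `/crawl` path and the request-body shape are assumptions based on the Docker API demo mentioned in the changelog, not on this diff:

```python
import requests

# Confirmed by the Dockerfile: the server listens on 11235 and serves /health.
print(requests.get("http://localhost:11235/health", timeout=10).json())

# Hypothetical crawl request; the endpoint path and payload keys are assumptions.
payload = {"urls": ["https://example.com"]}
resp = requests.post("http://localhost:11235/crawl", json=payload, timeout=120)
resp.raise_for_status()
print(resp.json())
```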

JOURNAL.md (new file, +339 lines)

@@ -0,0 +1,339 @@
# Development Journal

This journal tracks significant feature additions, bug fixes, and architectural decisions in the crawl4ai project. It serves as both documentation and a historical record of the project's evolution.

## [2025-04-17] Added Content Source Selection for Markdown Generation

**Feature:** Configurable content source for markdown generation

**Changes Made:**
1. Added `content_source: str = "cleaned_html"` parameter to `MarkdownGenerationStrategy` class
2. Updated `DefaultMarkdownGenerator` to accept and pass the content source parameter
3. Renamed the `cleaned_html` parameter to `input_html` in the `generate_markdown` method
4. Modified `AsyncWebCrawler.aprocess_html` to select the appropriate HTML source based on the generator's config
5. Added `preprocess_html_for_schema` import in `async_webcrawler.py`

**Implementation Details:**
- Added a new `content_source` parameter to specify which HTML input to use for markdown generation
- Options include: "cleaned_html" (default), "raw_html", and "fit_html"
- Used a dictionary dispatch pattern in `aprocess_html` to select the appropriate HTML source
- Added proper error handling with fallback to cleaned_html if content source selection fails
- Ensured backward compatibility by defaulting to "cleaned_html" option

**Files Modified:**
- `crawl4ai/markdown_generation_strategy.py`: Added content_source parameter and updated the method signature
- `crawl4ai/async_webcrawler.py`: Added HTML source selection logic and updated imports

**Examples:**
- Created `docs/examples/content_source_example.py` demonstrating how to use the new parameter

**Challenges:**
- Maintaining backward compatibility while reorganizing the parameter flow
- Ensuring proper error handling for all content source options
- Making the change with minimal code modifications

**Why This Feature:**
The content source selection feature allows users to choose which HTML content to use as input for markdown generation:
1. "cleaned_html" - Uses the post-processed HTML after scraping strategy (original behavior)
2. "raw_html" - Uses the original raw HTML directly from the web page
3. "fit_html" - Uses the preprocessed HTML optimized for schema extraction

This feature provides greater flexibility in how users generate markdown, enabling them to:
- Capture more detailed content from the original HTML when needed
- Use schema-optimized HTML when working with structured data
- Choose the approach that best suits their specific use case

## [2025-04-17] Implemented High Volume Stress Testing Solution for SDK

**Feature:** Comprehensive stress testing framework using `arun_many` and the dispatcher system to evaluate performance, concurrency handling, and identify potential issues under high-volume crawling scenarios (see the sketch after this entry)

**Changes Made:**
1. Created a dedicated stress testing framework in the `benchmarking/` (or similar) directory.
2. Implemented local test site generation (`SiteGenerator`) with configurable heavy HTML pages.
3. Added basic memory usage tracking (`SimpleMemoryTracker`) using platform-specific commands (avoiding `psutil` dependency for this specific test).
4. Utilized `CrawlerMonitor` from `crawl4ai` for rich terminal UI and real-time monitoring of test progress and dispatcher activity.
5. Implemented detailed result summary saving (JSON) and memory sample logging (CSV).
6. Developed `run_benchmark.py` to orchestrate tests with predefined configurations.
7. Created `run_all.sh` as a simple wrapper for `run_benchmark.py`.

**Implementation Details:**
- Generates a local test site with configurable pages containing heavy text and image content.
- Uses Python's built-in `http.server` for local serving, minimizing network variance.
- Leverages `crawl4ai`'s `arun_many` method for processing URLs.
- Utilizes `MemoryAdaptiveDispatcher` to manage concurrency via the `max_sessions` parameter (note: memory adaptation features require `psutil`, not used by `SimpleMemoryTracker`).
- Tracks memory usage via `SimpleMemoryTracker`, recording samples throughout test execution to a CSV file.
- Uses `CrawlerMonitor` (which uses the `rich` library) for clear terminal visualization and progress reporting directly from the dispatcher.
- Stores detailed final metrics in a JSON summary file.

**Files Created/Updated:**
- `stress_test_sdk.py`: Main stress testing implementation using `arun_many`.
- `benchmark_report.py`: (Assumed) Report generator for comparing test results.
- `run_benchmark.py`: Test runner script with predefined configurations.
- `run_all.sh`: Simple bash script wrapper for `run_benchmark.py`.
- `USAGE.md`: Comprehensive documentation on usage and interpretation (updated).

**Testing Approach:**
- Creates a controlled, reproducible test environment with a local HTTP server.
- Processes URLs using `arun_many`, allowing the dispatcher to manage concurrency up to `max_sessions`.
- Optionally logs per-batch summaries (when not in streaming mode) after processing chunks.
- Supports different test sizes via `run_benchmark.py` configurations.
- Records memory samples via platform commands for basic trend analysis.
- Includes cleanup functionality for the test environment.

**Challenges:**
- Ensuring proper cleanup of HTTP server processes.
- Getting reliable memory tracking across platforms without adding heavy dependencies (`psutil`) to this specific test script.
- Designing `run_benchmark.py` to correctly pass arguments to `stress_test_sdk.py`.

**Why This Feature:**
The high volume stress testing solution addresses critical needs for ensuring Crawl4AI's `arun_many` reliability:
1. Provides a reproducible way to evaluate performance under concurrent load.
2. Allows testing the dispatcher's concurrency control (`max_session_permit`) and queue management.
3. Enables performance tuning by observing throughput (`URLs/sec`) under different `max_sessions` settings.
4. Creates a controlled environment for testing `arun_many` behavior.
5. Supports continuous integration by providing deterministic test conditions for `arun_many`.

**Design Decisions:**
- Chose local site generation for reproducibility and isolation from network issues.
- Utilized the built-in `CrawlerMonitor` for real-time feedback, leveraging its `rich` integration.
- Implemented optional per-batch logging in `stress_test_sdk.py` (when not streaming) to provide chunk-level summaries alongside the continuous monitor.
- Adopted `arun_many` with a `MemoryAdaptiveDispatcher` as the core mechanism for parallel execution, reflecting the intended SDK usage.
- Created `run_benchmark.py` to simplify running standard test configurations.
- Used `SimpleMemoryTracker` to provide basic memory insights without requiring `psutil` for this particular test runner.

**Future Enhancements to Consider:**
- Create a separate test variant that *does* use `psutil` to specifically stress the memory-adaptive features of the dispatcher.
- Add support for generated JavaScript content.
- Add support for Docker-based testing with explicit memory limits.
- Enhance `benchmark_report.py` to provide more sophisticated analysis of performance and memory trends from the generated JSON/CSV files.
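
A stripped-down sketch of the pattern the stress test exercises; `arun_many`, `max_session_permit`, and `MemoryAdaptiveDispatcher` are named in this entry, while the `crawl4ai.async_dispatcher` import path is an assumption:

```python
import asyncio

from crawl4ai import AsyncWebCrawler, CacheMode, CrawlerRunConfig
from crawl4ai.async_dispatcher import MemoryAdaptiveDispatcher  # assumed module path

async def main():
    # Local test-site URLs, as served by the generated site on http.server.
    urls = [f"http://localhost:8000/page_{i}.html" for i in range(1000)]
    dispatcher = MemoryAdaptiveDispatcher(max_session_permit=20)  # hard concurrency cap
    config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS)
    async with AsyncWebCrawler() as crawler:
        results = await crawler.arun_many(urls, config=config, dispatcher=dispatcher)
        ok = sum(1 for r in results if r.success)
        print(f"{ok}/{len(urls)} pages crawled")

asyncio.run(main())
```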

---

## [2025-04-17] Refined Stress Testing System Parameters and Execution

**Changes Made:**
1. Corrected `run_benchmark.py` and `stress_test_sdk.py` to use `--max-sessions` instead of the incorrect `--workers` parameter, accurately reflecting dispatcher configuration.
2. Updated `run_benchmark.py` argument handling to correctly pass all relevant custom parameters (including `--stream`, `--monitor-mode`, etc.) to `stress_test_sdk.py`.
3. (Assuming changes in `benchmark_report.py`) Applied dark theme to benchmark reports for better readability.
4. (Assuming changes in `benchmark_report.py`) Improved visualization code to eliminate matplotlib warnings.
5. Updated `run_benchmark.py` to provide clickable `file://` links to generated reports in the terminal output.
6. Updated `USAGE.md` with comprehensive parameter descriptions reflecting the final script arguments.
7. Updated `run_all.sh` wrapper to correctly invoke `run_benchmark.py` with flexible arguments.

**Details of Changes:**

1. **Parameter Correction (`--max-sessions`)**:
   * Identified the fundamental misunderstanding where `--workers` was used incorrectly.
   * Refactored `stress_test_sdk.py` to accept `--max-sessions` and configure the `MemoryAdaptiveDispatcher`'s `max_session_permit` accordingly.
   * Updated `run_benchmark.py` argument parsing and command construction to use `--max-sessions`.
   * Updated `TEST_CONFIGS` in `run_benchmark.py` to use `max_sessions`.

2. **Argument Handling (`run_benchmark.py`)**:
   * Improved logic to collect all command-line arguments provided to `run_benchmark.py`.
   * Ensured all relevant arguments (like `--stream`, `--monitor-mode`, `--port`, `--use-rate-limiter`, etc.) are correctly forwarded when calling `stress_test_sdk.py` as a subprocess.

3. **Dark Theme & Visualization Fixes (Assumed in `benchmark_report.py`)**:
   * (Describes changes assumed to be made in the separate reporting script.)

4. **Clickable Links (`run_benchmark.py`)**:
   * Added logic to find the latest HTML report and PNG chart in the `benchmark_reports` directory after `benchmark_report.py` runs.
   * Used `pathlib` to generate correct `file://` URLs for terminal output.

5. **Documentation Improvements (`USAGE.md`)**:
   * Rewrote sections to explain `arun_many`, dispatchers, and `--max-sessions`.
   * Updated parameter tables for all scripts (`stress_test_sdk.py`, `run_benchmark.py`).
   * Clarified the difference between batch and streaming modes and their effect on logging.
   * Updated examples to use correct arguments.

**Files Modified:**
- `stress_test_sdk.py`: Changed `--workers` to `--max-sessions`, added new arguments, used `arun_many`.
- `run_benchmark.py`: Changed argument handling, updated configs, calls `stress_test_sdk.py`.
- `run_all.sh`: Updated to call `run_benchmark.py` correctly.
- `USAGE.md`: Updated documentation extensively.
- `benchmark_report.py`: (Assumed modifications for dark theme and viz fixes.)

**Testing:**
- Verified that `--max-sessions` correctly limits concurrency via the `CrawlerMonitor` output.
- Confirmed that custom arguments passed to `run_benchmark.py` are forwarded to `stress_test_sdk.py`.
- Validated clickable links work in supporting terminals.
- Ensured documentation matches the final script parameters and behavior.

**Why These Changes:**
These refinements correct the fundamental approach of the stress test to align with `crawl4ai`'s actual architecture and intended usage:
1. Ensures the test evaluates the correct components (`arun_many`, `MemoryAdaptiveDispatcher`).
2. Makes test configurations more accurate and flexible.
3. Improves the usability of the testing framework through better argument handling and documentation.

**Future Enhancements to Consider:**
- Add support for generated JavaScript content to test JS rendering performance
- Implement more sophisticated memory analysis like generational garbage collection tracking
- Add support for Docker-based testing with memory limits to force OOM conditions
- Create visualization tools for analyzing memory usage patterns across test runs
- Add benchmark comparisons between different crawler versions or configurations

## [2025-04-17] Fixed Issues in Stress Testing System

**Changes Made:**
1. Fixed custom parameter handling in run_benchmark.py
2. Applied dark theme to benchmark reports for better readability
3. Improved visualization code to eliminate matplotlib warnings
4. Added clickable links to generated reports in terminal output
5. Enhanced documentation with comprehensive parameter descriptions

**Details of Changes:**

1. **Custom Parameter Handling Fix**
   - Identified bug where custom URL count was being ignored in run_benchmark.py
   - Rewrote argument handling to use a custom args dictionary
   - Properly passed parameters to the test_simple_stress.py command
   - Added better UI indication of custom parameters in use

2. **Dark Theme Implementation**
   - Added complete dark theme to HTML benchmark reports
   - Applied dark styling to all visualization components
   - Used Nord-inspired color palette for charts and graphs
   - Improved contrast and readability for data visualization
   - Updated text colors and backgrounds for better eye comfort

3. **Matplotlib Warning Fixes**
   - Resolved warnings related to improper use of set_xticklabels()
   - Implemented correct x-axis positioning for bar charts
   - Ensured proper alignment of bar labels and data points
   - Updated plotting code to use modern matplotlib practices

4. **Documentation Improvements**
   - Created comprehensive USAGE.md with detailed instructions
   - Added parameter documentation for all scripts
   - Included examples for all common use cases
   - Provided detailed explanations for interpreting results
   - Added troubleshooting guide for common issues

**Files Modified:**
- `tests/memory/run_benchmark.py`: Fixed custom parameter handling
- `tests/memory/benchmark_report.py`: Added dark theme and fixed visualization warnings
- `tests/memory/run_all.sh`: Added clickable links to reports
- `tests/memory/USAGE.md`: Created comprehensive documentation

**Testing:**
- Verified that custom URL counts are now correctly used
- Confirmed dark theme is properly applied to all report elements
- Checked that matplotlib warnings are no longer appearing
- Validated clickable links to reports work in terminals that support them

**Why These Changes:**
These improvements address several usability issues with the stress testing system:
1. Better parameter handling ensures test configurations work as expected
2. Dark theme reduces eye strain during extended test review sessions
3. Fixing visualization warnings improves code quality and output clarity
4. Enhanced documentation makes the system more accessible for future use

**Future Enhancements:**
- Add additional visualization options for different types of analysis
- Implement theme toggle to support both light and dark preferences
- Add export options for embedding reports in other documentation
- Create dedicated CI/CD integration templates for automated testing

## [2025-04-09] Added MHTML Capture Feature

**Feature:** MHTML snapshot capture of crawled pages (usage sketch after this entry)

**Changes Made:**
1. Added `capture_mhtml: bool = False` parameter to `CrawlerRunConfig` class
2. Added `mhtml: Optional[str] = None` field to `CrawlResult` model
3. Added `mhtml_data: Optional[str] = None` field to `AsyncCrawlResponse` class
4. Implemented `capture_mhtml()` method in `AsyncPlaywrightCrawlerStrategy` class to capture MHTML via CDP
5. Modified the crawler to capture MHTML when enabled and pass it to the result

**Implementation Details:**
- MHTML capture uses Chrome DevTools Protocol (CDP) via Playwright's CDP session API
- The implementation waits for the page to fully load before capturing MHTML content
- Enhanced waiting for JavaScript content with requestAnimationFrame for better JS content capture
- We ensure all browser resources are properly cleaned up after capture

**Files Modified:**
- `crawl4ai/models.py`: Added the mhtml field to CrawlResult
- `crawl4ai/async_configs.py`: Added capture_mhtml parameter to CrawlerRunConfig
- `crawl4ai/async_crawler_strategy.py`: Implemented MHTML capture logic
- `crawl4ai/async_webcrawler.py`: Added mapping from AsyncCrawlResponse.mhtml_data to CrawlResult.mhtml

**Testing:**
- Created comprehensive tests in `tests/20241401/test_mhtml.py` covering:
  - Capturing MHTML when enabled
  - Ensuring mhtml is None when disabled explicitly
  - Ensuring mhtml is None by default
  - Capturing MHTML on JavaScript-enabled pages

**Challenges:**
- Had to improve page loading detection to ensure JavaScript content was fully rendered
- Tests needed to be run independently due to Playwright browser instance management
- Modified test expected content to match actual MHTML output

**Why This Feature:**
The MHTML capture feature allows users to capture complete web pages including all resources (CSS, images, etc.) in a single file. This is valuable for:
1. Offline viewing of captured pages
2. Creating permanent snapshots of web content for archival
3. Ensuring consistent content for later analysis, even if the original site changes

**Future Enhancements to Consider:**
- Add option to save MHTML to file
- Support for filtering what resources get included in MHTML
- Add support for specifying MHTML capture options
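
A minimal usage sketch built only from the names in this entry (`capture_mhtml` on `CrawlerRunConfig`, `mhtml` on `CrawlResult`):

```python
import asyncio

from crawl4ai import AsyncWebCrawler, CrawlerRunConfig

async def main():
    config = CrawlerRunConfig(capture_mhtml=True)
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun("https://example.com", config=config)
        if result.mhtml:  # stays None unless capture_mhtml=True
            with open("snapshot.mhtml", "w", encoding="utf-8") as f:
                f.write(result.mhtml)

asyncio.run(main())
```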

## [2025-04-10] Added Network Request and Console Message Capturing

**Feature:** Comprehensive capturing of network requests/responses and browser console messages during crawling (usage sketch after this entry)

**Changes Made:**
1. Added `capture_network_requests: bool = False` and `capture_console_messages: bool = False` parameters to `CrawlerRunConfig` class
2. Added `network_requests: Optional[List[Dict[str, Any]]] = None` and `console_messages: Optional[List[Dict[str, Any]]] = None` fields to both `AsyncCrawlResponse` and `CrawlResult` models
3. Implemented event listeners in `AsyncPlaywrightCrawlerStrategy._crawl_web()` to capture browser network events and console messages
4. Added proper event listener cleanup in the finally block to prevent resource leaks
5. Modified the crawler flow to pass captured data from AsyncCrawlResponse to CrawlResult

**Implementation Details:**
- Network capture uses Playwright event listeners (`request`, `response`, and `requestfailed`) to record all network activity
- Console capture uses Playwright event listeners (`console` and `pageerror`) to record console messages and errors
- Each network event includes metadata like URL, headers, status, and timing information
- Each console message includes type, text content, and source location when available
- All captured events include timestamps for chronological analysis
- Error handling ensures even failed capture attempts won't crash the main crawling process

**Files Modified:**
- `crawl4ai/models.py`: Added new fields to AsyncCrawlResponse and CrawlResult
- `crawl4ai/async_configs.py`: Added new configuration parameters to CrawlerRunConfig
- `crawl4ai/async_crawler_strategy.py`: Implemented capture logic using event listeners
- `crawl4ai/async_webcrawler.py`: Added data transfer from AsyncCrawlResponse to CrawlResult

**Documentation:**
- Created detailed documentation in `docs/md_v2/advanced/network-console-capture.md`
- Added feature to site navigation in `mkdocs.yml`
- Updated CrawlResult documentation in `docs/md_v2/api/crawl-result.md`
- Created comprehensive example in `docs/examples/network_console_capture_example.py`

**Testing:**
- Created `tests/general/test_network_console_capture.py` with tests for:
  - Verifying capture is disabled by default
  - Testing network request capturing
  - Testing console message capturing
  - Ensuring both capture types can be enabled simultaneously
  - Checking correct content is captured in expected formats

**Challenges:**
- Initial implementation had synchronous/asynchronous mismatches in event handlers
- Needed to fix property access vs. method call usage in the handlers
- Required careful cleanup of event listeners to prevent memory leaks

**Why This Feature:**
The network and console capture feature provides deep visibility into web page activity, enabling:
1. Debugging complex web applications by seeing all network requests and errors
2. Security analysis to detect unexpected third-party requests and data flows
3. Performance profiling to identify slow-loading resources
4. API discovery in single-page applications
5. Comprehensive analysis of web application behavior

**Future Enhancements to Consider:**
- Option to filter captured events by type, domain, or content
- Support for capturing response bodies (with size limits)
- Aggregate statistics calculation for performance metrics
- Integration with visualization tools for network waterfall analysis
- Exporting captures in HAR format for use with external tools
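
A usage sketch based on the fields named in this entry; the exact keys inside each captured event dictionary are assumptions:

```python
import asyncio

from crawl4ai import AsyncWebCrawler, CrawlerRunConfig

async def main():
    config = CrawlerRunConfig(
        capture_network_requests=True,
        capture_console_messages=True,
    )
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun("https://example.com", config=config)
        for event in result.network_requests or []:
            # "event_type" and "url" are assumed key names for each event dict.
            print(event.get("event_type"), event.get("url"))
        for msg in result.console_messages or []:
            print(msg.get("type"), msg.get("text"))

asyncio.run(main())
```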

LICENSE (20 lines changed)

@@ -48,4 +48,22 @@ You may add Your own copyright statement to Your modifications and may provide a
9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

---
Attribution Requirement

All distributions, publications, or public uses of this software, or derivative works based on this software, must include the following attribution:

"This product includes software developed by UncleCode (https://x.com/unclecode) as part of the Crawl4AI project (https://github.com/unclecode/crawl4ai)."

This attribution must be displayed in a prominent and easily accessible location, such as:

- For software distributions: In a NOTICE file, README file, or equivalent documentation.
- For publications (research papers, articles, blog posts): In the acknowledgments section or a footnote.
- For websites/web applications: In an "About" or "Credits" section.
- For command-line tools: In the help/usage output.

This requirement ensures proper credit is given for the use of Crawl4AI and helps promote the project.

---
207
README.md
207
README.md
@@ -21,9 +21,9 @@

Crawl4AI is the #1 trending GitHub repository, actively maintained by a vibrant community. It delivers blazing-fast, AI-ready web crawling tailored for LLMs, AI agents, and data pipelines. Open source, flexible, and built for real-time performance, Crawl4AI empowers developers with unmatched speed, precision, and deployment ease.

[✨ Check out latest update v0.4.3bx](#-recent-updates)
[✨ Check out latest update v0.6.0rc1](#-recent-updates)

🎉 **Version 0.4.3bx is out!** This release brings exciting new features like a Memory Dispatcher System, Streaming Support, LLM-Powered Markdown Generation, Schema Generation, and Robots.txt Compliance! [Read the release notes →](https://docs.crawl4ai.com/blog)
🎉 **Version 0.6.0rc1 is now available!** This release candidate introduces World-aware Crawling with geolocation and locale settings, Table-to-DataFrame extraction, Browser pooling with pre-warming, Network and console traffic capture, MCP integration for AI tools, and a completely revamped Docker deployment! [Read the release notes →](https://docs.crawl4ai.com/blog)

<details>
<summary>🤓 <strong>My Personal Story</strong></summary>
@@ -68,7 +68,7 @@ If you encounter any browser-related issues, you can install them manually:
python -m playwright install --with-deps chromium
```

2. Run a simple web crawl:
2. Run a simple web crawl with Python:
```python
import asyncio
from crawl4ai import *
@@ -84,6 +84,18 @@ if __name__ == "__main__":
    asyncio.run(main())
```

3. Or use the new command-line interface:
```bash
# Basic crawl with markdown output
crwl https://www.nbcnews.com/business -o markdown

# Deep crawl with BFS strategy, max 10 pages
crwl https://docs.crawl4ai.com --deep-crawl bfs --max-pages 10

# Use LLM extraction with a specific question
crwl https://www.example.com/products -q "Extract all product prices"
```

## ✨ Features

<details>
@@ -112,6 +124,7 @@ if __name__ == "__main__":

- 🖥️ **Managed Browser**: Use user-owned browsers with full control, avoiding bot detection.
- 🔄 **Remote Browser Control**: Connect to Chrome Developer Tools Protocol for remote, large-scale data extraction.
- 👤 **Browser Profiler**: Create and manage persistent profiles with saved authentication states, cookies, and settings.
- 🔒 **Session Management**: Preserve browser states and reuse them for multi-step crawling.
- 🧩 **Proxy Support**: Seamlessly connect to proxies with authentication for secure access.
- ⚙️ **Full Browser Control**: Modify headers, cookies, user agents, and more for tailored crawling setups.
@@ -140,10 +153,11 @@ if __name__ == "__main__":
<details>
<summary>🚀 <strong>Deployment</strong></summary>

- 🐳 **Dockerized Setup**: Optimized Docker image with API server for easy deployment.
- 🐳 **Dockerized Setup**: Optimized Docker image with FastAPI server for easy deployment.
- 🔑 **Secure Authentication**: Built-in JWT token authentication for API security.
- 🔄 **API Gateway**: One-click deployment with secure token authentication for API-based workflows.
- 🌐 **Scalable Architecture**: Designed for mass-scale production and optimized server performance.
- ⚙️ **DigitalOcean Deployment**: Ready-to-deploy configurations for DigitalOcean and similar platforms.
- ☁️ **Cloud Deployment**: Ready-to-deploy configurations for major cloud platforms.

</details>

@@ -239,24 +253,29 @@ pip install -e ".[all]"               # Install all optional features
<details>
<summary>🐳 <strong>Docker Deployment</strong></summary>

> 🚀 **Major Changes Coming!** We're developing a completely new Docker implementation that will make deployment even more efficient and seamless. The current Docker setup is being deprecated in favor of this new solution.
> 🚀 **Now Available!** Our completely redesigned Docker implementation is here! This new solution makes deployment more efficient and seamless than ever.

### Current Docker Support
### New Docker Features

The existing Docker implementation is being deprecated and will be replaced soon. If you still need to use Docker with the current version:
The new Docker implementation includes:
- **Browser pooling** with page pre-warming for faster response times
- **Interactive playground** to test and generate request code
- **MCP integration** for direct connection to AI tools like Claude Code
- **Comprehensive API endpoints** including HTML extraction, screenshots, PDF generation, and JavaScript execution
- **Multi-architecture support** with automatic detection (AMD64/ARM64)
- **Optimized resources** with improved memory management

- 📚 [Deprecated Docker Setup](./docs/deprecated/docker-deployment.md) - Instructions for the current Docker implementation
- ⚠️ Note: This setup will be replaced in the next major release
### Getting Started

### What's Coming Next?
```bash
# Pull and run the latest release candidate
docker pull unclecode/crawl4ai:0.6.0rc1-r1
docker run -d -p 11235:11235 --name crawl4ai --shm-size=1g unclecode/crawl4ai:0.6.0rc1-r1

Our new Docker implementation will bring:
- Improved performance and resource efficiency
- Streamlined deployment process
- Better integration with Crawl4AI features
- Enhanced scalability options
# Visit the playground at http://localhost:11235/playground
```

Stay connected with our [GitHub repository](https://github.com/unclecode/crawl4ai) for updates!
For complete documentation, see our [Docker Deployment Guide](https://docs.crawl4ai.com/core/docker-deployment/).

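Once the container is running, it can also be driven programmatically. Here is a minimal sketch, assuming the server exposes a JSON `POST /crawl` endpoint on port 11235 that accepts a `urls` list; the authoritative payload and response schema live in the Docker Deployment Guide above, so treat the field names here as assumptions.

```python
import requests  # third-party: pip install requests

# Assumed payload shape; check the Docker Deployment Guide for the
# authoritative schema before relying on these field names.
payload = {"urls": ["https://example.com"]}

resp = requests.post("http://localhost:11235/crawl", json=payload, timeout=120)
resp.raise_for_status()
print(resp.json())  # response shape depends on the server version
```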
</details>

@@ -318,9 +337,8 @@ async def main():
        url="https://docs.micronaut.io/4.7.6/guide/",
        config=run_config
    )
    print(len(result.markdown))
    print(len(result.fit_markdown))
    print(len(result.markdown_v2.fit_markdown))
    print(len(result.markdown.raw_markdown))
    print(len(result.markdown.fit_markdown))

if __name__ == "__main__":
    asyncio.run(main())
@@ -407,7 +425,7 @@ if __name__ == "__main__":
```python
import os
import asyncio
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode, LLMConfig
from crawl4ai.extraction_strategy import LLMExtractionStrategy
from pydantic import BaseModel, Field

@@ -423,7 +441,7 @@ async def main():
        extraction_strategy=LLMExtractionStrategy(
            # Here you can use any provider that the LiteLLM library supports, for instance: ollama/qwen2
            # provider="ollama/qwen2", api_token="no-token",
            provider="openai/gpt-4o", api_token=os.getenv('OPENAI_API_KEY'),
            llm_config=LLMConfig(provider="openai/gpt-4o", api_token=os.getenv('OPENAI_API_KEY')),
            schema=OpenAIModelFee.schema(),
            extraction_type="schema",
            instruction="""From the crawled content, extract all mentioned model names along with their fees for input and output tokens.
@@ -487,21 +505,60 @@ async def test_news_crawl():

## ✨ Recent Updates

- **🚀 New Dispatcher System**: Scale to thousands of URLs with intelligent **memory monitoring**, **concurrency control**, and optional **rate limiting**. (See `MemoryAdaptiveDispatcher`, `SemaphoreDispatcher`, `RateLimiter`, `CrawlerMonitor`)
- **⚡ Streaming Mode**: Process results **as they arrive** instead of waiting for an entire batch to complete. (Set `stream=True` in `CrawlerRunConfig`)
- **🤖 Enhanced LLM Integration**:
  - **Automatic schema generation**: Create extraction rules from HTML using OpenAI or Ollama, no manual CSS/XPath needed.
  - **LLM-powered Markdown filtering**: Refine your markdown output with a new `LLMContentFilter` that understands content relevance.
  - **Ollama Support**: Use open-source or self-hosted models for private or cost-effective extraction.
- **🏎️ Faster Scraping Option**: New `LXMLWebScrapingStrategy` offers **10-20x speedup** for large, complex pages (experimental).
- **🤖 robots.txt Compliance**: Respect website rules with `check_robots_txt=True` and efficient local caching.
- **🔄 Proxy Rotation**: Built-in support for dynamic proxy switching and IP verification, with support for authenticated proxies and session persistence.
- **➡️ URL Redirection Tracking**: The `redirected_url` field now captures the final destination after any redirects.
- **🪞 Improved Mirroring**: The `LXMLWebScrapingStrategy` now has much greater fidelity, allowing for almost pixel-perfect mirroring of websites.
- **📈 Enhanced Monitoring**: Track memory, CPU, and individual crawler status with `CrawlerMonitor`.
- **📝 Improved Documentation**: More examples, clearer explanations, and updated tutorials.
### Version 0.6.0rc1 Release Highlights

Read the full details in our [0.4.3bx Release Notes](https://github.com/unclecode/crawl4ai/blob/main/CHANGELOG.md).

- **🌎 World-aware Crawling**: Set geolocation, language, and timezone for authentic locale-specific content:
  ```python
  crawler_config = CrawlerRunConfig(
      geo_locale={"city": "Tokyo", "lang": "ja", "timezone": "Asia/Tokyo"}
  )
  ```

- **📊 Table-to-DataFrame Extraction**: Extract HTML tables directly to CSV or pandas DataFrames (a fuller usage sketch follows this list):
  ```python
  crawler_config = CrawlerRunConfig(extract_tables=True)
  # Access tables via result.tables or result.tables_as_dataframe
  ```

- **🚀 Browser Pooling**: Pages launch hot with pre-warmed browser instances for lower latency and memory usage

- **🕸️ Network and Console Capture**: Full traffic logs and MHTML snapshots for debugging:
  ```python
  crawler_config = CrawlerRunConfig(
      capture_network=True,
      capture_console=True,
      mhtml=True
  )
  ```

- **🔌 MCP Integration**: Connect to AI tools like Claude Code through the Model Context Protocol
  ```bash
  # Add Crawl4AI to Claude Code
  claude mcp add --transport sse c4ai-sse http://localhost:11235/mcp/sse
  ```

- **🖥️ Interactive Playground**: Test configurations and generate API requests with the built-in web interface at `/playground`

- **🐳 Revamped Docker Deployment**: Streamlined multi-architecture Docker image with improved resource efficiency

- **📱 Multi-stage Build System**: Optimized Dockerfile with platform-specific performance enhancements

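Below is a minimal end-to-end sketch of the table extraction highlighted above. It assumes each entry in `result.tables` is a dict with `headers` and `rows` keys; those field names are an assumption based on the bullet above, so verify them against the release notes.

```python
import asyncio

import pandas as pd  # used for the DataFrame conversion

from crawl4ai import AsyncWebCrawler, CrawlerRunConfig


async def main():
    config = CrawlerRunConfig(extract_tables=True)
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(
            "https://www.w3schools.com/html/html_tables.asp",
            config=config,
        )
        # Assumed shape: each table is a dict with "headers" and "rows"
        for i, table in enumerate(result.tables or []):
            df = pd.DataFrame(table["rows"], columns=table["headers"])
            df.to_csv(f"table_{i}.csv", index=False)
            print(f"table_{i}: {df.shape[0]} rows x {df.shape[1]} columns")


if __name__ == "__main__":
    asyncio.run(main())
```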
Read the full details in our [0.6.0rc1 Release Notes](https://docs.crawl4ai.com/blog/releases/0.6.0.html) or check the [CHANGELOG](https://github.com/unclecode/crawl4ai/blob/main/CHANGELOG.md).

### Previous Version: 0.5.0 Major Release Highlights

- **🚀 Deep Crawling System**: Explore websites beyond initial URLs with BFS, DFS, and BestFirst strategies
- **⚡ Memory-Adaptive Dispatcher**: Dynamically adjusts concurrency based on system memory
- **🔄 Multiple Crawling Strategies**: Browser-based and lightweight HTTP-only crawlers
- **💻 Command-Line Interface**: New `crwl` CLI provides convenient terminal access
- **👤 Browser Profiler**: Create and manage persistent browser profiles
- **🧠 Crawl4AI Coding Assistant**: AI-powered coding assistant
- **🏎️ LXML Scraping Mode**: Fast HTML parsing using the `lxml` library
- **🌐 Proxy Rotation**: Built-in support for proxy switching
- **🤖 LLM Content Filter**: Intelligent markdown generation using LLMs
- **📄 PDF Processing**: Extract text, images, and metadata from PDF files

Read the full details in our [0.5.0 Release Notes](https://docs.crawl4ai.com/blog/releases/0.5.0.html).

## Version Numbering in Crawl4AI

@@ -574,9 +631,83 @@ To check our development plans and upcoming features, visit our [Roadmap](https:

We welcome contributions from the open-source community. Check out our [contribution guidelines](https://github.com/unclecode/crawl4ai/blob/main/CONTRIBUTORS.md) for more information.

## 📄 License

Crawl4AI is released under the [Apache 2.0 License](https://github.com/unclecode/crawl4ai/blob/main/LICENSE).

## 📄 License & Attribution

This project is licensed under the Apache License 2.0 with a required attribution clause. See the [Apache 2.0 License](https://github.com/unclecode/crawl4ai/blob/main/LICENSE) file for details.

### Attribution Requirements
When using Crawl4AI, you must include one of the following attribution methods:

#### 1. Badge Attribution (Recommended)
Add one of these badges to your README, documentation, or website:

| Theme | Badge |
|-------|-------|
| **Disco Theme (Animated)** | <a href="https://github.com/unclecode/crawl4ai"><img src="./docs/assets/powered-by-disco.svg" alt="Powered by Crawl4AI" width="200"/></a> |
| **Night Theme (Dark with Neon)** | <a href="https://github.com/unclecode/crawl4ai"><img src="./docs/assets/powered-by-night.svg" alt="Powered by Crawl4AI" width="200"/></a> |
| **Dark Theme (Classic)** | <a href="https://github.com/unclecode/crawl4ai"><img src="./docs/assets/powered-by-dark.svg" alt="Powered by Crawl4AI" width="200"/></a> |
| **Light Theme (Classic)** | <a href="https://github.com/unclecode/crawl4ai"><img src="./docs/assets/powered-by-light.svg" alt="Powered by Crawl4AI" width="200"/></a> |

HTML code for adding the badges:
```html
<!-- Disco Theme (Animated) -->
<a href="https://github.com/unclecode/crawl4ai">
  <img src="https://raw.githubusercontent.com/unclecode/crawl4ai/main/docs/assets/powered-by-disco.svg" alt="Powered by Crawl4AI" width="200"/>
</a>

<!-- Night Theme (Dark with Neon) -->
<a href="https://github.com/unclecode/crawl4ai">
  <img src="https://raw.githubusercontent.com/unclecode/crawl4ai/main/docs/assets/powered-by-night.svg" alt="Powered by Crawl4AI" width="200"/>
</a>

<!-- Dark Theme (Classic) -->
<a href="https://github.com/unclecode/crawl4ai">
  <img src="https://raw.githubusercontent.com/unclecode/crawl4ai/main/docs/assets/powered-by-dark.svg" alt="Powered by Crawl4AI" width="200"/>
</a>

<!-- Light Theme (Classic) -->
<a href="https://github.com/unclecode/crawl4ai">
  <img src="https://raw.githubusercontent.com/unclecode/crawl4ai/main/docs/assets/powered-by-light.svg" alt="Powered by Crawl4AI" width="200"/>
</a>

<!-- Simple Shield Badge -->
<a href="https://github.com/unclecode/crawl4ai">
  <img src="https://img.shields.io/badge/Powered%20by-Crawl4AI-blue?style=flat-square" alt="Powered by Crawl4AI"/>
</a>
```

#### 2. Text Attribution
Add this line to your documentation:
```
This project uses Crawl4AI (https://github.com/unclecode/crawl4ai) for web data extraction.
```

## 📚 Citation

If you use Crawl4AI in your research or project, please cite:

```bibtex
@software{crawl4ai2024,
  author = {UncleCode},
  title = {Crawl4AI: Open-source LLM Friendly Web Crawler & Scraper},
  year = {2024},
  publisher = {GitHub},
  journal = {GitHub Repository},
  howpublished = {\url{https://github.com/unclecode/crawl4ai}},
  commit = {Please use the commit hash you're working with}
}
```

Text citation format:
```
UncleCode. (2024). Crawl4AI: Open-source LLM Friendly Web Crawler & Scraper [Computer software].
GitHub. https://github.com/unclecode/crawl4ai
```

## 📧 Contact

24
cliff.toml
Normal file
@@ -0,0 +1,24 @@
[changelog]
# Template format
header = """
# Changelog\n
All notable changes to this project will be documented in this file.\n
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).\n
"""

# Organize commits by type
[git]
conventional_commits = true
filter_unconventional = true
commit_parsers = [
    { message = "^feat", group = "Added"},
    { message = "^fix", group = "Fixed"},
    { message = "^doc", group = "Documentation"},
    { message = "^perf", group = "Performance"},
    { message = "^refactor", group = "Changed"},
    { message = "^style", group = "Changed"},
    { message = "^test", group = "Testing"},
    { message = "^chore\\(release\\): prepare for", skip = true},
    { message = "^chore", group = "Miscellaneous Tasks"},
]
@@ -1,46 +1,110 @@
# __init__.py
import warnings

from .async_webcrawler import AsyncWebCrawler, CacheMode
from .async_configs import BrowserConfig, CrawlerRunConfig
from .async_configs import BrowserConfig, CrawlerRunConfig, HTTPCrawlerConfig, LLMConfig, ProxyConfig, GeolocationConfig

from .content_scraping_strategy import (
    ContentScrapingStrategy,
    WebScrapingStrategy,
    LXMLWebScrapingStrategy,
)
from .async_logger import (
    AsyncLoggerBase,
    AsyncLogger,
)
from .proxy_strategy import (
    ProxyRotationStrategy,
    RoundRobinProxyStrategy,
)
from .extraction_strategy import (
    ExtractionStrategy,
    LLMExtractionStrategy,
    CosineStrategy,
    JsonCssExtractionStrategy,
    JsonXPathExtractionStrategy
    JsonXPathExtractionStrategy,
    JsonLxmlExtractionStrategy
)
from .chunking_strategy import ChunkingStrategy, RegexChunking
from .markdown_generation_strategy import DefaultMarkdownGenerator
from .content_filter_strategy import PruningContentFilter, BM25ContentFilter, LLMContentFilter, RelevantContentFilter
from .models import CrawlResult, MarkdownGenerationResult
from .content_filter_strategy import (
    PruningContentFilter,
    BM25ContentFilter,
    LLMContentFilter,
    RelevantContentFilter,
)
from .models import CrawlResult, MarkdownGenerationResult, DisplayMode
from .components.crawler_monitor import CrawlerMonitor
from .async_dispatcher import (
    MemoryAdaptiveDispatcher,
    SemaphoreDispatcher,
    RateLimiter,
    CrawlerMonitor,
    DisplayMode,
    BaseDispatcher
    BaseDispatcher,
)
from .docker_client import Crawl4aiDockerClient
from .hub import CrawlerHub
from .browser_profiler import BrowserProfiler
from .deep_crawling import (
    DeepCrawlStrategy,
    BFSDeepCrawlStrategy,
    FilterChain,
    URLPatternFilter,
    DomainFilter,
    ContentTypeFilter,
    URLFilter,
    FilterStats,
    SEOFilter,
    KeywordRelevanceScorer,
    URLScorer,
    CompositeScorer,
    DomainAuthorityScorer,
    FreshnessScorer,
    PathDepthScorer,
    BestFirstCrawlingStrategy,
    DFSDeepCrawlStrategy,
    DeepCrawlDecorator,
)

__all__ = [
    "AsyncLoggerBase",
    "AsyncLogger",
    "AsyncWebCrawler",
    "BrowserProfiler",
    "LLMConfig",
    "GeolocationConfig",
    "DeepCrawlStrategy",
    "BFSDeepCrawlStrategy",
    "BestFirstCrawlingStrategy",
    "DFSDeepCrawlStrategy",
    "FilterChain",
    "URLPatternFilter",
    "ContentTypeFilter",
    "DomainFilter",
    "FilterStats",
    "URLFilter",
    "SEOFilter",
    "KeywordRelevanceScorer",
    "URLScorer",
    "CompositeScorer",
    "DomainAuthorityScorer",
    "FreshnessScorer",
    "PathDepthScorer",
    "DeepCrawlDecorator",
    "CrawlResult",
    "CrawlerHub",
    "CacheMode",
    "ContentScrapingStrategy",
    "WebScrapingStrategy",
    "LXMLWebScrapingStrategy",
    "BrowserConfig",
    "CrawlerRunConfig",
    "HTTPCrawlerConfig",
    "ExtractionStrategy",
    "LLMExtractionStrategy",
    "CosineStrategy",
    "JsonCssExtractionStrategy",
    "JsonXPathExtractionStrategy",
    "JsonLxmlExtractionStrategy",
    "ChunkingStrategy",
    "RegexChunking",
    "DefaultMarkdownGenerator",
@@ -55,35 +119,36 @@ __all__ = [
    "CrawlerMonitor",
    "DisplayMode",
    "MarkdownGenerationResult",
    "Crawl4aiDockerClient",
    "ProxyRotationStrategy",
    "RoundRobinProxyStrategy",
    "ProxyConfig"
]


def is_sync_version_installed():
    try:
        import selenium
# def is_sync_version_installed():
#     try:
#         import selenium  # noqa

        return True
    except ImportError:
        return False
#         return True
#     except ImportError:
#         return False


if is_sync_version_installed():
    try:
        from .web_crawler import WebCrawler
# if is_sync_version_installed():
#     try:
#         from .web_crawler import WebCrawler

        __all__.append("WebCrawler")
    except ImportError:
        print(
            "Warning: Failed to import WebCrawler even though selenium is installed. This might be due to other missing dependencies."
        )
else:
    WebCrawler = None
    # import warnings
    # print("Warning: Synchronous WebCrawler is not available. Install crawl4ai[sync] for synchronous support. However, please note that the synchronous version will be deprecated soon.")

import warnings
from pydantic import warnings as pydantic_warnings
#         __all__.append("WebCrawler")
#     except ImportError:
#         print(
#             "Warning: Failed to import WebCrawler even though selenium is installed. This might be due to other missing dependencies."
#         )
#     else:
#         WebCrawler = None
#         # import warnings
#         # print("Warning: Synchronous WebCrawler is not available. Install crawl4ai[sync] for synchronous support. However, please note that the synchronous version will be deprecated soon.")

# Disable all Pydantic warnings
warnings.filterwarnings("ignore", module="pydantic")
# pydantic_warnings.filter_warnings()

@@ -1,2 +1,3 @@
# crawl4ai/_version.py
__version__ = "0.4.3b3"
__version__ = "0.6.0rc1"

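The expanded public API above can be smoke-tested directly from the package root. A minimal sketch, using only names that appear in the `__init__.py` and `_version.py` diffs:

```python
# Verify the version bump and the newly exported names from the diff above.
import crawl4ai
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, LLMConfig, GeolocationConfig  # noqa: F401

print(crawl4ai.__version__)  # expected: "0.6.0rc1" per the _version.py diff

new_names = ("LLMConfig", "GeolocationConfig", "HTTPCrawlerConfig", "JsonLxmlExtractionStrategy")
print([name for name in new_names if name in crawl4ai.__all__])
```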
File diff suppressed because it is too large
Load Diff
@@ -4,19 +4,14 @@ import aiosqlite
import asyncio
from typing import Optional, Dict
from contextlib import asynccontextmanager
import logging
import json  # Added for serialization/deserialization
from .utils import ensure_content_dirs, generate_content_hash
from .models import CrawlResult, MarkdownGenerationResult
import json
from .models import CrawlResult, MarkdownGenerationResult, StringCompatibleMarkdown
import aiofiles
from .version_manager import VersionManager
from .async_logger import AsyncLogger
from .utils import get_error_context, create_box_message

# Set up logging
# logging.basicConfig(level=logging.INFO)
# logger = logging.getLogger(__name__)
# logger.setLevel(logging.INFO)
from .utils import ensure_content_dirs, generate_content_hash
from .utils import VersionManager
from .utils import get_error_context, create_box_message

base_directory = DB_PATH = os.path.join(
    os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".crawl4ai"
@@ -336,12 +331,17 @@ class AsyncDatabaseManager:
                except json.JSONDecodeError:
                    # Very UGLY, never mention it to me please
                    if field == "markdown" and isinstance(row_dict[field], str):
                        row_dict[field] = row_dict[field]
                        row_dict[field] = MarkdownGenerationResult(
                            raw_markdown=row_dict[field] or "",
                            markdown_with_citations="",
                            references_markdown="",
                            fit_markdown="",
                            fit_html="",
                        )
                    else:
                        row_dict[field] = {}

            if isinstance(row_dict["markdown"], Dict):
                row_dict["markdown_v2"] = row_dict["markdown"]
                if row_dict["markdown"].get("raw_markdown"):
                    row_dict["markdown"] = row_dict["markdown"]["raw_markdown"]

@@ -358,7 +358,7 @@ class AsyncDatabaseManager:
            # Remove any fields not in CrawlResult model
            valid_fields = CrawlResult.__annotations__.keys()
            filtered_dict = {k: v for k, v in row_dict.items() if k in valid_fields}

            filtered_dict["markdown"] = row_dict["markdown"]
            return CrawlResult(**filtered_dict)

        try:
@@ -384,14 +384,14 @@ class AsyncDatabaseManager:
            }

            try:
                if isinstance(result.markdown, MarkdownGenerationResult):
                if isinstance(result.markdown, StringCompatibleMarkdown):
                    content_map["markdown"] = (
                        result.markdown.model_dump_json(),
                        result.markdown,
                        "markdown",
                    )
                elif hasattr(result, "markdown_v2"):
                elif isinstance(result.markdown, MarkdownGenerationResult):
                    content_map["markdown"] = (
                        result.markdown_v2.model_dump_json(),
                        result.markdown.model_dump_json(),
                        "markdown",
                    )
                elif isinstance(result.markdown, str):

@@ -4,17 +4,15 @@ from .models import (
    CrawlResult,
    CrawlerTaskResult,
    CrawlStatus,
    DisplayMode,
    CrawlStats,
    DomainState,
)

from rich.live import Live
from rich.table import Table
from rich.console import Console
from rich import box
from datetime import datetime, timedelta
from .components.crawler_monitor import CrawlerMonitor

from .types import AsyncWebCrawler

from collections.abc import AsyncGenerator

import time
import psutil
import asyncio
@@ -25,7 +23,6 @@ import random
from abc import ABC, abstractmethod


class RateLimiter:
    def __init__(
        self,
@@ -86,201 +83,6 @@ class RateLimiter:
        return True


class CrawlerMonitor:
    def __init__(
        self,
        max_visible_rows: int = 15,
        display_mode: DisplayMode = DisplayMode.DETAILED,
    ):
        self.console = Console()
        self.max_visible_rows = max_visible_rows
        self.display_mode = display_mode
        self.stats: Dict[str, CrawlStats] = {}
        self.process = psutil.Process()
        self.start_time = datetime.now()
        self.live = Live(self._create_table(), refresh_per_second=2)

    def start(self):
        self.live.start()

    def stop(self):
        self.live.stop()

    def add_task(self, task_id: str, url: str):
        self.stats[task_id] = CrawlStats(
            task_id=task_id, url=url, status=CrawlStatus.QUEUED
        )
        self.live.update(self._create_table())

    def update_task(self, task_id: str, **kwargs):
        if task_id in self.stats:
            for key, value in kwargs.items():
                setattr(self.stats[task_id], key, value)
            self.live.update(self._create_table())

    def _create_aggregated_table(self) -> Table:
        """Creates a compact table showing only aggregated statistics"""
        table = Table(
            box=box.ROUNDED,
            title="Crawler Status Overview",
            title_style="bold magenta",
            header_style="bold blue",
            show_lines=True,
        )

        # Calculate statistics
        total_tasks = len(self.stats)
        queued = sum(
            1 for stat in self.stats.values() if stat.status == CrawlStatus.QUEUED
        )
        in_progress = sum(
            1 for stat in self.stats.values() if stat.status == CrawlStatus.IN_PROGRESS
        )
        completed = sum(
            1 for stat in self.stats.values() if stat.status == CrawlStatus.COMPLETED
        )
        failed = sum(
            1 for stat in self.stats.values() if stat.status == CrawlStatus.FAILED
        )

        # Memory statistics
        current_memory = self.process.memory_info().rss / (1024 * 1024)
        total_task_memory = sum(stat.memory_usage for stat in self.stats.values())
        peak_memory = max(
            (stat.peak_memory for stat in self.stats.values()), default=0.0
        )

        # Duration
        duration = datetime.now() - self.start_time

        # Create status row
        table.add_column("Status", style="bold cyan")
        table.add_column("Count", justify="right")
        table.add_column("Percentage", justify="right")

        table.add_row("Total Tasks", str(total_tasks), "100%")
        table.add_row(
            "[yellow]In Queue[/yellow]",
            str(queued),
            f"{(queued/total_tasks*100):.1f}%" if total_tasks > 0 else "0%",
        )
        table.add_row(
            "[blue]In Progress[/blue]",
            str(in_progress),
            f"{(in_progress/total_tasks*100):.1f}%" if total_tasks > 0 else "0%",
        )
        table.add_row(
            "[green]Completed[/green]",
            str(completed),
            f"{(completed/total_tasks*100):.1f}%" if total_tasks > 0 else "0%",
        )
        table.add_row(
            "[red]Failed[/red]",
            str(failed),
            f"{(failed/total_tasks*100):.1f}%" if total_tasks > 0 else "0%",
        )

        # Add memory information
        table.add_section()
        table.add_row(
            "[magenta]Current Memory[/magenta]", f"{current_memory:.1f} MB", ""
        )
        table.add_row(
            "[magenta]Total Task Memory[/magenta]", f"{total_task_memory:.1f} MB", ""
        )
        table.add_row(
            "[magenta]Peak Task Memory[/magenta]", f"{peak_memory:.1f} MB", ""
        )
        table.add_row(
            "[yellow]Runtime[/yellow]",
            str(timedelta(seconds=int(duration.total_seconds()))),
            "",
        )

        return table

    def _create_detailed_table(self) -> Table:
        table = Table(
            box=box.ROUNDED,
            title="Crawler Performance Monitor",
            title_style="bold magenta",
            header_style="bold blue",
        )

        # Add columns
        table.add_column("Task ID", style="cyan", no_wrap=True)
        table.add_column("URL", style="cyan", no_wrap=True)
        table.add_column("Status", style="bold")
        table.add_column("Memory (MB)", justify="right")
        table.add_column("Peak (MB)", justify="right")
        table.add_column("Duration", justify="right")
        table.add_column("Info", style="italic")

        # Add summary row
        total_memory = sum(stat.memory_usage for stat in self.stats.values())
        active_count = sum(
            1 for stat in self.stats.values() if stat.status == CrawlStatus.IN_PROGRESS
        )
        completed_count = sum(
            1 for stat in self.stats.values() if stat.status == CrawlStatus.COMPLETED
        )
        failed_count = sum(
            1 for stat in self.stats.values() if stat.status == CrawlStatus.FAILED
        )

        table.add_row(
            "[bold yellow]SUMMARY",
            f"Total: {len(self.stats)}",
            f"Active: {active_count}",
            f"{total_memory:.1f}",
            f"{self.process.memory_info().rss / (1024 * 1024):.1f}",
            str(
                timedelta(
                    seconds=int((datetime.now() - self.start_time).total_seconds())
                )
            ),
            f"✓{completed_count} ✗{failed_count}",
            style="bold",
        )

        table.add_section()

        # Add rows for each task
        visible_stats = sorted(
            self.stats.values(),
            key=lambda x: (
                x.status != CrawlStatus.IN_PROGRESS,
                x.status != CrawlStatus.QUEUED,
                x.end_time or datetime.max,
            ),
        )[: self.max_visible_rows]

        for stat in visible_stats:
            status_style = {
                CrawlStatus.QUEUED: "white",
                CrawlStatus.IN_PROGRESS: "yellow",
                CrawlStatus.COMPLETED: "green",
                CrawlStatus.FAILED: "red",
            }[stat.status]

            table.add_row(
                stat.task_id[:8],  # Show first 8 chars of task ID
                stat.url[:40] + "..." if len(stat.url) > 40 else stat.url,
                f"[{status_style}]{stat.status.value}[/{status_style}]",
                f"{stat.memory_usage:.1f}",
                f"{stat.peak_memory:.1f}",
                stat.duration,
                stat.error_message[:40] if stat.error_message else "",
            )

        return table

    def _create_table(self) -> Table:
        """Creates the appropriate table based on display mode"""
        if self.display_mode == DisplayMode.AGGREGATED:
            return self._create_aggregated_table()
        return self._create_detailed_table()


class BaseDispatcher(ABC):
    def __init__(
@@ -308,7 +110,7 @@ class BaseDispatcher(ABC):
    async def run_urls(
        self,
        urls: List[str],
        crawler: "AsyncWebCrawler",  # noqa: F821
        crawler: AsyncWebCrawler,  # noqa: F821
        config: CrawlerRunConfig,
        monitor: Optional[CrawlerMonitor] = None,
    ) -> List[CrawlerTaskResult]:
@@ -319,71 +121,144 @@ class MemoryAdaptiveDispatcher(BaseDispatcher):
    def __init__(
        self,
        memory_threshold_percent: float = 90.0,
        critical_threshold_percent: float = 95.0,  # New critical threshold
        recovery_threshold_percent: float = 85.0,  # New recovery threshold
        check_interval: float = 1.0,
        max_session_permit: int = 20,
        memory_wait_timeout: float = 300.0,  # 5 minutes default timeout
        fairness_timeout: float = 600.0,  # 10 minutes before prioritizing long-waiting URLs
        rate_limiter: Optional[RateLimiter] = None,
        monitor: Optional[CrawlerMonitor] = None,
    ):
        super().__init__(rate_limiter, monitor)
        self.memory_threshold_percent = memory_threshold_percent
        self.critical_threshold_percent = critical_threshold_percent
        self.recovery_threshold_percent = recovery_threshold_percent
        self.check_interval = check_interval
        self.max_session_permit = max_session_permit
        self.memory_wait_timeout = memory_wait_timeout
        self.result_queue = asyncio.Queue()  # Queue for storing results

        self.fairness_timeout = fairness_timeout
        self.result_queue = asyncio.Queue()
        self.task_queue = asyncio.PriorityQueue()  # Priority queue for better management
        self.memory_pressure_mode = False  # Flag to indicate when we're in memory pressure mode
        self.current_memory_percent = 0.0  # Track current memory usage

    async def _memory_monitor_task(self):
        """Background task to continuously monitor memory usage and update state"""
        while True:
            self.current_memory_percent = psutil.virtual_memory().percent

            # Enter memory pressure mode if we cross the threshold
            if not self.memory_pressure_mode and self.current_memory_percent >= self.memory_threshold_percent:
                self.memory_pressure_mode = True
                if self.monitor:
                    self.monitor.update_memory_status("PRESSURE")

            # Exit memory pressure mode if we go below recovery threshold
            elif self.memory_pressure_mode and self.current_memory_percent <= self.recovery_threshold_percent:
                self.memory_pressure_mode = False
                if self.monitor:
                    self.monitor.update_memory_status("NORMAL")

            # In critical mode, we might need to take more drastic action
            if self.current_memory_percent >= self.critical_threshold_percent:
                if self.monitor:
                    self.monitor.update_memory_status("CRITICAL")
                # We could implement additional memory-saving measures here

            await asyncio.sleep(self.check_interval)

    def _get_priority_score(self, wait_time: float, retry_count: int) -> float:
        """Calculate priority score (lower is higher priority)
        - URLs waiting longer than fairness_timeout get higher priority
        - More retry attempts decreases priority
        """
        if wait_time > self.fairness_timeout:
            # High priority for long-waiting URLs
            return -wait_time
        # Standard priority based on retries
        return retry_count

    async def crawl_url(
        self,
        url: str,
        config: CrawlerRunConfig,
        task_id: str,
        retry_count: int = 0,
    ) -> CrawlerTaskResult:
        start_time = datetime.now()
        start_time = time.time()
        error_message = ""
        memory_usage = peak_memory = 0.0

        # Get starting memory for accurate measurement
        process = psutil.Process()
        start_memory = process.memory_info().rss / (1024 * 1024)

        try:
            if self.monitor:
                self.monitor.update_task(
                    task_id, status=CrawlStatus.IN_PROGRESS, start_time=start_time
                    task_id,
                    status=CrawlStatus.IN_PROGRESS,
                    start_time=start_time,
                    retry_count=retry_count
                )

            self.concurrent_sessions += 1

            if self.rate_limiter:
                await self.rate_limiter.wait_if_needed(url)

            process = psutil.Process()
            start_memory = process.memory_info().rss / (1024 * 1024)

            # Check if we're in critical memory state
            if self.current_memory_percent >= self.critical_threshold_percent:
                # Requeue this task with increased priority and retry count
                enqueue_time = time.time()
                priority = self._get_priority_score(enqueue_time - start_time, retry_count + 1)
                await self.task_queue.put((priority, (url, task_id, retry_count + 1, enqueue_time)))

                # Update monitoring
                if self.monitor:
                    self.monitor.update_task(
                        task_id,
                        status=CrawlStatus.QUEUED,
                        error_message="Requeued due to critical memory pressure"
                    )

                # Return placeholder result with requeued status
                return CrawlerTaskResult(
                    task_id=task_id,
                    url=url,
                    result=CrawlResult(
                        url=url, html="", metadata={"status": "requeued"},
                        success=False, error_message="Requeued due to critical memory pressure"
                    ),
                    memory_usage=0,
                    peak_memory=0,
                    start_time=start_time,
                    end_time=time.time(),
                    error_message="Requeued due to critical memory pressure",
                    retry_count=retry_count + 1
                )

            # Execute the crawl
            result = await self.crawler.arun(url, config=config, session_id=task_id)

            # Measure memory usage
            end_memory = process.memory_info().rss / (1024 * 1024)

            memory_usage = peak_memory = end_memory - start_memory

            # Handle rate limiting
            if self.rate_limiter and result.status_code:
                if not self.rate_limiter.update_delay(url, result.status_code):
                    error_message = f"Rate limit retry count exceeded for domain {urlparse(url).netloc}"
                    if self.monitor:
                        self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
                    result = CrawlerTaskResult(
                        task_id=task_id,
                        url=url,
                        result=result,
                        memory_usage=memory_usage,
                        peak_memory=peak_memory,
                        start_time=start_time,
                        end_time=datetime.now(),
                        error_message=error_message,
                    )
                    await self.result_queue.put(result)
                    return result

            # Update status based on result
            if not result.success:
                error_message = result.error_message
                if self.monitor:
                    self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
            elif self.monitor:
                self.monitor.update_task(task_id, status=CrawlStatus.COMPLETED)

        except Exception as e:
            error_message = str(e)
            if self.monitor:
@@ -391,9 +266,9 @@ class MemoryAdaptiveDispatcher(BaseDispatcher):
            result = CrawlResult(
                url=url, html="", metadata={}, success=False, error_message=str(e)
            )

        finally:
            end_time = datetime.now()
            end_time = time.time()
            if self.monitor:
                self.monitor.update_task(
                    task_id,
@@ -401,9 +276,10 @@ class MemoryAdaptiveDispatcher(BaseDispatcher):
                    memory_usage=memory_usage,
                    peak_memory=peak_memory,
                    error_message=error_message,
                    retry_count=retry_count
                )
            self.concurrent_sessions -= 1

        return CrawlerTaskResult(
            task_id=task_id,
            url=url,
@@ -413,117 +289,240 @@ class MemoryAdaptiveDispatcher(BaseDispatcher):
            start_time=start_time,
            end_time=end_time,
            error_message=error_message,
            retry_count=retry_count
        )

    async def run_urls(
        self,
        urls: List[str],
        crawler: "AsyncWebCrawler",  # noqa: F821
        crawler: AsyncWebCrawler,
        config: CrawlerRunConfig,
    ) -> List[CrawlerTaskResult]:
        self.crawler = crawler

        if self.monitor:
            self.monitor.start()

        try:
            pending_tasks = []
            active_tasks = []
            task_queue = []

            for url in urls:
                task_id = str(uuid.uuid4())
                if self.monitor:
                    self.monitor.add_task(task_id, url)
                task_queue.append((url, task_id))

            while task_queue or active_tasks:
                wait_start_time = time.time()
                while len(active_tasks) < self.max_session_permit and task_queue:
                    if psutil.virtual_memory().percent >= self.memory_threshold_percent:
                        # Check if we've exceeded the timeout
                        if time.time() - wait_start_time > self.memory_wait_timeout:
                            raise MemoryError(
                                f"Memory usage above threshold ({self.memory_threshold_percent}%) for more than {self.memory_wait_timeout} seconds"
                            )
                        await asyncio.sleep(self.check_interval)
                        continue

                    url, task_id = task_queue.pop(0)
                    task = asyncio.create_task(self.crawl_url(url, config, task_id))
                    active_tasks.append(task)

                if not active_tasks:
                    await asyncio.sleep(self.check_interval)
                    continue

                done, pending = await asyncio.wait(
                    active_tasks, return_when=asyncio.FIRST_COMPLETED
                )

                pending_tasks.extend(done)
                active_tasks = list(pending)

            return await asyncio.gather(*pending_tasks)
        finally:
            if self.monitor:
                self.monitor.stop()

    async def run_urls_stream(
        self,
        urls: List[str],
        crawler: "AsyncWebCrawler",
        config: CrawlerRunConfig,
    ) -> AsyncGenerator[CrawlerTaskResult, None]:
    ) -> List[CrawlerTaskResult]:
        self.crawler = crawler

        # Start the memory monitor task
        memory_monitor = asyncio.create_task(self._memory_monitor_task())

        if self.monitor:
            self.monitor.start()

        results = []

        try:
            active_tasks = []
            task_queue = []
            completed_count = 0
            total_urls = len(urls)

            # Initialize task queue
            for url in urls:
                task_id = str(uuid.uuid4())
                if self.monitor:
                    self.monitor.add_task(task_id, url)
                task_queue.append((url, task_id))

            while completed_count < total_urls:
                # Start new tasks if memory permits
                while len(active_tasks) < self.max_session_permit and task_queue:
                    if psutil.virtual_memory().percent >= self.memory_threshold_percent:
                        await asyncio.sleep(self.check_interval)
                        continue

                    url, task_id = task_queue.pop(0)
                    task = asyncio.create_task(self.crawl_url(url, config, task_id))
                    active_tasks.append(task)

                if not active_tasks and not task_queue:
                    break

                # Wait for any task to complete and yield results
                # Add to queue with initial priority 0, retry count 0, and current time
                await self.task_queue.put((0, (url, task_id, 0, time.time())))

            active_tasks = []

            # Process until both queues are empty
            while not self.task_queue.empty() or active_tasks:
                # If memory pressure is low, start new tasks
                if not self.memory_pressure_mode and len(active_tasks) < self.max_session_permit:
                    try:
                        # Try to get a task with timeout to avoid blocking indefinitely
                        priority, (url, task_id, retry_count, enqueue_time) = await asyncio.wait_for(
                            self.task_queue.get(), timeout=0.1
                        )

                        # Create and start the task
                        task = asyncio.create_task(
                            self.crawl_url(url, config, task_id, retry_count)
                        )
                        active_tasks.append(task)

                        # Update waiting time in monitor
                        if self.monitor:
                            wait_time = time.time() - enqueue_time
                            self.monitor.update_task(
                                task_id,
                                wait_time=wait_time,
                                status=CrawlStatus.IN_PROGRESS
                            )

                    except asyncio.TimeoutError:
                        # No tasks in queue, that's fine
                        pass

                # Wait for completion even if queue is starved
                if active_tasks:
                    done, pending = await asyncio.wait(
                        active_tasks,
                        timeout=0.1,
                        return_when=asyncio.FIRST_COMPLETED
                        active_tasks, timeout=0.1, return_when=asyncio.FIRST_COMPLETED
                    )

                    # Process completed tasks
                    for completed_task in done:
                        result = await completed_task
                        completed_count += 1
                        yield result
                        results.append(result)

                    # Update active tasks list
                    active_tasks = list(pending)
                else:
                    await asyncio.sleep(self.check_interval)
                    # If no active tasks but still waiting, sleep briefly
                    await asyncio.sleep(self.check_interval / 2)

                # Update priorities for waiting tasks if needed
                await self._update_queue_priorities()

            return results

        except Exception as e:
            if self.monitor:
                self.monitor.update_memory_status(f"QUEUE_ERROR: {str(e)}")

        finally:
            # Clean up
            memory_monitor.cancel()
            if self.monitor:
                self.monitor.stop()

    async def _update_queue_priorities(self):
        """Periodically update priorities of items in the queue to prevent starvation"""
        # Skip if queue is empty
        if self.task_queue.empty():
            return

        # Use a drain-and-refill approach to update all priorities
        temp_items = []

        # Drain the queue (with a safety timeout to prevent blocking)
        try:
            drain_start = time.time()
            while not self.task_queue.empty() and time.time() - drain_start < 5.0:  # 5 second safety timeout
                try:
                    # Get item from queue with timeout
                    priority, (url, task_id, retry_count, enqueue_time) = await asyncio.wait_for(
                        self.task_queue.get(), timeout=0.1
                    )

                    # Calculate new priority based on current wait time
                    current_time = time.time()
                    wait_time = current_time - enqueue_time
                    new_priority = self._get_priority_score(wait_time, retry_count)

                    # Store with updated priority
                    temp_items.append((new_priority, (url, task_id, retry_count, enqueue_time)))

                    # Update monitoring stats for this task
                    if self.monitor and task_id in self.monitor.stats:
                        self.monitor.update_task(task_id, wait_time=wait_time)

                except asyncio.TimeoutError:
                    # Queue might be empty or very slow
                    break
        except Exception as e:
            # If anything goes wrong, make sure we refill the queue with what we've got
            self.monitor.update_memory_status(f"QUEUE_ERROR: {str(e)}")

        # Calculate queue statistics
        if temp_items and self.monitor:
            total_queued = len(temp_items)
            wait_times = [item[1][3] for item in temp_items]
            highest_wait_time = time.time() - min(wait_times) if wait_times else 0
            avg_wait_time = sum(time.time() - t for t in wait_times) / len(wait_times) if wait_times else 0

            # Update queue statistics in monitor
            self.monitor.update_queue_statistics(
                total_queued=total_queued,
                highest_wait_time=highest_wait_time,
                avg_wait_time=avg_wait_time
            )

        # Sort by priority (lowest number = highest priority)
        temp_items.sort(key=lambda x: x[0])

        # Refill the queue with updated priorities
        for item in temp_items:
            await self.task_queue.put(item)

    async def run_urls_stream(
        self,
        urls: List[str],
        crawler: AsyncWebCrawler,
        config: CrawlerRunConfig,
    ) -> AsyncGenerator[CrawlerTaskResult, None]:
        self.crawler = crawler

        # Start the memory monitor task
        memory_monitor = asyncio.create_task(self._memory_monitor_task())

        if self.monitor:
            self.monitor.start()

        try:
            # Initialize task queue
            for url in urls:
                task_id = str(uuid.uuid4())
                if self.monitor:
                    self.monitor.add_task(task_id, url)
                # Add to queue with initial priority 0, retry count 0, and current time
                await self.task_queue.put((0, (url, task_id, 0, time.time())))

            active_tasks = []
            completed_count = 0
            total_urls = len(urls)

            while completed_count < total_urls:
                # If memory pressure is low, start new tasks
                if not self.memory_pressure_mode and len(active_tasks) < self.max_session_permit:
                    try:
                        # Try to get a task with timeout
                        priority, (url, task_id, retry_count, enqueue_time) = await asyncio.wait_for(
                            self.task_queue.get(), timeout=0.1
                        )

                        # Create and start the task
                        task = asyncio.create_task(
                            self.crawl_url(url, config, task_id, retry_count)
                        )
                        active_tasks.append(task)

                        # Update waiting time in monitor
                        if self.monitor:
                            wait_time = time.time() - enqueue_time
                            self.monitor.update_task(
                                task_id,
                                wait_time=wait_time,
                                status=CrawlStatus.IN_PROGRESS
                            )

                    except asyncio.TimeoutError:
                        # No tasks in queue, that's fine
                        pass

                # Process completed tasks and yield results
                if active_tasks:
                    done, pending = await asyncio.wait(
                        active_tasks, timeout=0.1, return_when=asyncio.FIRST_COMPLETED
                    )

                    for completed_task in done:
                        result = await completed_task

                        # Only count as completed if it wasn't requeued
                        if "requeued" not in result.error_message:
                            completed_count += 1
                            yield result

                    # Update active tasks list
                    active_tasks = list(pending)
                else:
                    # If no active tasks but still waiting, sleep briefly
                    await asyncio.sleep(self.check_interval / 2)

                # Update priorities for waiting tasks if needed
                await self._update_queue_priorities()

        finally:
            # Clean up
            memory_monitor.cancel()
            if self.monitor:
                self.monitor.stop()


class SemaphoreDispatcher(BaseDispatcher):
    def __init__(
@@ -544,7 +543,7 @@ class SemaphoreDispatcher(BaseDispatcher):
        task_id: str,
        semaphore: asyncio.Semaphore = None,
    ) -> CrawlerTaskResult:
        start_time = datetime.now()
        start_time = time.time()
        error_message = ""
        memory_usage = peak_memory = 0.0

@@ -577,7 +576,7 @@ class SemaphoreDispatcher(BaseDispatcher):
            memory_usage=memory_usage,
            peak_memory=peak_memory,
            start_time=start_time,
            end_time=datetime.now(),
            end_time=time.time(),
            error_message=error_message,
        )

@@ -597,7 +596,7 @@ class SemaphoreDispatcher(BaseDispatcher):
            )

        finally:
            end_time = datetime.now()
            end_time = time.time()
            if self.monitor:
                self.monitor.update_task(
                    task_id,
@@ -620,7 +619,7 @@ class SemaphoreDispatcher(BaseDispatcher):

    async def run_urls(
        self,
        crawler: "AsyncWebCrawler",  # noqa: F821
        crawler: AsyncWebCrawler,  # noqa: F821
        urls: List[str],
        config: CrawlerRunConfig,
    ) -> List[CrawlerTaskResult]:
@@ -644,4 +643,4 @@ class SemaphoreDispatcher(BaseDispatcher):
        return await asyncio.gather(*tasks, return_exceptions=True)
    finally:
        if self.monitor:
            self.monitor.stop()

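For context, a typical way to drive the dispatchers shown above is through the crawler's `arun_many` entry point. A minimal sketch follows; `MemoryAdaptiveDispatcher` and `RateLimiter` come from this diff, while the `dispatcher` argument to `arun_many` and the streaming iteration pattern are assumptions based on the 0.5.0 highlights earlier in this README diff.

```python
import asyncio

from crawl4ai import (
    AsyncWebCrawler,
    CrawlerRunConfig,
    MemoryAdaptiveDispatcher,
    RateLimiter,
)


async def main():
    dispatcher = MemoryAdaptiveDispatcher(
        memory_threshold_percent=90.0,   # pause new tasks above this usage
        max_session_permit=10,           # cap on concurrent crawl sessions
        rate_limiter=RateLimiter(base_delay=(1.0, 3.0)),
    )
    config = CrawlerRunConfig(stream=True)  # stream=True -> run_urls_stream path

    async with AsyncWebCrawler() as crawler:
        # Assumption: arun_many accepts a dispatcher and, with stream=True,
        # yields results as they complete rather than as one batch.
        async for result in await crawler.arun_many(
            ["https://example.com", "https://example.org"],
            config=config,
            dispatcher=dispatcher,
        ):
            print(result.url, result.success)


if __name__ == "__main__":
    asyncio.run(main())
```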
@@ -1,588 +0,0 @@
|
||||
from typing import Dict, Optional, List, Tuple
|
||||
from .async_configs import CrawlerRunConfig
|
||||
from .models import (
|
||||
CrawlResult,
|
||||
CrawlerTaskResult,
|
||||
CrawlStatus,
|
||||
DisplayMode,
|
||||
CrawlStats,
|
||||
DomainState,
|
||||
)
|
||||
|
||||
from rich.live import Live
|
||||
from rich.table import Table
|
||||
from rich.console import Console
|
||||
from rich import box
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
import time
|
||||
import psutil
|
||||
import asyncio
|
||||
import uuid
|
||||
|
||||
from urllib.parse import urlparse
|
||||
import random
|
||||
from abc import ABC, abstractmethod
|
||||
|
||||
|
||||
class RateLimiter:
|
||||
def __init__(
|
||||
self,
|
||||
base_delay: Tuple[float, float] = (1.0, 3.0),
|
||||
max_delay: float = 60.0,
|
||||
max_retries: int = 3,
|
||||
rate_limit_codes: List[int] = None,
|
||||
):
|
||||
self.base_delay = base_delay
|
||||
self.max_delay = max_delay
|
||||
self.max_retries = max_retries
|
||||
self.rate_limit_codes = rate_limit_codes or [429, 503]
|
||||
self.domains: Dict[str, DomainState] = {}
|
||||
|
||||
def get_domain(self, url: str) -> str:
|
||||
return urlparse(url).netloc
|
||||
|
||||
async def wait_if_needed(self, url: str) -> None:
|
||||
domain = self.get_domain(url)
|
||||
state = self.domains.get(domain)
|
||||
|
||||
if not state:
|
||||
self.domains[domain] = DomainState()
|
||||
state = self.domains[domain]
|
||||
|
||||
now = time.time()
|
||||
if state.last_request_time:
|
||||
wait_time = max(0, state.current_delay - (now - state.last_request_time))
|
||||
if wait_time > 0:
|
||||
await asyncio.sleep(wait_time)
|
||||
|
||||
# Random delay within base range if no current delay
|
||||
if state.current_delay == 0:
|
||||
state.current_delay = random.uniform(*self.base_delay)
|
||||
|
||||
state.last_request_time = time.time()
|
||||
|
||||
def update_delay(self, url: str, status_code: int) -> bool:
|
||||
domain = self.get_domain(url)
|
||||
state = self.domains[domain]
|
||||
|
||||
if status_code in self.rate_limit_codes:
|
||||
state.fail_count += 1
|
||||
if state.fail_count > self.max_retries:
|
||||
return False
|
||||
|
||||
# Exponential backoff with random jitter
|
||||
state.current_delay = min(
|
||||
state.current_delay * 2 * random.uniform(0.75, 1.25), self.max_delay
|
||||
)
|
||||
else:
|
||||
# Gradually reduce delay on success
|
||||
state.current_delay = max(
|
||||
random.uniform(*self.base_delay), state.current_delay * 0.75
|
||||
)
|
||||
state.fail_count = 0
|
||||
|
||||
return True
|
||||
|
||||
|
||||
class CrawlerMonitor:
    def __init__(
        self,
        max_visible_rows: int = 15,
        display_mode: DisplayMode = DisplayMode.DETAILED,
    ):
        self.console = Console()
        self.max_visible_rows = max_visible_rows
        self.display_mode = display_mode
        self.stats: Dict[str, CrawlStats] = {}
        self.process = psutil.Process()
        self.start_time = datetime.now()
        self.live = Live(self._create_table(), refresh_per_second=2)

    def start(self):
        self.live.start()

    def stop(self):
        self.live.stop()

    def add_task(self, task_id: str, url: str):
        self.stats[task_id] = CrawlStats(
            task_id=task_id, url=url, status=CrawlStatus.QUEUED
        )
        self.live.update(self._create_table())

    def update_task(self, task_id: str, **kwargs):
        if task_id in self.stats:
            for key, value in kwargs.items():
                setattr(self.stats[task_id], key, value)
            self.live.update(self._create_table())

    def _create_aggregated_table(self) -> Table:
        """Creates a compact table showing only aggregated statistics"""
        table = Table(
            box=box.ROUNDED,
            title="Crawler Status Overview",
            title_style="bold magenta",
            header_style="bold blue",
            show_lines=True,
        )

        # Calculate statistics
        total_tasks = len(self.stats)
        queued = sum(
            1 for stat in self.stats.values() if stat.status == CrawlStatus.QUEUED
        )
        in_progress = sum(
            1 for stat in self.stats.values() if stat.status == CrawlStatus.IN_PROGRESS
        )
        completed = sum(
            1 for stat in self.stats.values() if stat.status == CrawlStatus.COMPLETED
        )
        failed = sum(
            1 for stat in self.stats.values() if stat.status == CrawlStatus.FAILED
        )

        # Memory statistics
        current_memory = self.process.memory_info().rss / (1024 * 1024)
        total_task_memory = sum(stat.memory_usage for stat in self.stats.values())
        peak_memory = max(
            (stat.peak_memory for stat in self.stats.values()), default=0.0
        )

        # Duration
        duration = datetime.now() - self.start_time

        # Create status rows
        table.add_column("Status", style="bold cyan")
        table.add_column("Count", justify="right")
        table.add_column("Percentage", justify="right")

        table.add_row("Total Tasks", str(total_tasks), "100%")
        table.add_row(
            "[yellow]In Queue[/yellow]",
            str(queued),
            f"{(queued/total_tasks*100):.1f}%" if total_tasks > 0 else "0%",
        )
        table.add_row(
            "[blue]In Progress[/blue]",
            str(in_progress),
            f"{(in_progress/total_tasks*100):.1f}%" if total_tasks > 0 else "0%",
        )
        table.add_row(
            "[green]Completed[/green]",
            str(completed),
            f"{(completed/total_tasks*100):.1f}%" if total_tasks > 0 else "0%",
        )
        table.add_row(
            "[red]Failed[/red]",
            str(failed),
            f"{(failed/total_tasks*100):.1f}%" if total_tasks > 0 else "0%",
        )

        # Add memory information
        table.add_section()
        table.add_row(
            "[magenta]Current Memory[/magenta]", f"{current_memory:.1f} MB", ""
        )
        table.add_row(
            "[magenta]Total Task Memory[/magenta]", f"{total_task_memory:.1f} MB", ""
        )
        table.add_row(
            "[magenta]Peak Task Memory[/magenta]", f"{peak_memory:.1f} MB", ""
        )
        table.add_row(
            "[yellow]Runtime[/yellow]",
            str(timedelta(seconds=int(duration.total_seconds()))),
            "",
        )

        return table

    def _create_detailed_table(self) -> Table:
        table = Table(
            box=box.ROUNDED,
            title="Crawler Performance Monitor",
            title_style="bold magenta",
            header_style="bold blue",
        )

        # Add columns
        table.add_column("Task ID", style="cyan", no_wrap=True)
        table.add_column("URL", style="cyan", no_wrap=True)
        table.add_column("Status", style="bold")
        table.add_column("Memory (MB)", justify="right")
        table.add_column("Peak (MB)", justify="right")
        table.add_column("Duration", justify="right")
        table.add_column("Info", style="italic")

        # Add summary row
        total_memory = sum(stat.memory_usage for stat in self.stats.values())
        active_count = sum(
            1 for stat in self.stats.values() if stat.status == CrawlStatus.IN_PROGRESS
        )
        completed_count = sum(
            1 for stat in self.stats.values() if stat.status == CrawlStatus.COMPLETED
        )
        failed_count = sum(
            1 for stat in self.stats.values() if stat.status == CrawlStatus.FAILED
        )

        table.add_row(
            "[bold yellow]SUMMARY",
            f"Total: {len(self.stats)}",
            f"Active: {active_count}",
            f"{total_memory:.1f}",
            f"{self.process.memory_info().rss / (1024 * 1024):.1f}",
            str(
                timedelta(
                    seconds=int((datetime.now() - self.start_time).total_seconds())
                )
            ),
            f"✓{completed_count} ✗{failed_count}",
            style="bold",
        )

        table.add_section()

        # Add rows for each task
        visible_stats = sorted(
            self.stats.values(),
            key=lambda x: (
                x.status != CrawlStatus.IN_PROGRESS,
                x.status != CrawlStatus.QUEUED,
                x.end_time or datetime.max,
            ),
        )[: self.max_visible_rows]

        for stat in visible_stats:
            status_style = {
                CrawlStatus.QUEUED: "white",
                CrawlStatus.IN_PROGRESS: "yellow",
                CrawlStatus.COMPLETED: "green",
                CrawlStatus.FAILED: "red",
            }[stat.status]

            table.add_row(
                stat.task_id[:8],  # Show first 8 chars of task ID
                stat.url[:40] + "..." if len(stat.url) > 40 else stat.url,
                f"[{status_style}]{stat.status.value}[/{status_style}]",
                f"{stat.memory_usage:.1f}",
                f"{stat.peak_memory:.1f}",
                stat.duration,
                stat.error_message[:40] if stat.error_message else "",
            )

        return table

    def _create_table(self) -> Table:
        """Creates the appropriate table based on display mode"""
        if self.display_mode == DisplayMode.AGGREGATED:
            return self._create_aggregated_table()
        return self._create_detailed_table()
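In normal use the dispatchers below drive the monitor; driving it by hand makes the lifecycle clear. A hedged sketch (the task ID and URL are made up; `DisplayMode`, `CrawlStatus`, and `CrawlerMonitor` from this module are assumed in scope):

```python
monitor = CrawlerMonitor(max_visible_rows=10, display_mode=DisplayMode.AGGREGATED)
monitor.start()  # begins the rich Live display, refreshed twice per second
try:
    monitor.add_task("task-0001", "https://example.com")
    monitor.update_task("task-0001", status=CrawlStatus.IN_PROGRESS)
    monitor.update_task("task-0001", status=CrawlStatus.COMPLETED, memory_usage=12.5)
finally:
    monitor.stop()  # always tear down the Live display
```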
class BaseDispatcher(ABC):
    def __init__(
        self,
        rate_limiter: Optional[RateLimiter] = None,
        monitor: Optional[CrawlerMonitor] = None,
    ):
        self.crawler = None
        self._domain_last_hit: Dict[str, float] = {}
        self.concurrent_sessions = 0
        self.rate_limiter = rate_limiter
        self.monitor = monitor

    @abstractmethod
    async def crawl_url(
        self,
        url: str,
        config: CrawlerRunConfig,
        task_id: str,
        monitor: Optional[CrawlerMonitor] = None,
    ) -> CrawlerTaskResult:
        pass

    @abstractmethod
    async def run_urls(
        self,
        urls: List[str],
        crawler: "AsyncWebCrawler",  # noqa: F821
        config: CrawlerRunConfig,
        monitor: Optional[CrawlerMonitor] = None,
    ) -> List[CrawlerTaskResult]:
        pass
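A concrete dispatcher only has to implement `crawl_url` and `run_urls`. A purely hypothetical minimal subclass, crawling one URL at a time, might look like the following (it assumes the module's imports of `datetime`, `uuid`, and `CrawlerTaskResult`):

```python
class SequentialDispatcher(BaseDispatcher):
    """Hypothetical minimal dispatcher: no concurrency, no memory tracking."""

    async def crawl_url(self, url, config, task_id, monitor=None):
        start = datetime.now()
        if self.rate_limiter:
            await self.rate_limiter.wait_if_needed(url)
        result = await self.crawler.arun(url, config=config, session_id=task_id)
        return CrawlerTaskResult(
            task_id=task_id, url=url, result=result,
            memory_usage=0.0, peak_memory=0.0,
            start_time=start, end_time=datetime.now(),
            error_message="" if result.success else result.error_message,
        )

    async def run_urls(self, urls, crawler, config, monitor=None):
        self.crawler = crawler
        return [await self.crawl_url(u, config, str(uuid.uuid4())) for u in urls]
```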
class MemoryAdaptiveDispatcher(BaseDispatcher):
    def __init__(
        self,
        memory_threshold_percent: float = 90.0,
        check_interval: float = 1.0,
        max_session_permit: int = 20,
        memory_wait_timeout: float = 300.0,  # 5 minutes default timeout
        rate_limiter: Optional[RateLimiter] = None,
        monitor: Optional[CrawlerMonitor] = None,
    ):
        super().__init__(rate_limiter, monitor)
        self.memory_threshold_percent = memory_threshold_percent
        self.check_interval = check_interval
        self.max_session_permit = max_session_permit
        self.memory_wait_timeout = memory_wait_timeout

    async def crawl_url(
        self,
        url: str,
        config: CrawlerRunConfig,
        task_id: str,
    ) -> CrawlerTaskResult:
        start_time = datetime.now()
        error_message = ""
        memory_usage = peak_memory = 0.0

        try:
            if self.monitor:
                self.monitor.update_task(
                    task_id, status=CrawlStatus.IN_PROGRESS, start_time=start_time
                )
            self.concurrent_sessions += 1

            if self.rate_limiter:
                await self.rate_limiter.wait_if_needed(url)

            process = psutil.Process()
            start_memory = process.memory_info().rss / (1024 * 1024)
            result = await self.crawler.arun(url, config=config, session_id=task_id)
            end_memory = process.memory_info().rss / (1024 * 1024)

            memory_usage = peak_memory = end_memory - start_memory

            if self.rate_limiter and result.status_code:
                if not self.rate_limiter.update_delay(url, result.status_code):
                    error_message = f"Rate limit retry count exceeded for domain {urlparse(url).netloc}"
                    if self.monitor:
                        self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
                    return CrawlerTaskResult(
                        task_id=task_id,
                        url=url,
                        result=result,
                        memory_usage=memory_usage,
                        peak_memory=peak_memory,
                        start_time=start_time,
                        end_time=datetime.now(),
                        error_message=error_message,
                    )

            if not result.success:
                error_message = result.error_message
                if self.monitor:
                    self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
            elif self.monitor:
                self.monitor.update_task(task_id, status=CrawlStatus.COMPLETED)

        except Exception as e:
            error_message = str(e)
            if self.monitor:
                self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
            result = CrawlResult(
                url=url, html="", metadata={}, success=False, error_message=str(e)
            )

        finally:
            end_time = datetime.now()
            if self.monitor:
                self.monitor.update_task(
                    task_id,
                    end_time=end_time,
                    memory_usage=memory_usage,
                    peak_memory=peak_memory,
                    error_message=error_message,
                )
            self.concurrent_sessions -= 1

        return CrawlerTaskResult(
            task_id=task_id,
            url=url,
            result=result,
            memory_usage=memory_usage,
            peak_memory=peak_memory,
            start_time=start_time,
            end_time=end_time,
            error_message=error_message,
        )

    async def run_urls(
        self,
        urls: List[str],
        crawler: "AsyncWebCrawler",  # noqa: F821
        config: CrawlerRunConfig,
    ) -> List[CrawlerTaskResult]:
        self.crawler = crawler

        if self.monitor:
            self.monitor.start()

        try:
            pending_tasks = []
            active_tasks = []
            task_queue = []

            for url in urls:
                task_id = str(uuid.uuid4())
                if self.monitor:
                    self.monitor.add_task(task_id, url)
                task_queue.append((url, task_id))

            while task_queue or active_tasks:
                wait_start_time = time.time()
                while len(active_tasks) < self.max_session_permit and task_queue:
                    if psutil.virtual_memory().percent >= self.memory_threshold_percent:
                        # Check if we've exceeded the timeout
                        if time.time() - wait_start_time > self.memory_wait_timeout:
                            raise MemoryError(
                                f"Memory usage above threshold ({self.memory_threshold_percent}%) for more than {self.memory_wait_timeout} seconds"
                            )
                        await asyncio.sleep(self.check_interval)
                        continue

                    url, task_id = task_queue.pop(0)
                    task = asyncio.create_task(self.crawl_url(url, config, task_id))
                    active_tasks.append(task)

                if not active_tasks:
                    await asyncio.sleep(self.check_interval)
                    continue

                done, pending = await asyncio.wait(
                    active_tasks, return_when=asyncio.FIRST_COMPLETED
                )

                pending_tasks.extend(done)
                active_tasks = list(pending)

            return await asyncio.gather(*pending_tasks)
        finally:
            if self.monitor:
                self.monitor.stop()
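Putting the pieces together, a plausible configuration that combines memory-adaptive scheduling with the rate limiter and monitor defined earlier (the URLs are placeholders, and the `AsyncWebCrawler`/`CrawlerRunConfig` imports are assumed from the crawl4ai package):

```python
import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig

dispatcher = MemoryAdaptiveDispatcher(
    memory_threshold_percent=85.0,   # pause task launches above 85% system memory
    max_session_permit=10,           # at most ten concurrent crawl sessions
    rate_limiter=RateLimiter(base_delay=(0.5, 1.5), max_delay=20.0),
    monitor=CrawlerMonitor(display_mode=DisplayMode.AGGREGATED),
)

async def main():
    async with AsyncWebCrawler() as crawler:
        results = await dispatcher.run_urls(
            urls=["https://example.com", "https://example.org"],
            crawler=crawler,
            config=CrawlerRunConfig(),
        )
        print(f"{sum(r.result.success for r in results)} of {len(results)} succeeded")

asyncio.run(main())
```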
class SemaphoreDispatcher(BaseDispatcher):
    def __init__(
        self,
        semaphore_count: int = 5,
        max_session_permit: int = 20,
        rate_limiter: Optional[RateLimiter] = None,
        monitor: Optional[CrawlerMonitor] = None,
    ):
        super().__init__(rate_limiter, monitor)
        self.semaphore_count = semaphore_count
        self.max_session_permit = max_session_permit

    async def crawl_url(
        self,
        url: str,
        config: CrawlerRunConfig,
        task_id: str,
        semaphore: asyncio.Semaphore = None,
    ) -> CrawlerTaskResult:
        start_time = datetime.now()
        error_message = ""
        memory_usage = peak_memory = 0.0

        try:
            if self.monitor:
                self.monitor.update_task(
                    task_id, status=CrawlStatus.IN_PROGRESS, start_time=start_time
                )

            if self.rate_limiter:
                await self.rate_limiter.wait_if_needed(url)

            async with semaphore:
                process = psutil.Process()
                start_memory = process.memory_info().rss / (1024 * 1024)
                result = await self.crawler.arun(url, config=config, session_id=task_id)
                end_memory = process.memory_info().rss / (1024 * 1024)

                memory_usage = peak_memory = end_memory - start_memory

                if self.rate_limiter and result.status_code:
                    if not self.rate_limiter.update_delay(url, result.status_code):
                        error_message = f"Rate limit retry count exceeded for domain {urlparse(url).netloc}"
                        if self.monitor:
                            self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
                        return CrawlerTaskResult(
                            task_id=task_id,
                            url=url,
                            result=result,
                            memory_usage=memory_usage,
                            peak_memory=peak_memory,
                            start_time=start_time,
                            end_time=datetime.now(),
                            error_message=error_message,
                        )

                if not result.success:
                    error_message = result.error_message
                    if self.monitor:
                        self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
                elif self.monitor:
                    self.monitor.update_task(task_id, status=CrawlStatus.COMPLETED)

        except Exception as e:
            error_message = str(e)
            if self.monitor:
                self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
            result = CrawlResult(
                url=url, html="", metadata={}, success=False, error_message=str(e)
            )

        finally:
            end_time = datetime.now()
            if self.monitor:
                self.monitor.update_task(
                    task_id,
                    end_time=end_time,
                    memory_usage=memory_usage,
                    peak_memory=peak_memory,
                    error_message=error_message,
                )

        return CrawlerTaskResult(
            task_id=task_id,
            url=url,
            result=result,
            memory_usage=memory_usage,
            peak_memory=peak_memory,
            start_time=start_time,
            end_time=end_time,
            error_message=error_message,
        )

    async def run_urls(
        self,
        crawler: "AsyncWebCrawler",  # noqa: F821
        urls: List[str],
        config: CrawlerRunConfig,
    ) -> List[CrawlerTaskResult]:
        self.crawler = crawler
        if self.monitor:
            self.monitor.start()

        try:
            semaphore = asyncio.Semaphore(self.semaphore_count)
            tasks = []

            for url in urls:
                task_id = str(uuid.uuid4())
                if self.monitor:
                    self.monitor.add_task(task_id, url)
                task = asyncio.create_task(
                    self.crawl_url(url, config, task_id, semaphore)
                )
                tasks.append(task)

            return await asyncio.gather(*tasks, return_exceptions=True)
        finally:
            if self.monitor:
                self.monitor.stop()
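Where MemoryAdaptiveDispatcher gates new sessions on system memory pressure, SemaphoreDispatcher simply caps concurrency at a fixed count. A hedged sketch of wiring it up (placeholder URLs; crawl4ai imports assumed):

```python
import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig

dispatcher = SemaphoreDispatcher(
    semaphore_count=5,  # at most five crawls hold the semaphore at once
    rate_limiter=RateLimiter(base_delay=(1.0, 2.0)),
)

async def main():
    async with AsyncWebCrawler() as crawler:
        results = await dispatcher.run_urls(
            crawler=crawler,  # note: crawler comes first in this class's signature
            urls=["https://example.com/a", "https://example.com/b"],
            config=CrawlerRunConfig(),
        )
        # gather(..., return_exceptions=True) means entries may be exceptions
        for r in results:
            print(r if isinstance(r, Exception) else r.url)

asyncio.run(main())
```

Note the design quirk: unlike the base class and MemoryAdaptiveDispatcher, this `run_urls` takes `crawler` before `urls`, so keyword arguments are the safer way to call it.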
@@ -1,19 +1,58 @@
from abc import ABC, abstractmethod
from enum import Enum
from typing import Optional, Dict, Any
from colorama import Fore, Style, init
import os
from datetime import datetime
from urllib.parse import unquote


class LogLevel(Enum):
    DEFAULT = 0
    DEBUG = 1
    INFO = 2
    SUCCESS = 3
    WARNING = 4
    ERROR = 5
    CRITICAL = 6
    ALERT = 7
    NOTICE = 8
    EXCEPTION = 9
    FATAL = 10


class AsyncLogger:

class AsyncLoggerBase(ABC):
    @abstractmethod
    def debug(self, message: str, tag: str = "DEBUG", **kwargs):
        pass

    @abstractmethod
    def info(self, message: str, tag: str = "INFO", **kwargs):
        pass

    @abstractmethod
    def success(self, message: str, tag: str = "SUCCESS", **kwargs):
        pass

    @abstractmethod
    def warning(self, message: str, tag: str = "WARNING", **kwargs):
        pass

    @abstractmethod
    def error(self, message: str, tag: str = "ERROR", **kwargs):
        pass

    @abstractmethod
    def url_status(self, url: str, success: bool, timing: float, tag: str = "FETCH", url_length: int = 100):
        pass

    @abstractmethod
    def error_status(self, url: str, error: str, tag: str = "ERROR", url_length: int = 100):
        pass


class AsyncLogger(AsyncLoggerBase):
    """
    Asynchronous logger with support for colored console output and file logging.
    Supports templated messages with colored components.
@@ -30,6 +69,13 @@ class AsyncLogger:
        "DEBUG": "⋯",
        "INFO": "ℹ",
        "WARNING": "⚠",
        "SUCCESS": "✔",
        "CRITICAL": "‼",
        "ALERT": "⚡",
        "NOTICE": "ℹ",
        "EXCEPTION": "❗",
        "FATAL": "☠",
        "DEFAULT": "•",
    }

    DEFAULT_COLORS = {
@@ -38,6 +84,12 @@ class AsyncLogger:
        LogLevel.SUCCESS: Fore.GREEN,
        LogLevel.WARNING: Fore.YELLOW,
        LogLevel.ERROR: Fore.RED,
        LogLevel.CRITICAL: Fore.RED + Style.BRIGHT,
        LogLevel.ALERT: Fore.RED + Style.BRIGHT,
        LogLevel.NOTICE: Fore.BLUE,
        LogLevel.EXCEPTION: Fore.RED + Style.BRIGHT,
        LogLevel.FATAL: Fore.RED + Style.BRIGHT,
        LogLevel.DEFAULT: Fore.WHITE,
    }

    def __init__(
@@ -79,6 +131,14 @@ class AsyncLogger:
    def _get_icon(self, tag: str) -> str:
        """Get the icon for a tag, defaulting to info icon if not found."""
        return self.icons.get(tag, self.icons["INFO"])

    def _shorten(self, text, length, placeholder="..."):
        """Truncate text in the middle if longer than length, or pad if shorter."""
        if len(text) <= length:
            return text.ljust(length)  # Pad with spaces to reach desired length
        half = (length - len(placeholder)) // 2
        shortened = text[:half] + placeholder + text[-half:]
        return shortened.ljust(length)  # Also pad shortened text to consistent length

    def _write_to_file(self, message: str):
        """Write a message to the log file if configured."""
@@ -125,9 +185,22 @@ class AsyncLogger:
        formatted_message = message.format(**params)

        # Then apply colors if specified
        color_map = {
            "green": Fore.GREEN,
            "red": Fore.RED,
            "yellow": Fore.YELLOW,
            "blue": Fore.BLUE,
            "cyan": Fore.CYAN,
            "magenta": Fore.MAGENTA,
            "white": Fore.WHITE,
            "black": Fore.BLACK,
            "reset": Style.RESET_ALL,
        }
        if colors:
            for key, color in colors.items():
                # Find the formatted value in the message and wrap it with color
                if color in color_map:
                    color = color_map[color]
                if key in params:
                    value_str = str(params[key])
                    formatted_message = formatted_message.replace(
@@ -168,6 +241,22 @@ class AsyncLogger:
    def warning(self, message: str, tag: str = "WARNING", **kwargs):
        """Log a warning message."""
        self._log(LogLevel.WARNING, message, tag, **kwargs)

    def critical(self, message: str, tag: str = "CRITICAL", **kwargs):
        """Log a critical message."""
        self._log(LogLevel.ERROR, message, tag, **kwargs)

    def exception(self, message: str, tag: str = "EXCEPTION", **kwargs):
        """Log an exception message."""
        self._log(LogLevel.ERROR, message, tag, **kwargs)

    def fatal(self, message: str, tag: str = "FATAL", **kwargs):
        """Log a fatal message."""
        self._log(LogLevel.ERROR, message, tag, **kwargs)

    def alert(self, message: str, tag: str = "ALERT", **kwargs):
        """Log an alert message."""
        self._log(LogLevel.ERROR, message, tag, **kwargs)

    def notice(self, message: str, tag: str = "NOTICE", **kwargs):
        """Log a notice message."""
        self._log(LogLevel.INFO, message, tag, **kwargs)

    def error(self, message: str, tag: str = "ERROR", **kwargs):
        """Log an error message."""
@@ -179,7 +268,7 @@ class AsyncLogger:
        success: bool,
        timing: float,
        tag: str = "FETCH",
        url_length: int = 50,
        url_length: int = 100,
    ):
        """
        Convenience method for logging URL fetch status.
@@ -191,14 +280,15 @@ class AsyncLogger:
            tag: Tag for the message
            url_length: Maximum length for URL in log
        """
        decoded_url = unquote(url)
        readable_url = self._shorten(decoded_url, url_length)
        self._log(
            level=LogLevel.SUCCESS if success else LogLevel.ERROR,
            message="{url:.{url_length}}... | Status: {status} | Time: {timing:.2f}s",
            message="{url} | {status} | ⏱: {timing:.2f}s",
            tag=tag,
            params={
                "url": url,
                "url_length": url_length,
                "status": success,
                "url": readable_url,
                "status": "✓" if success else "✗",
                "timing": timing,
            },
            colors={
@@ -219,9 +309,63 @@ class AsyncLogger:
            tag: Tag for the message
            url_length: Maximum length for URL in log
        """
        decoded_url = unquote(url)
        readable_url = self._shorten(decoded_url, url_length)
        self._log(
            level=LogLevel.ERROR,
            message="{url:.{url_length}}... | Error: {error}",
            message="{url} | Error: {error}",
            tag=tag,
            params={"url": url, "url_length": url_length, "error": error},
            params={"url": readable_url, "error": error},
        )


class AsyncFileLogger(AsyncLoggerBase):
    """
    File-only asynchronous logger that writes logs to a specified file.
    """

    def __init__(self, log_file: str):
        """
        Initialize the file logger.

        Args:
            log_file: File path for logging
        """
        self.log_file = log_file
        os.makedirs(os.path.dirname(os.path.abspath(log_file)), exist_ok=True)

    def _write_to_file(self, level: str, message: str, tag: str):
        """Write a message to the log file."""
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
        with open(self.log_file, "a", encoding="utf-8") as f:
            f.write(f"[{timestamp}] [{level}] [{tag}] {message}\n")

    def debug(self, message: str, tag: str = "DEBUG", **kwargs):
        """Log a debug message to file."""
        self._write_to_file("DEBUG", message, tag)

    def info(self, message: str, tag: str = "INFO", **kwargs):
        """Log an info message to file."""
        self._write_to_file("INFO", message, tag)

    def success(self, message: str, tag: str = "SUCCESS", **kwargs):
        """Log a success message to file."""
        self._write_to_file("SUCCESS", message, tag)

    def warning(self, message: str, tag: str = "WARNING", **kwargs):
        """Log a warning message to file."""
        self._write_to_file("WARNING", message, tag)

    def error(self, message: str, tag: str = "ERROR", **kwargs):
        """Log an error message to file."""
        self._write_to_file("ERROR", message, tag)

    def url_status(self, url: str, success: bool, timing: float, tag: str = "FETCH", url_length: int = 100):
        """Log URL fetch status to file."""
        status = "SUCCESS" if success else "FAILED"
        message = f"{url[:url_length]}... | Status: {status} | Time: {timing:.2f}s"
        self._write_to_file("URL_STATUS", message, tag)

    def error_status(self, url: str, error: str, tag: str = "ERROR", url_length: int = 100):
        """Log error status to file."""
        message = f"{url[:url_length]}... | Error: {error}"
        self._write_to_file("ERROR", message, tag)
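Because both loggers implement AsyncLoggerBase, they are drop-in replacements for each other. A small sketch (the log path is arbitrary; the classes above are assumed importable):

```python
logger = AsyncFileLogger("/tmp/crawl4ai/crawler.log")  # directory is created if missing
logger.info("Starting crawl", tag="INIT")
logger.url_status("https://example.com/some/long/path", success=True, timing=1.37)
logger.error_status("https://example.com/broken", error="timeout")
```

Per the constructor change in the next file, an AsyncLoggerBase instance like this can also be passed straight to `AsyncWebCrawler(logger=...)`.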
@@ -1,7 +1,7 @@
|
||||
from .__version__ import __version__ as crawl4ai_version
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import warnings
|
||||
from colorama import Fore
|
||||
from pathlib import Path
|
||||
from typing import Optional, List
|
||||
@@ -10,30 +10,36 @@ import asyncio
|
||||
|
||||
# from contextlib import nullcontext, asynccontextmanager
|
||||
from contextlib import asynccontextmanager
|
||||
from .models import CrawlResult, MarkdownGenerationResult, CrawlerTaskResult, DispatchResult
|
||||
from .models import (
|
||||
CrawlResult,
|
||||
MarkdownGenerationResult,
|
||||
DispatchResult,
|
||||
ScrapingResult,
|
||||
CrawlResultContainer,
|
||||
RunManyReturn
|
||||
)
|
||||
from .async_database import async_db_manager
|
||||
from .chunking_strategy import * # noqa: F403
|
||||
from .chunking_strategy import RegexChunking, ChunkingStrategy, IdentityChunking
|
||||
from .chunking_strategy import IdentityChunking
|
||||
from .content_filter_strategy import * # noqa: F403
|
||||
from .content_filter_strategy import RelevantContentFilter
|
||||
from .extraction_strategy import * # noqa: F403
|
||||
from .extraction_strategy import NoExtractionStrategy, ExtractionStrategy
|
||||
from .extraction_strategy import * # noqa: F403
|
||||
from .extraction_strategy import NoExtractionStrategy
|
||||
from .async_crawler_strategy import (
|
||||
AsyncCrawlerStrategy,
|
||||
AsyncPlaywrightCrawlerStrategy,
|
||||
AsyncCrawlResponse,
|
||||
)
|
||||
from .cache_context import CacheMode, CacheContext, _legacy_to_cache_mode
|
||||
from .cache_context import CacheMode, CacheContext
|
||||
from .markdown_generation_strategy import (
|
||||
DefaultMarkdownGenerator,
|
||||
MarkdownGenerationStrategy,
|
||||
)
|
||||
from .async_logger import AsyncLogger
|
||||
from .async_configs import BrowserConfig, CrawlerRunConfig
|
||||
from .async_dispatcher import * # noqa: F403
|
||||
from .deep_crawling import DeepCrawlDecorator
|
||||
from .async_logger import AsyncLogger, AsyncLoggerBase
|
||||
from .async_configs import BrowserConfig, CrawlerRunConfig, ProxyConfig
|
||||
from .async_dispatcher import * # noqa: F403
|
||||
from .async_dispatcher import BaseDispatcher, MemoryAdaptiveDispatcher, RateLimiter
|
||||
|
||||
from .config import MIN_WORD_THRESHOLD
|
||||
from .utils import (
|
||||
sanitize_input_encode,
|
||||
InvalidCSSSelectorError,
|
||||
@@ -41,16 +47,9 @@ from .utils import (
|
||||
create_box_message,
|
||||
get_error_context,
|
||||
RobotsParser,
|
||||
preprocess_html_for_schema,
|
||||
)
|
||||
|
||||
from typing import Union, AsyncGenerator, List, TypeVar
|
||||
from collections.abc import AsyncGenerator
|
||||
|
||||
CrawlResultT = TypeVar('CrawlResultT', bound=CrawlResult)
|
||||
RunManyReturn = Union[List[CrawlResultT], AsyncGenerator[CrawlResultT, None]]
|
||||
|
||||
from .__version__ import __version__ as crawl4ai_version
|
||||
|
||||
|
||||
class AsyncWebCrawler:
|
||||
"""
|
||||
@@ -76,31 +75,21 @@ class AsyncWebCrawler:
|
||||
await crawler.close()
|
||||
```
|
||||
|
||||
Migration Guide:
|
||||
Old way (deprecated):
|
||||
crawler = AsyncWebCrawler(always_by_pass_cache=True, browser_type="chromium", headless=True)
|
||||
|
||||
New way (recommended):
|
||||
browser_config = BrowserConfig(browser_type="chromium", headless=True)
|
||||
crawler = AsyncWebCrawler(config=browser_config)
|
||||
|
||||
|
||||
Attributes:
|
||||
browser_config (BrowserConfig): Configuration object for browser settings.
|
||||
crawler_strategy (AsyncCrawlerStrategy): Strategy for crawling web pages.
|
||||
logger (AsyncLogger): Logger instance for recording events and errors.
|
||||
always_bypass_cache (bool): Whether to always bypass cache.
|
||||
crawl4ai_folder (str): Directory for storing cache.
|
||||
base_directory (str): Base directory for storing cache.
|
||||
ready (bool): Whether the crawler is ready for use.
|
||||
|
||||
Methods:
|
||||
start(): Start the crawler explicitly without using context manager.
|
||||
close(): Close the crawler explicitly without using context manager.
|
||||
arun(): Run the crawler for a single source: URL (web, local file, or raw HTML).
|
||||
awarmup(): Perform warmup sequence.
|
||||
arun_many(): Run the crawler for multiple sources.
|
||||
aprocess_html(): Process HTML content.
|
||||
Methods:
|
||||
start(): Start the crawler explicitly without using context manager.
|
||||
close(): Close the crawler explicitly without using context manager.
|
||||
arun(): Run the crawler for a single source: URL (web, local file, or raw HTML).
|
||||
awarmup(): Perform warmup sequence.
|
||||
arun_many(): Run the crawler for multiple sources.
|
||||
aprocess_html(): Process HTML content.
|
||||
|
||||
Typical Usage:
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
@@ -121,81 +110,45 @@ class AsyncWebCrawler:
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
crawler_strategy: Optional[AsyncCrawlerStrategy] = None,
|
||||
config: Optional[BrowserConfig] = None,
|
||||
always_bypass_cache: bool = False,
|
||||
always_by_pass_cache: Optional[bool] = None, # Deprecated parameter
|
||||
base_directory: str = str(os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home())),
|
||||
crawler_strategy: AsyncCrawlerStrategy = None,
|
||||
config: BrowserConfig = None,
|
||||
base_directory: str = str(
|
||||
os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home())),
|
||||
thread_safe: bool = False,
|
||||
logger: AsyncLoggerBase = None,
|
||||
**kwargs,
|
||||
):
|
||||
"""
|
||||
Initialize the AsyncWebCrawler.
|
||||
|
||||
Args:
|
||||
crawler_strategy: Strategy for crawling web pages. If None, will create AsyncPlaywrightCrawlerStrategy
|
||||
config: Configuration object for browser settings. If None, will be created from kwargs
|
||||
always_bypass_cache: Whether to always bypass cache (new parameter)
|
||||
always_by_pass_cache: Deprecated, use always_bypass_cache instead
|
||||
crawler_strategy: Strategy for crawling web pages. Default AsyncPlaywrightCrawlerStrategy
|
||||
config: Configuration object for browser settings. Default BrowserConfig()
|
||||
base_directory: Base directory for storing cache
|
||||
thread_safe: Whether to use thread-safe operations
|
||||
**kwargs: Additional arguments for backwards compatibility
|
||||
"""
|
||||
# Handle browser configuration
|
||||
browser_config = config
|
||||
if browser_config is not None:
|
||||
if any(
|
||||
k in kwargs
|
||||
for k in [
|
||||
"browser_type",
|
||||
"headless",
|
||||
"viewport_width",
|
||||
"viewport_height",
|
||||
]
|
||||
):
|
||||
self.logger.warning(
|
||||
message="Both browser_config and legacy browser parameters provided. browser_config will take precedence.",
|
||||
tag="WARNING",
|
||||
)
|
||||
else:
|
||||
# Create browser config from kwargs for backwards compatibility
|
||||
browser_config = BrowserConfig.from_kwargs(kwargs)
|
||||
browser_config = config or BrowserConfig()
|
||||
|
||||
self.browser_config = browser_config
|
||||
|
||||
# Initialize logger first since other components may need it
|
||||
self.logger = AsyncLogger(
|
||||
self.logger = logger or AsyncLogger(
|
||||
log_file=os.path.join(base_directory, ".crawl4ai", "crawler.log"),
|
||||
verbose=self.browser_config.verbose,
|
||||
tag_width=10,
|
||||
)
|
||||
|
||||
# Initialize crawler strategy
|
||||
params = {k: v for k, v in kwargs.items() if k in ["browser_congig", "logger"]}
|
||||
params = {k: v for k, v in kwargs.items() if k in [
|
||||
"browser_config", "logger"]}
|
||||
self.crawler_strategy = crawler_strategy or AsyncPlaywrightCrawlerStrategy(
|
||||
browser_config=browser_config,
|
||||
logger=self.logger,
|
||||
**params, # Pass remaining kwargs for backwards compatibility
|
||||
)
|
||||
|
||||
# If craweler strategy doesnt have logger, use crawler logger
|
||||
if not self.crawler_strategy.logger:
|
||||
self.crawler_strategy.logger = self.logger
|
||||
|
||||
# Handle deprecated cache parameter
|
||||
if always_by_pass_cache is not None:
|
||||
if kwargs.get("warning", True):
|
||||
warnings.warn(
|
||||
"'always_by_pass_cache' is deprecated and will be removed in version 0.5.0. "
|
||||
"Use 'always_bypass_cache' instead. "
|
||||
"Pass warning=False to suppress this warning.",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
self.always_bypass_cache = always_by_pass_cache
|
||||
else:
|
||||
self.always_bypass_cache = always_bypass_cache
|
||||
|
||||
# Thread safety setup
|
||||
self._lock = asyncio.Lock() if thread_safe else None
|
||||
|
||||
@@ -209,21 +162,20 @@ class AsyncWebCrawler:
|
||||
|
||||
self.ready = False
|
||||
|
||||
# Decorate arun method with deep crawling capabilities
|
||||
self._deep_handler = DeepCrawlDecorator(self)
|
||||
self.arun = self._deep_handler(self.arun)
|
||||
|
||||
async def start(self):
|
||||
"""
|
||||
Start the crawler explicitly without using context manager.
|
||||
This is equivalent to using 'async with' but gives more control over the lifecycle.
|
||||
|
||||
This method will:
|
||||
1. Initialize the browser and context
|
||||
2. Perform warmup sequence
|
||||
3. Return the crawler instance for method chaining
|
||||
|
||||
Returns:
|
||||
AsyncWebCrawler: The initialized crawler instance
|
||||
"""
|
||||
await self.crawler_strategy.__aenter__()
|
||||
await self.awarmup()
|
||||
self.logger.info(f"Crawl4AI {crawl4ai_version}", tag="INIT")
|
||||
self.ready = True
|
||||
return self
|
||||
|
||||
async def close(self):
|
||||
@@ -243,18 +195,6 @@ class AsyncWebCrawler:
|
||||
async def __aexit__(self, exc_type, exc_val, exc_tb):
|
||||
await self.close()
|
||||
|
||||
async def awarmup(self):
|
||||
"""
|
||||
Initialize the crawler with warm-up sequence.
|
||||
|
||||
This method:
|
||||
1. Logs initialization info
|
||||
2. Sets up browser configuration
|
||||
3. Marks the crawler as ready
|
||||
"""
|
||||
self.logger.info(f"Crawl4AI {crawl4ai_version}", tag="INIT")
|
||||
self.ready = True
|
||||
|
||||
@asynccontextmanager
|
||||
async def nullcontext(self):
|
||||
"""异步空上下文管理器"""
|
||||
@@ -263,26 +203,9 @@ class AsyncWebCrawler:
|
||||
async def arun(
|
||||
self,
|
||||
url: str,
|
||||
config: Optional[CrawlerRunConfig] = None,
|
||||
# Legacy parameters maintained for backwards compatibility
|
||||
word_count_threshold=MIN_WORD_THRESHOLD,
|
||||
extraction_strategy: ExtractionStrategy = None,
|
||||
chunking_strategy: ChunkingStrategy = RegexChunking(),
|
||||
content_filter: RelevantContentFilter = None,
|
||||
cache_mode: Optional[CacheMode] = None,
|
||||
# Deprecated cache parameters
|
||||
bypass_cache: bool = False,
|
||||
disable_cache: bool = False,
|
||||
no_cache_read: bool = False,
|
||||
no_cache_write: bool = False,
|
||||
# Other legacy parameters
|
||||
css_selector: str = None,
|
||||
screenshot: bool = False,
|
||||
pdf: bool = False,
|
||||
user_agent: str = None,
|
||||
verbose=True,
|
||||
config: CrawlerRunConfig = None,
|
||||
**kwargs,
|
||||
) -> CrawlResult:
|
||||
) -> RunManyReturn:
|
||||
"""
|
||||
Runs the crawler for a single source: URL (web, local file, or raw HTML).
|
||||
|
||||
@@ -311,70 +234,25 @@ class AsyncWebCrawler:
|
||||
Returns:
|
||||
CrawlResult: The result of crawling and processing
|
||||
"""
|
||||
crawler_config = config
|
||||
# Auto-start if not ready
|
||||
if not self.ready:
|
||||
await self.start()
|
||||
|
||||
config = config or CrawlerRunConfig()
|
||||
if not isinstance(url, str) or not url:
|
||||
raise ValueError("Invalid URL, make sure the URL is a non-empty string")
|
||||
raise ValueError(
|
||||
"Invalid URL, make sure the URL is a non-empty string")
|
||||
|
||||
async with self._lock or self.nullcontext():
|
||||
try:
|
||||
# Handle configuration
|
||||
if crawler_config is not None:
|
||||
# if any(param is not None for param in [
|
||||
# word_count_threshold, extraction_strategy, chunking_strategy,
|
||||
# content_filter, cache_mode, css_selector, screenshot, pdf
|
||||
# ]):
|
||||
# self.logger.warning(
|
||||
# message="Both crawler_config and legacy parameters provided. crawler_config will take precedence.",
|
||||
# tag="WARNING"
|
||||
# )
|
||||
config = crawler_config
|
||||
else:
|
||||
# Merge all parameters into a single kwargs dict for config creation
|
||||
config_kwargs = {
|
||||
"word_count_threshold": word_count_threshold,
|
||||
"extraction_strategy": extraction_strategy,
|
||||
"chunking_strategy": chunking_strategy,
|
||||
"content_filter": content_filter,
|
||||
"cache_mode": cache_mode,
|
||||
"bypass_cache": bypass_cache,
|
||||
"disable_cache": disable_cache,
|
||||
"no_cache_read": no_cache_read,
|
||||
"no_cache_write": no_cache_write,
|
||||
"css_selector": css_selector,
|
||||
"screenshot": screenshot,
|
||||
"pdf": pdf,
|
||||
"verbose": verbose,
|
||||
**kwargs,
|
||||
}
|
||||
config = CrawlerRunConfig.from_kwargs(config_kwargs)
|
||||
|
||||
# Handle deprecated cache parameters
|
||||
if any([bypass_cache, disable_cache, no_cache_read, no_cache_write]):
|
||||
if kwargs.get("warning", True):
|
||||
warnings.warn(
|
||||
"Cache control boolean flags are deprecated and will be removed in version 0.5.0. "
|
||||
"Use 'cache_mode' parameter instead.",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
|
||||
# Convert legacy parameters if cache_mode not provided
|
||||
if config.cache_mode is None:
|
||||
config.cache_mode = _legacy_to_cache_mode(
|
||||
disable_cache=disable_cache,
|
||||
bypass_cache=bypass_cache,
|
||||
no_cache_read=no_cache_read,
|
||||
no_cache_write=no_cache_write,
|
||||
)
|
||||
self.logger.verbose = config.verbose
|
||||
|
||||
# Default to ENABLED if no cache mode specified
|
||||
if config.cache_mode is None:
|
||||
config.cache_mode = CacheMode.ENABLED
|
||||
|
||||
# Create cache context
|
||||
cache_context = CacheContext(
|
||||
url, config.cache_mode, self.always_bypass_cache
|
||||
)
|
||||
cache_context = CacheContext(url, config.cache_mode, False)
|
||||
|
||||
# Initialize processing variables
|
||||
async_response: AsyncCrawlResponse = None
|
||||
@@ -401,7 +279,11 @@ class AsyncWebCrawler:
|
||||
# If screenshot is requested but its not in cache, then set cache_result to None
|
||||
screenshot_data = cached_result.screenshot
|
||||
pdf_data = cached_result.pdf
|
||||
if config.screenshot and not screenshot or config.pdf and not pdf:
|
||||
# if config.screenshot and not screenshot or config.pdf and not pdf:
|
||||
if config.screenshot and not screenshot_data:
|
||||
cached_result = None
|
||||
|
||||
if config.pdf and not pdf_data:
|
||||
cached_result = None
|
||||
|
||||
self.logger.url_status(
|
||||
@@ -411,26 +293,45 @@ class AsyncWebCrawler:
|
||||
tag="FETCH",
|
||||
)
|
||||
|
||||
# Update proxy configuration from rotation strategy if available
|
||||
if config and config.proxy_rotation_strategy:
|
||||
next_proxy: ProxyConfig = await config.proxy_rotation_strategy.get_next_proxy()
|
||||
if next_proxy:
|
||||
self.logger.info(
|
||||
message="Switch proxy: {proxy}",
|
||||
tag="PROXY",
|
||||
params={"proxy": next_proxy.server}
|
||||
)
|
||||
config.proxy_config = next_proxy
|
||||
# config = config.clone(proxy_config=next_proxy)
|
||||
|
||||
# Fetch fresh content if needed
|
||||
if not cached_result or not html:
|
||||
t1 = time.perf_counter()
|
||||
|
||||
if user_agent:
|
||||
self.crawler_strategy.update_user_agent(user_agent)
|
||||
if config.user_agent:
|
||||
self.crawler_strategy.update_user_agent(
|
||||
config.user_agent)
|
||||
|
||||
# Check robots.txt if enabled
|
||||
if config and config.check_robots_txt:
|
||||
if not await self.robots_parser.can_fetch(url, self.browser_config.user_agent):
|
||||
if not await self.robots_parser.can_fetch(
|
||||
url, self.browser_config.user_agent
|
||||
):
|
||||
return CrawlResult(
|
||||
url=url,
|
||||
html="",
|
||||
success=False,
|
||||
status_code=403,
|
||||
error_message="Access denied by robots.txt",
|
||||
response_headers={"X-Robots-Status": "Blocked by robots.txt"}
|
||||
response_headers={
|
||||
"X-Robots-Status": "Blocked by robots.txt"
|
||||
},
|
||||
)
|
||||
|
||||
# Pass config to crawl method
|
||||
##############################
|
||||
# Call CrawlerStrategy.crawl #
|
||||
##############################
|
||||
async_response = await self.crawler_strategy.crawl(
|
||||
url,
|
||||
config=config, # Pass the entire config object
|
||||
@@ -439,6 +340,7 @@ class AsyncWebCrawler:
|
||||
html = sanitize_input_encode(async_response.html)
|
||||
screenshot_data = async_response.screenshot
|
||||
pdf_data = async_response.pdf_data
|
||||
js_execution_result = async_response.js_execution_result
|
||||
|
||||
t2 = time.perf_counter()
|
||||
self.logger.url_status(
|
||||
@@ -448,16 +350,19 @@ class AsyncWebCrawler:
|
||||
tag="FETCH",
|
||||
)
|
||||
|
||||
# Process the HTML content
|
||||
crawl_result : CrawlResult = await self.aprocess_html(
|
||||
###############################################################
|
||||
# Process the HTML content, Call CrawlerStrategy.process_html #
|
||||
###############################################################
|
||||
crawl_result: CrawlResult = await self.aprocess_html(
|
||||
url=url,
|
||||
html=html,
|
||||
extracted_content=extracted_content,
|
||||
config=config, # Pass the config object instead of individual parameters
|
||||
screenshot=screenshot_data,
|
||||
screenshot_data=screenshot_data,
|
||||
pdf_data=pdf_data,
|
||||
verbose=config.verbose,
|
||||
is_raw_html=True if url.startswith("raw:") else False,
|
||||
redirected_url=async_response.redirected_url,
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
@@ -465,69 +370,42 @@ class AsyncWebCrawler:
|
||||
crawl_result.redirected_url = async_response.redirected_url or url
|
||||
crawl_result.response_headers = async_response.response_headers
|
||||
crawl_result.downloaded_files = async_response.downloaded_files
|
||||
crawl_result.ssl_certificate = (
|
||||
async_response.ssl_certificate
|
||||
) # Add SSL certificate
|
||||
|
||||
# # Check and set values from async_response to crawl_result
|
||||
# try:
|
||||
# for key in vars(async_response):
|
||||
# if hasattr(crawl_result, key):
|
||||
# value = getattr(async_response, key, None)
|
||||
# current_value = getattr(crawl_result, key, None)
|
||||
# if value is not None and not current_value:
|
||||
# try:
|
||||
# setattr(crawl_result, key, value)
|
||||
# except Exception as e:
|
||||
# self.logger.warning(
|
||||
# message=f"Failed to set attribute {key}: {str(e)}",
|
||||
# tag="WARNING"
|
||||
# )
|
||||
# except Exception as e:
|
||||
# self.logger.warning(
|
||||
# message=f"Error copying response attributes: {str(e)}",
|
||||
# tag="WARNING"
|
||||
# )
|
||||
crawl_result.js_execution_result = js_execution_result
|
||||
crawl_result.mhtml = async_response.mhtml_data
|
||||
crawl_result.ssl_certificate = async_response.ssl_certificate
|
||||
# Add captured network and console data if available
|
||||
crawl_result.network_requests = async_response.network_requests
|
||||
crawl_result.console_messages = async_response.console_messages
|
||||
|
||||
crawl_result.success = bool(html)
|
||||
crawl_result.session_id = getattr(config, "session_id", None)
|
||||
crawl_result.session_id = getattr(
|
||||
config, "session_id", None)
|
||||
|
||||
self.logger.success(
|
||||
message="{url:.50}... | Status: {status} | Total: {timing}",
|
||||
self.logger.url_status(
|
||||
url=cache_context.display_url,
|
||||
success=crawl_result.success,
|
||||
timing=time.perf_counter() - start_time,
|
||||
tag="COMPLETE",
|
||||
params={
|
||||
"url": cache_context.display_url,
|
||||
"status": crawl_result.success,
|
||||
"timing": f"{time.perf_counter() - start_time:.2f}s",
|
||||
},
|
||||
colors={
|
||||
"status": Fore.GREEN if crawl_result.success else Fore.RED,
|
||||
"timing": Fore.YELLOW,
|
||||
},
|
||||
)
|
||||
|
||||
# Update cache if appropriate
|
||||
if cache_context.should_write() and not bool(cached_result):
|
||||
await async_db_manager.acache_url(crawl_result)
|
||||
|
||||
return crawl_result
|
||||
return CrawlResultContainer(crawl_result)
|
||||
|
||||
else:
|
||||
self.logger.success(
|
||||
message="{url:.50}... | Status: {status} | Total: {timing}",
|
||||
tag="COMPLETE",
|
||||
params={
|
||||
"url": cache_context.display_url,
|
||||
"status": True,
|
||||
"timing": f"{time.perf_counter() - start_time:.2f}s",
|
||||
},
|
||||
colors={"status": Fore.GREEN, "timing": Fore.YELLOW},
|
||||
self.logger.url_status(
|
||||
url=cache_context.display_url,
|
||||
success=True,
|
||||
timing=time.perf_counter() - start_time,
|
||||
tag="COMPLETE"
|
||||
)
|
||||
|
||||
cached_result.success = bool(html)
|
||||
cached_result.session_id = getattr(config, "session_id", None)
|
||||
cached_result.session_id = getattr(
|
||||
config, "session_id", None)
|
||||
cached_result.redirected_url = cached_result.redirected_url or url
|
||||
return cached_result
|
||||
return CrawlResultContainer(cached_result)
|
||||
|
||||
except Exception as e:
|
||||
error_context = get_error_context(sys.exc_info())
|
||||
@@ -538,8 +416,6 @@ class AsyncWebCrawler:
|
||||
f"Error: {str(e)}\n\n"
|
||||
f"Code context:\n{error_context['code_context']}"
|
||||
)
|
||||
# if not hasattr(e, "msg"):
|
||||
# e.msg = str(e)
|
||||
|
||||
self.logger.error_status(
|
||||
url=url,
|
||||
@@ -547,8 +423,10 @@ class AsyncWebCrawler:
|
||||
tag="ERROR",
|
||||
)
|
||||
|
||||
return CrawlResult(
|
||||
url=url, html="", success=False, error_message=error_message
|
||||
return CrawlResultContainer(
|
||||
CrawlResult(
|
||||
url=url, html="", success=False, error_message=error_message
|
||||
)
|
||||
)
|
||||
|
||||
async def aprocess_html(
|
||||
@@ -557,7 +435,7 @@ class AsyncWebCrawler:
|
||||
html: str,
|
||||
extracted_content: str,
|
||||
config: CrawlerRunConfig,
|
||||
screenshot: str,
|
||||
screenshot_data: str,
|
||||
pdf_data: str,
|
||||
verbose: bool,
|
||||
**kwargs,
|
||||
@@ -570,7 +448,7 @@ class AsyncWebCrawler:
|
||||
html: Raw HTML content
|
||||
extracted_content: Previously extracted content (if any)
|
||||
config: Configuration object controlling processing behavior
|
||||
screenshot: Screenshot data (if any)
|
||||
screenshot_data: Screenshot data (if any)
|
||||
pdf_data: PDF data (if any)
|
||||
verbose: Whether to enable verbose logging
|
||||
**kwargs: Additional parameters for backwards compatibility
|
||||
@@ -578,6 +456,7 @@ class AsyncWebCrawler:
|
||||
Returns:
|
||||
CrawlResult: Processed result containing extracted and formatted content
|
||||
"""
|
||||
cleaned_html = ""
|
||||
try:
|
||||
_url = url if not kwargs.get("is_raw_html", False) else "Raw HTML"
|
||||
t1 = time.perf_counter()
|
||||
@@ -588,11 +467,17 @@ class AsyncWebCrawler:
|
||||
scraping_strategy.logger = self.logger
|
||||
|
||||
# Process HTML content
|
||||
params = {k: v for k, v in config.to_dict().items() if k not in ["url"]}
|
||||
params = config.__dict__.copy()
|
||||
params.pop("url", None)
|
||||
# add keys from kwargs to params that doesn't exist in params
|
||||
params.update({k: v for k, v in kwargs.items() if k not in params.keys()})
|
||||
params.update({k: v for k, v in kwargs.items()
|
||||
if k not in params.keys()})
|
||||
|
||||
result = scraping_strategy.scrap(url, html, **params)
|
||||
################################
|
||||
# Scraping Strategy Execution #
|
||||
################################
|
||||
result: ScrapingResult = scraping_strategy.scrap(
|
||||
url, html, **params)
|
||||
|
||||
if result is None:
|
||||
raise ValueError(
|
||||
@@ -608,7 +493,8 @@ class AsyncWebCrawler:
|
||||
|
||||
# Extract results - handle both dict and ScrapingResult
|
||||
if isinstance(result, dict):
|
||||
cleaned_html = sanitize_input_encode(result.get("cleaned_html", ""))
|
||||
cleaned_html = sanitize_input_encode(
|
||||
result.get("cleaned_html", ""))
|
||||
media = result.get("media", {})
|
||||
links = result.get("links", {})
|
||||
metadata = result.get("metadata", {})
|
||||
@@ -618,40 +504,82 @@ class AsyncWebCrawler:
|
||||
links = result.links.model_dump()
|
||||
metadata = result.metadata
|
||||
|
||||
# Markdown Generation
|
||||
################################
|
||||
# Generate Markdown #
|
||||
################################
|
||||
markdown_generator: Optional[MarkdownGenerationStrategy] = (
|
||||
config.markdown_generator or DefaultMarkdownGenerator()
|
||||
)
|
||||
|
||||
# --- SELECT HTML SOURCE BASED ON CONTENT_SOURCE ---
|
||||
# Get the desired source from the generator config, default to 'cleaned_html'
|
||||
selected_html_source = getattr(markdown_generator, 'content_source', 'cleaned_html')
|
||||
|
||||
# Define the source selection logic using dict dispatch
|
||||
html_source_selector = {
|
||||
"raw_html": lambda: html, # The original raw HTML
|
||||
"cleaned_html": lambda: cleaned_html, # The HTML after scraping strategy
|
||||
"fit_html": lambda: preprocess_html_for_schema(html_content=html), # Preprocessed raw HTML
|
||||
}
|
||||
|
||||
markdown_input_html = cleaned_html # Default to cleaned_html
|
||||
|
||||
try:
|
||||
# Get the appropriate lambda function, default to returning cleaned_html if key not found
|
||||
source_lambda = html_source_selector.get(selected_html_source, lambda: cleaned_html)
|
||||
# Execute the lambda to get the selected HTML
|
||||
markdown_input_html = source_lambda()
|
||||
|
||||
# Log which source is being used (optional, but helpful for debugging)
|
||||
# if self.logger and verbose:
|
||||
# actual_source_used = selected_html_source if selected_html_source in html_source_selector else 'cleaned_html (default)'
|
||||
# self.logger.debug(f"Using '{actual_source_used}' as source for Markdown generation for {url}", tag="MARKDOWN_SRC")
|
||||
|
||||
except Exception as e:
|
||||
# Handle potential errors, especially from preprocess_html_for_schema
|
||||
if self.logger:
|
||||
self.logger.warning(
|
||||
f"Error getting/processing '{selected_html_source}' for markdown source: {e}. Falling back to cleaned_html.",
|
||||
tag="MARKDOWN_SRC"
|
||||
)
|
||||
# Ensure markdown_input_html is still the default cleaned_html in case of error
|
||||
markdown_input_html = cleaned_html
|
||||
# --- END: HTML SOURCE SELECTION ---
|
||||
|
||||
# Uncomment if by default we want to use PruningContentFilter
|
||||
# if not config.content_filter and not markdown_generator.content_filter:
|
||||
# markdown_generator.content_filter = PruningContentFilter()
|
||||
|
||||
markdown_result: MarkdownGenerationResult = (
|
||||
markdown_generator.generate_markdown(
|
||||
cleaned_html=cleaned_html,
|
||||
base_url=url,
|
||||
input_html=markdown_input_html,
|
||||
base_url=params.get("redirected_url", url)
|
||||
# html2text_options=kwargs.get('html2text', {})
|
||||
)
|
||||
)
|
||||
markdown_v2 = markdown_result
|
||||
markdown = sanitize_input_encode(markdown_result.raw_markdown)
|
||||
|
||||
# Log processing completion
|
||||
self.logger.info(
|
||||
message="Processed {url:.50}... | Time: {timing}ms",
|
||||
tag="SCRAPE",
|
||||
params={"url": _url, "timing": int((time.perf_counter() - t1) * 1000)},
|
||||
self.logger.url_status(
|
||||
url=_url,
|
||||
success=True,
|
||||
timing=int((time.perf_counter() - t1) * 1000) / 1000,
|
||||
tag="SCRAPE"
|
||||
)
|
||||
# self.logger.info(
|
||||
# message="{url:.50}... | Time: {timing}s",
|
||||
# tag="SCRAPE",
|
||||
# params={"url": _url, "timing": int((time.perf_counter() - t1) * 1000) / 1000},
|
||||
# )
|
||||
|
||||
# Handle content extraction if needed
|
||||
################################
|
||||
# Structured Content Extraction #
|
||||
################################
|
||||
if (
|
||||
not bool(extracted_content)
|
||||
and config.extraction_strategy
|
||||
and not isinstance(config.extraction_strategy, NoExtractionStrategy)
|
||||
):
|
||||
t1 = time.perf_counter()
|
||||
|
||||
# Choose content based on input_format
|
||||
content_format = config.extraction_strategy.input_format
|
||||
if content_format == "fit_markdown" and not markdown_result.fit_markdown:
|
||||
@@ -663,15 +591,16 @@ class AsyncWebCrawler:
|
||||
content_format = "markdown"
|
||||
|
||||
content = {
|
||||
"markdown": markdown,
|
||||
"markdown": markdown_result.raw_markdown,
|
||||
"html": html,
|
||||
"fit_markdown": markdown_result.raw_markdown,
|
||||
}.get(content_format, markdown)
|
||||
"cleaned_html": cleaned_html,
|
||||
"fit_markdown": markdown_result.fit_markdown,
|
||||
}.get(content_format, markdown_result.raw_markdown)
|
||||
|
||||
# Use IdentityChunking for HTML input, otherwise use provided chunking strategy
|
||||
chunking = (
|
||||
IdentityChunking()
|
||||
if content_format == "html"
|
||||
if content_format in ["html", "cleaned_html"]
|
||||
else config.chunking_strategy
|
||||
)
|
||||
sections = chunking.chunk(content)
|
||||
@@ -687,10 +616,6 @@ class AsyncWebCrawler:
|
||||
params={"url": _url, "timing": time.perf_counter() - t1},
|
||||
)
|
||||
|
||||
# Handle screenshot and PDF data
|
||||
screenshot_data = None if not screenshot else screenshot
|
||||
pdf_data = None if not pdf_data else pdf_data
|
||||
|
||||
# Apply HTML formatting if requested
|
||||
if config.prettiify:
|
||||
cleaned_html = fast_format_html(cleaned_html)
|
||||
@@ -700,10 +625,7 @@ class AsyncWebCrawler:
|
||||
url=url,
|
||||
html=html,
|
||||
cleaned_html=cleaned_html,
|
||||
markdown_v2=markdown_v2,
|
||||
markdown=markdown,
|
||||
fit_markdown=markdown_result.fit_markdown,
|
||||
fit_html=markdown_result.fit_html,
|
||||
markdown=markdown_result,
|
||||
media=media,
|
||||
links=links,
|
||||
metadata=metadata,
|
||||
@@ -717,22 +639,22 @@ class AsyncWebCrawler:
|
||||
async def arun_many(
|
||||
self,
|
||||
urls: List[str],
|
||||
config: Optional[CrawlerRunConfig] = None,
|
||||
config: Optional[CrawlerRunConfig] = None,
|
||||
dispatcher: Optional[BaseDispatcher] = None,
|
||||
# Legacy parameters maintained for backwards compatibility
|
||||
word_count_threshold=MIN_WORD_THRESHOLD,
|
||||
extraction_strategy: ExtractionStrategy = None,
|
||||
chunking_strategy: ChunkingStrategy = RegexChunking(),
|
||||
content_filter: RelevantContentFilter = None,
|
||||
cache_mode: Optional[CacheMode] = None,
|
||||
bypass_cache: bool = False,
|
||||
css_selector: str = None,
|
||||
screenshot: bool = False,
|
||||
pdf: bool = False,
|
||||
user_agent: str = None,
|
||||
verbose=True,
|
||||
**kwargs
|
||||
) -> RunManyReturn:
|
||||
# word_count_threshold=MIN_WORD_THRESHOLD,
|
||||
# extraction_strategy: ExtractionStrategy = None,
|
        # chunking_strategy: ChunkingStrategy = RegexChunking(),
        # content_filter: RelevantContentFilter = None,
        # cache_mode: Optional[CacheMode] = None,
        # bypass_cache: bool = False,
        # css_selector: str = None,
        # screenshot: bool = False,
        # pdf: bool = False,
        # user_agent: str = None,
        # verbose=True,
        **kwargs,
    ) -> RunManyReturn:
        """
        Runs the crawler for multiple URLs concurrently using a configurable dispatcher strategy.

@@ -763,20 +685,21 @@ class AsyncWebCrawler:
        ):
            print(f"Processed {result.url}: {len(result.markdown)} chars")
        """
        if config is None:
            config = CrawlerRunConfig(
                word_count_threshold=word_count_threshold,
                extraction_strategy=extraction_strategy,
                chunking_strategy=chunking_strategy,
                content_filter=content_filter,
                cache_mode=cache_mode,
                bypass_cache=bypass_cache,
                css_selector=css_selector,
                screenshot=screenshot,
                pdf=pdf,
                verbose=verbose,
                **kwargs,
            )
        config = config or CrawlerRunConfig()
        # if config is None:
        #     config = CrawlerRunConfig(
        #         word_count_threshold=word_count_threshold,
        #         extraction_strategy=extraction_strategy,
        #         chunking_strategy=chunking_strategy,
        #         content_filter=content_filter,
        #         cache_mode=cache_mode,
        #         bypass_cache=bypass_cache,
        #         css_selector=css_selector,
        #         screenshot=screenshot,
        #         pdf=pdf,
        #         verbose=verbose,
        #         **kwargs,
        #     )

        if dispatcher is None:
            dispatcher = MemoryAdaptiveDispatcher(
@@ -785,38 +708,34 @@ class AsyncWebCrawler:
                ),
            )

        transform_result = lambda task_result: (
            setattr(task_result.result, 'dispatch_result',
                DispatchResult(
                    task_id=task_result.task_id,
                    memory_usage=task_result.memory_usage,
                    peak_memory=task_result.peak_memory,
                    start_time=task_result.start_time,
                    end_time=task_result.end_time,
                    error_message=task_result.error_message,
        def transform_result(task_result):
            return (
                setattr(
                    task_result.result,
                    "dispatch_result",
                    DispatchResult(
                        task_id=task_result.task_id,
                        memory_usage=task_result.memory_usage,
                        peak_memory=task_result.peak_memory,
                        start_time=task_result.start_time,
                        end_time=task_result.end_time,
                        error_message=task_result.error_message,
                    ),
                )
            ) or task_result.result
        )
                or task_result.result
            )

        stream = config.stream

        if stream:

            async def result_transformer():
                async for task_result in dispatcher.run_urls_stream(crawler=self, urls=urls, config=config):
                async for task_result in dispatcher.run_urls_stream(
                    crawler=self, urls=urls, config=config
                ):
                    yield transform_result(task_result)

            return result_transformer()
        else:
            _results = await dispatcher.run_urls(crawler=self, urls=urls, config=config)
            return [transform_result(res) for res in _results]

    async def aclear_cache(self):
        """Clear the cache database."""
        await async_db_manager.cleanup()

    async def aflush_cache(self):
        """Flush the cache database."""
        await async_db_manager.aflush_db()

    async def aget_cache_size(self):
        """Get the total number of cached items."""
        return await async_db_manager.aget_total_count()
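Taken together, the stream and non-stream branches give `arun_many` two shapes: an async generator when `config.stream` is true, a plain list otherwise, with the dispatcher's bookkeeping attached to each result as `dispatch_result`. A minimal usage sketch, assuming the top-level import path (error handling omitted):

```python
import asyncio

from crawl4ai import AsyncWebCrawler, CrawlerRunConfig  # assumed import path

async def main():
    urls = ["https://example.com", "https://example.org"]

    async with AsyncWebCrawler() as crawler:
        # stream=True yields each result as its URL finishes crawling
        config = CrawlerRunConfig(stream=True)
        async for result in await crawler.arun_many(urls, config=config):
            dr = result.dispatch_result  # attached by transform_result() above
            print(result.url, "peak memory:", dr.peak_memory if dr else "n/a")

asyncio.run(main())
```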
||||
1008  crawl4ai/browser_manager.py  (Normal file)
File diff suppressed because it is too large. Load Diff

974  crawl4ai/browser_profiler.py  (Normal file)
@@ -0,0 +1,974 @@
|
||||
"""
|
||||
Browser Profiler Module
|
||||
|
||||
This module provides a dedicated class for managing browser profiles
|
||||
that can be used for identity-based crawling with Crawl4AI.
|
||||
"""
|
||||
|
||||
import os
|
||||
import asyncio
|
||||
import signal
|
||||
import sys
|
||||
import datetime
|
||||
import uuid
|
||||
import shutil
|
||||
import json
|
||||
import subprocess
|
||||
import time
|
||||
from typing import List, Dict, Optional, Any, Tuple
|
||||
from colorama import Fore, Style, init
|
||||
|
||||
from .async_configs import BrowserConfig
|
||||
from .browser_manager import ManagedBrowser
|
||||
from .async_logger import AsyncLogger, AsyncLoggerBase
|
||||
from .utils import get_home_folder
|
||||
|
||||
|
||||
class BrowserProfiler:
|
||||
"""
|
||||
A dedicated class for managing browser profiles for Crawl4AI.
|
||||
|
||||
The BrowserProfiler allows you to:
|
||||
- Create browser profiles interactively
|
||||
- List available profiles
|
||||
- Delete profiles when no longer needed
|
||||
- Get profile paths for use in BrowserConfig
|
||||
|
||||
Profiles are stored by default in ~/.crawl4ai/profiles/
|
||||
"""
|
||||
|
||||
def __init__(self, logger: Optional[AsyncLoggerBase] = None):
|
||||
"""
|
||||
Initialize the BrowserProfiler.
|
||||
|
||||
Args:
|
||||
logger (AsyncLoggerBase, optional): Logger for outputting messages.
|
||||
If None, a default AsyncLogger will be created.
|
||||
"""
|
||||
# Initialize colorama for colorful terminal output
|
||||
init()
|
||||
|
||||
# Create a logger if not provided
|
||||
if logger is None:
|
||||
self.logger = AsyncLogger(verbose=True)
|
||||
elif not isinstance(logger, AsyncLoggerBase):
|
||||
self.logger = AsyncLogger(verbose=True)
|
||||
else:
|
||||
self.logger = logger
|
||||
|
||||
# Ensure profiles directory exists
|
||||
self.profiles_dir = os.path.join(get_home_folder(), "profiles")
|
||||
os.makedirs(self.profiles_dir, exist_ok=True)
|
||||
|
||||
# Builtin browser config file
|
||||
self.builtin_browser_dir = os.path.join(get_home_folder(), "builtin-browser")
|
||||
self.builtin_config_file = os.path.join(self.builtin_browser_dir, "browser_config.json")
|
||||
os.makedirs(self.builtin_browser_dir, exist_ok=True)
|
||||
|
||||
async def create_profile(self,
|
||||
profile_name: Optional[str] = None,
|
||||
browser_config: Optional[BrowserConfig] = None) -> Optional[str]:
|
||||
"""
|
||||
Creates a browser profile by launching a browser for interactive user setup
|
||||
and waits until the user closes it. The profile is stored in a directory that
|
||||
can be used later with BrowserConfig.user_data_dir.
|
||||
|
||||
Args:
|
||||
profile_name (str, optional): Name for the profile directory.
|
||||
If None, a name is generated based on timestamp.
|
||||
browser_config (BrowserConfig, optional): Configuration for the browser.
|
||||
If None, a default configuration is used with headless=False.
|
||||
|
||||
Returns:
|
||||
str: Path to the created profile directory, or None if creation failed
|
||||
|
||||
Example:
|
||||
```python
|
||||
profiler = BrowserProfiler()
|
||||
|
||||
# Create a profile interactively
|
||||
profile_path = await profiler.create_profile(
|
||||
profile_name="my-login-profile"
|
||||
)
|
||||
|
||||
# Use the profile in a crawler
|
||||
browser_config = BrowserConfig(
|
||||
headless=True,
|
||||
use_managed_browser=True,
|
||||
user_data_dir=profile_path
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
# The crawler will now use your profile with all your cookies and login state
|
||||
result = await crawler.arun("https://example.com/dashboard")
|
||||
```
|
||||
"""
|
||||
# Create default browser config if none provided
|
||||
if browser_config is None:
|
||||
from .async_configs import BrowserConfig
|
||||
browser_config = BrowserConfig(
|
||||
browser_type="chromium",
|
||||
headless=False, # Must be visible for user interaction
|
||||
verbose=True
|
||||
)
|
||||
else:
|
||||
# Ensure headless is False for user interaction
|
||||
browser_config.headless = False
|
||||
|
||||
# Generate profile name if not provided
|
||||
if not profile_name:
|
||||
timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
|
||||
profile_name = f"profile_{timestamp}_{uuid.uuid4().hex[:6]}"
|
||||
|
||||
# Sanitize profile name (replace spaces and special chars)
|
||||
profile_name = "".join(c if c.isalnum() or c in "-_" else "_" for c in profile_name)
|
||||
|
||||
# Set user data directory
|
||||
profile_path = os.path.join(self.profiles_dir, profile_name)
|
||||
os.makedirs(profile_path, exist_ok=True)
|
||||
|
||||
# Print instructions for the user with colorama formatting
|
||||
border = f"{Fore.CYAN}{'='*80}{Style.RESET_ALL}"
|
||||
self.logger.info(f"\n{border}", tag="PROFILE")
|
||||
self.logger.info(f"Creating browser profile: {Fore.GREEN}{profile_name}{Style.RESET_ALL}", tag="PROFILE")
|
||||
self.logger.info(f"Profile directory: {Fore.YELLOW}{profile_path}{Style.RESET_ALL}", tag="PROFILE")
|
||||
|
||||
self.logger.info("\nInstructions:", tag="PROFILE")
|
||||
self.logger.info("1. A browser window will open for you to set up your profile.", tag="PROFILE")
|
||||
self.logger.info(f"2. {Fore.CYAN}Log in to websites{Style.RESET_ALL}, configure settings, etc. as needed.", tag="PROFILE")
|
||||
self.logger.info(f"3. When you're done, {Fore.YELLOW}press 'q' in this terminal{Style.RESET_ALL} to close the browser.", tag="PROFILE")
|
||||
self.logger.info("4. The profile will be saved and ready to use with Crawl4AI.", tag="PROFILE")
|
||||
self.logger.info(f"{border}\n", tag="PROFILE")
|
||||
|
||||
# Create managed browser instance
|
||||
managed_browser = ManagedBrowser(
|
||||
browser_type=browser_config.browser_type,
|
||||
user_data_dir=profile_path,
|
||||
headless=False, # Must be visible
|
||||
logger=self.logger,
|
||||
debugging_port=browser_config.debugging_port
|
||||
)
|
||||
|
||||
# Set up signal handlers to ensure cleanup on interrupt
|
||||
original_sigint = signal.getsignal(signal.SIGINT)
|
||||
original_sigterm = signal.getsignal(signal.SIGTERM)
|
||||
|
||||
# Define cleanup handler for signals
|
||||
async def cleanup_handler(sig, frame):
|
||||
self.logger.warning("\nCleaning up browser process...", tag="PROFILE")
|
||||
await managed_browser.cleanup()
|
||||
# Restore original signal handlers
|
||||
signal.signal(signal.SIGINT, original_sigint)
|
||||
signal.signal(signal.SIGTERM, original_sigterm)
|
||||
if sig == signal.SIGINT:
|
||||
self.logger.error("Profile creation interrupted. Profile may be incomplete.", tag="PROFILE")
|
||||
sys.exit(1)
|
||||
|
||||
# Set signal handlers
|
||||
def sigint_handler(sig, frame):
|
||||
asyncio.create_task(cleanup_handler(sig, frame))
|
||||
|
||||
signal.signal(signal.SIGINT, sigint_handler)
|
||||
signal.signal(signal.SIGTERM, sigint_handler)
|
||||
|
||||
# Event to signal when user is done with the browser
|
||||
user_done_event = asyncio.Event()
|
||||
|
||||
# Run keyboard input loop in a separate task
|
||||
async def listen_for_quit_command():
|
||||
import termios
|
||||
import tty
|
||||
import select
|
||||
|
||||
# First output the prompt
|
||||
self.logger.info(f"{Fore.CYAN}Press '{Fore.WHITE}q{Fore.CYAN}' when you've finished using the browser...{Style.RESET_ALL}", tag="PROFILE")
|
||||
|
||||
# Save original terminal settings
|
||||
fd = sys.stdin.fileno()
|
||||
old_settings = termios.tcgetattr(fd)
|
||||
|
||||
try:
|
||||
# Switch to non-canonical mode (no line buffering)
|
||||
tty.setcbreak(fd)
|
||||
|
||||
while True:
|
||||
# Check if input is available (non-blocking)
|
||||
readable, _, _ = select.select([sys.stdin], [], [], 0.5)
|
||||
if readable:
|
||||
key = sys.stdin.read(1)
|
||||
if key.lower() == 'q':
|
||||
self.logger.info(f"{Fore.GREEN}Closing browser and saving profile...{Style.RESET_ALL}", tag="PROFILE")
|
||||
user_done_event.set()
|
||||
return
|
||||
|
||||
# Check if the browser process has already exited
|
||||
if managed_browser.browser_process and managed_browser.browser_process.poll() is not None:
|
||||
self.logger.info("Browser already closed. Ending input listener.", tag="PROFILE")
|
||||
user_done_event.set()
|
||||
return
|
||||
|
||||
await asyncio.sleep(0.1)
|
||||
|
||||
finally:
|
||||
# Restore terminal settings
|
||||
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
|
||||
|
||||
try:
|
||||
# Start the browser
|
||||
await managed_browser.start()
|
||||
|
||||
# Check if browser started successfully
|
||||
browser_process = managed_browser.browser_process
|
||||
if not browser_process:
|
||||
self.logger.error("Failed to start browser process.", tag="PROFILE")
|
||||
return None
|
||||
|
||||
self.logger.info(f"Browser launched. {Fore.CYAN}Waiting for you to finish...{Style.RESET_ALL}", tag="PROFILE")
|
||||
|
||||
# Start listening for keyboard input
|
||||
listener_task = asyncio.create_task(listen_for_quit_command())
|
||||
|
||||
# Wait for either the user to press 'q' or for the browser process to exit naturally
|
||||
while not user_done_event.is_set() and browser_process.poll() is None:
|
||||
await asyncio.sleep(0.5)
|
||||
|
||||
# Cancel the listener task if it's still running
|
||||
if not listener_task.done():
|
||||
listener_task.cancel()
|
||||
try:
|
||||
await listener_task
|
||||
except asyncio.CancelledError:
|
||||
pass
|
||||
|
||||
# If the browser is still running and the user pressed 'q', terminate it
|
||||
if browser_process.poll() is None and user_done_event.is_set():
|
||||
self.logger.info("Terminating browser process...", tag="PROFILE")
|
||||
await managed_browser.cleanup()
|
||||
|
||||
self.logger.success(f"Browser closed. Profile saved at: {Fore.GREEN}{profile_path}{Style.RESET_ALL}", tag="PROFILE")
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f"Error creating profile: {str(e)}", tag="PROFILE")
|
||||
await managed_browser.cleanup()
|
||||
return None
|
||||
finally:
|
||||
# Restore original signal handlers
|
||||
signal.signal(signal.SIGINT, original_sigint)
|
||||
signal.signal(signal.SIGTERM, original_sigterm)
|
||||
|
||||
# Make sure browser is fully cleaned up
|
||||
await managed_browser.cleanup()
|
||||
|
||||
# Return the profile path
|
||||
return profile_path
|
||||
|
||||
def list_profiles(self) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Lists all available browser profiles in the Crawl4AI profiles directory.
|
||||
|
||||
Returns:
|
||||
list: A list of dictionaries containing profile information:
|
||||
[{"name": "profile_name", "path": "/path/to/profile", "created": datetime, "type": "chromium|firefox"}]
|
||||
|
||||
Example:
|
||||
```python
|
||||
profiler = BrowserProfiler()
|
||||
|
||||
# List all available profiles
|
||||
profiles = profiler.list_profiles()
|
||||
|
||||
for profile in profiles:
|
||||
print(f"Profile: {profile['name']}")
|
||||
print(f" Path: {profile['path']}")
|
||||
print(f" Created: {profile['created']}")
|
||||
print(f" Browser type: {profile['type']}")
|
||||
```
|
||||
"""
|
||||
if not os.path.exists(self.profiles_dir):
|
||||
return []
|
||||
|
||||
profiles = []
|
||||
|
||||
for name in os.listdir(self.profiles_dir):
|
||||
profile_path = os.path.join(self.profiles_dir, name)
|
||||
|
||||
# Skip if not a directory
|
||||
if not os.path.isdir(profile_path):
|
||||
continue
|
||||
|
||||
# Check if this looks like a valid browser profile
|
||||
# For Chromium: Look for Preferences file
|
||||
# For Firefox: Look for prefs.js file
|
||||
is_valid = False
|
||||
|
||||
if os.path.exists(os.path.join(profile_path, "Preferences")) or \
|
||||
os.path.exists(os.path.join(profile_path, "Default", "Preferences")):
|
||||
is_valid = "chromium"
|
||||
elif os.path.exists(os.path.join(profile_path, "prefs.js")):
|
||||
is_valid = "firefox"
|
||||
|
||||
if is_valid:
|
||||
# Get creation time
|
||||
created = datetime.datetime.fromtimestamp(
|
||||
os.path.getctime(profile_path)
|
||||
)
|
||||
|
||||
profiles.append({
|
||||
"name": name,
|
||||
"path": profile_path,
|
||||
"created": created,
|
||||
"type": is_valid
|
||||
})
|
||||
|
||||
# Sort by creation time, newest first
|
||||
profiles.sort(key=lambda x: x["created"], reverse=True)
|
||||
|
||||
return profiles
|
||||
|
||||
def get_profile_path(self, profile_name: str) -> Optional[str]:
|
||||
"""
|
||||
Get the full path to a profile by name.
|
||||
|
||||
Args:
|
||||
profile_name (str): Name of the profile (not the full path)
|
||||
|
||||
Returns:
|
||||
str: Full path to the profile directory, or None if not found
|
||||
|
||||
Example:
|
||||
```python
|
||||
profiler = BrowserProfiler()
|
||||
|
||||
path = profiler.get_profile_path("my-profile")
|
||||
if path:
|
||||
print(f"Profile path: {path}")
|
||||
else:
|
||||
print("Profile not found")
|
||||
```
|
||||
"""
|
||||
profile_path = os.path.join(self.profiles_dir, profile_name)
|
||||
|
||||
# Check if path exists and is a valid profile
|
||||
if not os.path.isdir(profile_path):
|
||||
# Check if profile_name itself is a full path
|
||||
if os.path.isabs(profile_name):
|
||||
profile_path = profile_name
|
||||
else:
|
||||
return None
|
||||
|
||||
# Look for profile indicators
|
||||
is_profile = (
|
||||
os.path.exists(os.path.join(profile_path, "Preferences")) or
|
||||
os.path.exists(os.path.join(profile_path, "Default", "Preferences")) or
|
||||
os.path.exists(os.path.join(profile_path, "prefs.js"))
|
||||
)
|
||||
|
||||
if not is_profile:
|
||||
return None # Not a valid browser profile
|
||||
|
||||
return profile_path
|
||||
|
||||
def delete_profile(self, profile_name_or_path: str) -> bool:
|
||||
"""
|
||||
Delete a browser profile by name or path.
|
||||
|
||||
Args:
|
||||
profile_name_or_path (str): Name of the profile or full path to profile directory
|
||||
|
||||
Returns:
|
||||
bool: True if the profile was deleted successfully, False otherwise
|
||||
|
||||
Example:
|
||||
```python
|
||||
profiler = BrowserProfiler()
|
||||
|
||||
# Delete by name
|
||||
success = profiler.delete_profile("my-profile")
|
||||
|
||||
# Delete by path
|
||||
success = profiler.delete_profile("/path/to/.crawl4ai/profiles/my-profile")
|
||||
```
|
||||
"""
|
||||
# Determine if input is a name or a path
|
||||
if os.path.isabs(profile_name_or_path):
|
||||
# Full path provided
|
||||
profile_path = profile_name_or_path
|
||||
else:
|
||||
# Just a name provided, construct path
|
||||
profile_path = os.path.join(self.profiles_dir, profile_name_or_path)
|
||||
|
||||
# Check if path exists and is a valid profile
|
||||
if not os.path.isdir(profile_path):
|
||||
return False
|
||||
|
||||
# Look for profile indicators
|
||||
is_profile = (
|
||||
os.path.exists(os.path.join(profile_path, "Preferences")) or
|
||||
os.path.exists(os.path.join(profile_path, "Default", "Preferences")) or
|
||||
os.path.exists(os.path.join(profile_path, "prefs.js"))
|
||||
)
|
||||
|
||||
if not is_profile:
|
||||
return False # Not a valid browser profile
|
||||
|
||||
# Delete the profile directory
|
||||
try:
|
||||
shutil.rmtree(profile_path)
|
||||
return True
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
async def interactive_manager(self, crawl_callback=None):
|
||||
"""
|
||||
Launch an interactive profile management console.
|
||||
|
||||
Args:
|
||||
crawl_callback (callable, optional): Function to call when selecting option to use
|
||||
a profile for crawling. It will be called with (profile_path, url).
|
||||
|
||||
Example:
|
||||
```python
|
||||
profiler = BrowserProfiler()
|
||||
|
||||
# Define a custom crawl function
|
||||
async def my_crawl_function(profile_path, url):
|
||||
print(f"Crawling {url} with profile {profile_path}")
|
||||
# Implement your crawling logic here
|
||||
|
||||
# Start interactive manager
|
||||
await profiler.interactive_manager(crawl_callback=my_crawl_function)
|
||||
```
|
||||
"""
|
||||
while True:
|
||||
self.logger.info(f"\n{Fore.CYAN}Profile Management Options:{Style.RESET_ALL}", tag="MENU")
|
||||
self.logger.info(f"1. {Fore.GREEN}Create a new profile{Style.RESET_ALL}", tag="MENU")
|
||||
self.logger.info(f"2. {Fore.YELLOW}List available profiles{Style.RESET_ALL}", tag="MENU")
|
||||
self.logger.info(f"3. {Fore.RED}Delete a profile{Style.RESET_ALL}", tag="MENU")
|
||||
|
||||
# Only show crawl option if callback provided
|
||||
if crawl_callback:
|
||||
self.logger.info(f"4. {Fore.CYAN}Use a profile to crawl a website{Style.RESET_ALL}", tag="MENU")
|
||||
self.logger.info(f"5. {Fore.MAGENTA}Exit{Style.RESET_ALL}", tag="MENU")
|
||||
exit_option = "5"
|
||||
else:
|
||||
self.logger.info(f"4. {Fore.MAGENTA}Exit{Style.RESET_ALL}", tag="MENU")
|
||||
exit_option = "4"
|
||||
|
||||
choice = input(f"\n{Fore.CYAN}Enter your choice (1-{exit_option}): {Style.RESET_ALL}")
|
||||
|
||||
if choice == "1":
|
||||
# Create new profile
|
||||
name = input(f"{Fore.GREEN}Enter a name for the new profile (or press Enter for auto-generated name): {Style.RESET_ALL}")
|
||||
await self.create_profile(name or None)
|
||||
|
||||
elif choice == "2":
|
||||
# List profiles
|
||||
profiles = self.list_profiles()
|
||||
|
||||
if not profiles:
|
||||
self.logger.warning(" No profiles found. Create one first with option 1.", tag="PROFILES")
|
||||
continue
|
||||
|
||||
# Print profile information with colorama formatting
|
||||
self.logger.info("\nAvailable profiles:", tag="PROFILES")
|
||||
for i, profile in enumerate(profiles):
|
||||
self.logger.info(f"[{i+1}] {Fore.CYAN}{profile['name']}{Style.RESET_ALL}", tag="PROFILES")
|
||||
self.logger.info(f" Path: {Fore.YELLOW}{profile['path']}{Style.RESET_ALL}", tag="PROFILES")
|
||||
self.logger.info(f" Created: {profile['created'].strftime('%Y-%m-%d %H:%M:%S')}", tag="PROFILES")
|
||||
self.logger.info(f" Browser type: {profile['type']}", tag="PROFILES")
|
||||
self.logger.info("", tag="PROFILES") # Empty line for spacing
|
||||
|
||||
elif choice == "3":
|
||||
# Delete profile
|
||||
profiles = self.list_profiles()
|
||||
if not profiles:
|
||||
self.logger.warning("No profiles found to delete", tag="PROFILES")
|
||||
continue
|
||||
|
||||
# Display numbered list
|
||||
self.logger.info(f"\n{Fore.YELLOW}Available profiles:{Style.RESET_ALL}", tag="PROFILES")
|
||||
for i, profile in enumerate(profiles):
|
||||
self.logger.info(f"[{i+1}] {profile['name']}", tag="PROFILES")
|
||||
|
||||
# Get profile to delete
|
||||
profile_idx = input(f"{Fore.RED}Enter the number of the profile to delete (or 'c' to cancel): {Style.RESET_ALL}")
|
||||
if profile_idx.lower() == 'c':
|
||||
continue
|
||||
|
||||
try:
|
||||
idx = int(profile_idx) - 1
|
||||
if 0 <= idx < len(profiles):
|
||||
profile_name = profiles[idx]["name"]
|
||||
self.logger.info(f"Deleting profile: {Fore.YELLOW}{profile_name}{Style.RESET_ALL}", tag="PROFILES")
|
||||
|
||||
# Confirm deletion
|
||||
confirm = input(f"{Fore.RED}Are you sure you want to delete this profile? (y/n): {Style.RESET_ALL}")
|
||||
if confirm.lower() == 'y':
|
||||
success = self.delete_profile(profiles[idx]["path"])
|
||||
|
||||
if success:
|
||||
self.logger.success(f"Profile {Fore.GREEN}{profile_name}{Style.RESET_ALL} deleted successfully", tag="PROFILES")
|
||||
else:
|
||||
self.logger.error(f"Failed to delete profile {Fore.RED}{profile_name}{Style.RESET_ALL}", tag="PROFILES")
|
||||
else:
|
||||
self.logger.error("Invalid profile number", tag="PROFILES")
|
||||
except ValueError:
|
||||
self.logger.error("Please enter a valid number", tag="PROFILES")
|
||||
|
||||
elif choice == "4" and crawl_callback:
|
||||
# Use profile to crawl a site
|
||||
profiles = self.list_profiles()
|
||||
if not profiles:
|
||||
self.logger.warning("No profiles found. Create one first.", tag="PROFILES")
|
||||
continue
|
||||
|
||||
# Display numbered list
|
||||
self.logger.info(f"\n{Fore.YELLOW}Available profiles:{Style.RESET_ALL}", tag="PROFILES")
|
||||
for i, profile in enumerate(profiles):
|
||||
self.logger.info(f"[{i+1}] {profile['name']}", tag="PROFILES")
|
||||
|
||||
# Get profile to use
|
||||
profile_idx = input(f"{Fore.CYAN}Enter the number of the profile to use (or 'c' to cancel): {Style.RESET_ALL}")
|
||||
if profile_idx.lower() == 'c':
|
||||
continue
|
||||
|
||||
try:
|
||||
idx = int(profile_idx) - 1
|
||||
if 0 <= idx < len(profiles):
|
||||
profile_path = profiles[idx]["path"]
|
||||
url = input(f"{Fore.CYAN}Enter the URL to crawl: {Style.RESET_ALL}")
|
||||
if url:
|
||||
# Call the provided crawl callback
|
||||
await crawl_callback(profile_path, url)
|
||||
else:
|
||||
self.logger.error("No URL provided", tag="CRAWL")
|
||||
else:
|
||||
self.logger.error("Invalid profile number", tag="PROFILES")
|
||||
except ValueError:
|
||||
self.logger.error("Please enter a valid number", tag="PROFILES")
|
||||
|
||||
elif choice == exit_option:
|
||||
# Exit
|
||||
self.logger.info("Exiting profile management", tag="MENU")
|
||||
break
|
||||
|
||||
else:
|
||||
self.logger.error(f"Invalid choice. Please enter a number between 1 and {exit_option}.", tag="MENU")
|
||||
|
||||
async def launch_standalone_browser(self,
|
||||
browser_type: str = "chromium",
|
||||
user_data_dir: Optional[str] = None,
|
||||
debugging_port: int = 9222,
|
||||
headless: bool = False,
|
||||
save_as_builtin: bool = False) -> Optional[str]:
|
||||
"""
|
||||
Launch a standalone browser with CDP debugging enabled and keep it running
|
||||
until the user presses 'q'. Returns and displays the CDP URL.
|
||||
|
||||
Args:
|
||||
browser_type (str): Type of browser to launch ('chromium' or 'firefox')
|
||||
user_data_dir (str, optional): Path to user profile directory
|
||||
debugging_port (int): Port to use for CDP debugging
|
||||
headless (bool): Whether to run in headless mode
save_as_builtin (bool): If True, also save this browser's configuration as the builtin browser
|
||||
|
||||
Returns:
|
||||
str: CDP URL for the browser, or None if launch failed
|
||||
|
||||
Example:
|
||||
```python
|
||||
profiler = BrowserProfiler()
|
||||
cdp_url = await profiler.launch_standalone_browser(
|
||||
user_data_dir="/path/to/profile",
|
||||
debugging_port=9222
|
||||
)
|
||||
# Use cdp_url to connect to the browser
|
||||
```
|
||||
"""
|
||||
# Use the provided directory if specified, otherwise create a temporary directory
|
||||
if user_data_dir:
|
||||
# Directory is provided directly, ensure it exists
|
||||
profile_path = user_data_dir
|
||||
os.makedirs(profile_path, exist_ok=True)
|
||||
else:
|
||||
# Create a temporary profile directory
|
||||
timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
|
||||
profile_name = f"temp_{timestamp}_{uuid.uuid4().hex[:6]}"
|
||||
profile_path = os.path.join(self.profiles_dir, profile_name)
|
||||
os.makedirs(profile_path, exist_ok=True)
|
||||
|
||||
# Print initial information
|
||||
border = f"{Fore.CYAN}{'='*80}{Style.RESET_ALL}"
|
||||
self.logger.info(f"\n{border}", tag="CDP")
|
||||
self.logger.info(f"Launching standalone browser with CDP debugging", tag="CDP")
|
||||
self.logger.info(f"Browser type: {Fore.GREEN}{browser_type}{Style.RESET_ALL}", tag="CDP")
|
||||
self.logger.info(f"Profile path: {Fore.YELLOW}{profile_path}{Style.RESET_ALL}", tag="CDP")
|
||||
self.logger.info(f"Debugging port: {Fore.CYAN}{debugging_port}{Style.RESET_ALL}", tag="CDP")
|
||||
self.logger.info(f"Headless mode: {Fore.CYAN}{headless}{Style.RESET_ALL}", tag="CDP")
|
||||
|
||||
# Create managed browser instance
|
||||
managed_browser = ManagedBrowser(
|
||||
browser_type=browser_type,
|
||||
user_data_dir=profile_path,
|
||||
headless=headless,
|
||||
logger=self.logger,
|
||||
debugging_port=debugging_port
|
||||
)
|
||||
|
||||
# Set up signal handlers to ensure cleanup on interrupt
|
||||
original_sigint = signal.getsignal(signal.SIGINT)
|
||||
original_sigterm = signal.getsignal(signal.SIGTERM)
|
||||
|
||||
# Define cleanup handler for signals
|
||||
async def cleanup_handler(sig, frame):
|
||||
self.logger.warning("\nCleaning up browser process...", tag="CDP")
|
||||
await managed_browser.cleanup()
|
||||
# Restore original signal handlers
|
||||
signal.signal(signal.SIGINT, original_sigint)
|
||||
signal.signal(signal.SIGTERM, original_sigterm)
|
||||
if sig == signal.SIGINT:
|
||||
self.logger.error("Browser terminated by user.", tag="CDP")
|
||||
sys.exit(1)
|
||||
|
||||
# Set signal handlers
|
||||
def sigint_handler(sig, frame):
|
||||
asyncio.create_task(cleanup_handler(sig, frame))
|
||||
|
||||
signal.signal(signal.SIGINT, sigint_handler)
|
||||
signal.signal(signal.SIGTERM, sigint_handler)
|
||||
|
||||
# Event to signal when user wants to exit
|
||||
user_done_event = asyncio.Event()
|
||||
|
||||
# Run keyboard input loop in a separate task
|
||||
async def listen_for_quit_command():
|
||||
import termios
|
||||
import tty
|
||||
import select
|
||||
|
||||
# First output the prompt
|
||||
self.logger.info(f"{Fore.CYAN}Press '{Fore.WHITE}q{Fore.CYAN}' to stop the browser and exit...{Style.RESET_ALL}", tag="CDP")
|
||||
|
||||
# Save original terminal settings
|
||||
fd = sys.stdin.fileno()
|
||||
old_settings = termios.tcgetattr(fd)
|
||||
|
||||
try:
|
||||
# Switch to non-canonical mode (no line buffering)
|
||||
tty.setcbreak(fd)
|
||||
|
||||
while True:
|
||||
# Check if input is available (non-blocking)
|
||||
readable, _, _ = select.select([sys.stdin], [], [], 0.5)
|
||||
if readable:
|
||||
key = sys.stdin.read(1)
|
||||
if key.lower() == 'q':
|
||||
self.logger.info(f"{Fore.GREEN}Closing browser...{Style.RESET_ALL}", tag="CDP")
|
||||
user_done_event.set()
|
||||
return
|
||||
|
||||
# Check if the browser process has already exited
|
||||
if managed_browser.browser_process and managed_browser.browser_process.poll() is not None:
|
||||
self.logger.info("Browser already closed. Ending input listener.", tag="CDP")
|
||||
user_done_event.set()
|
||||
return
|
||||
|
||||
await asyncio.sleep(0.1)
|
||||
|
||||
finally:
|
||||
# Restore terminal settings
|
||||
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
|
||||
|
||||
# Function to retrieve and display CDP JSON config
|
||||
async def get_cdp_json(port):
|
||||
import aiohttp
|
||||
cdp_url = f"http://localhost:{port}"
|
||||
json_url = f"{cdp_url}/json/version"
|
||||
|
||||
try:
|
||||
async with aiohttp.ClientSession() as session:
|
||||
# Try multiple times in case the browser is still starting up
|
||||
for _ in range(10):
|
||||
try:
|
||||
async with session.get(json_url) as response:
|
||||
if response.status == 200:
|
||||
data = await response.json()
|
||||
return cdp_url, data
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
await asyncio.sleep(0.5)
|
||||
|
||||
return cdp_url, None
|
||||
except Exception as e:
|
||||
self.logger.error(f"Error fetching CDP JSON: {str(e)}", tag="CDP")
|
||||
return cdp_url, None
|
||||
|
||||
cdp_url = None
|
||||
config_json = None
|
||||
|
||||
try:
|
||||
# Start the browser
|
||||
await managed_browser.start()
|
||||
|
||||
# Check if browser started successfully
|
||||
browser_process = managed_browser.browser_process
|
||||
if not browser_process:
|
||||
self.logger.error("Failed to start browser process.", tag="CDP")
|
||||
return None
|
||||
|
||||
self.logger.info(f"Browser launched successfully. Retrieving CDP information...", tag="CDP")
|
||||
|
||||
# Get CDP URL and JSON config
|
||||
cdp_url, config_json = await get_cdp_json(debugging_port)
|
||||
|
||||
if cdp_url:
|
||||
self.logger.success(f"CDP URL: {Fore.GREEN}{cdp_url}{Style.RESET_ALL}", tag="CDP")
|
||||
|
||||
if config_json:
|
||||
# Display relevant CDP information
|
||||
self.logger.info(f"Browser: {Fore.CYAN}{config_json.get('Browser', 'Unknown')}{Style.RESET_ALL}", tag="CDP")
|
||||
self.logger.info(f"Protocol Version: {config_json.get('Protocol-Version', 'Unknown')}", tag="CDP")
|
||||
if 'webSocketDebuggerUrl' in config_json:
|
||||
self.logger.info(f"WebSocket URL: {Fore.GREEN}{config_json['webSocketDebuggerUrl']}{Style.RESET_ALL}", tag="CDP")
|
||||
else:
|
||||
self.logger.warning("Could not retrieve CDP configuration JSON", tag="CDP")
|
||||
else:
|
||||
self.logger.error(f"Failed to get CDP URL on port {debugging_port}", tag="CDP")
|
||||
await managed_browser.cleanup()
|
||||
return None
|
||||
|
||||
# Start listening for keyboard input
|
||||
listener_task = asyncio.create_task(listen_for_quit_command())
|
||||
|
||||
# Wait for the user to press 'q' or for the browser process to exit naturally
|
||||
while not user_done_event.is_set() and browser_process.poll() is None:
|
||||
await asyncio.sleep(0.5)
|
||||
|
||||
# Cancel the listener task if it's still running
|
||||
if not listener_task.done():
|
||||
listener_task.cancel()
|
||||
try:
|
||||
await listener_task
|
||||
except asyncio.CancelledError:
|
||||
pass
|
||||
|
||||
# If the browser is still running and the user pressed 'q', terminate it
|
||||
if browser_process.poll() is None and user_done_event.is_set():
|
||||
self.logger.info("Terminating browser process...", tag="CDP")
|
||||
await managed_browser.cleanup()
|
||||
|
||||
self.logger.success(f"Browser closed.", tag="CDP")
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f"Error launching standalone browser: {str(e)}", tag="CDP")
|
||||
await managed_browser.cleanup()
|
||||
return None
|
||||
finally:
|
||||
# Restore original signal handlers
|
||||
signal.signal(signal.SIGINT, original_sigint)
|
||||
signal.signal(signal.SIGTERM, original_sigterm)
|
||||
|
||||
# Make sure browser is fully cleaned up
|
||||
await managed_browser.cleanup()
|
||||
|
||||
# Return the CDP URL
|
||||
return cdp_url
|
||||
|
||||
async def launch_builtin_browser(self,
|
||||
browser_type: str = "chromium",
|
||||
debugging_port: int = 9222,
|
||||
headless: bool = True) -> Optional[str]:
|
||||
"""
|
||||
Launch a browser in the background for use as the builtin browser.
|
||||
|
||||
Args:
|
||||
browser_type (str): Type of browser to launch ('chromium' or 'firefox')
|
||||
debugging_port (int): Port to use for CDP debugging
|
||||
headless (bool): Whether to run in headless mode
|
||||
|
||||
Returns:
|
||||
str: CDP URL for the browser, or None if launch failed
|
||||
"""
|
||||
# Check if there's an existing browser still running
|
||||
browser_info = self.get_builtin_browser_info()
|
||||
if browser_info and self._is_browser_running(browser_info.get('pid')):
|
||||
self.logger.info("Builtin browser is already running", tag="BUILTIN")
|
||||
return browser_info.get('cdp_url')
|
||||
|
||||
# Create a user data directory for the builtin browser
|
||||
user_data_dir = os.path.join(self.builtin_browser_dir, "user_data")
|
||||
os.makedirs(user_data_dir, exist_ok=True)
|
||||
|
||||
# Create managed browser instance
|
||||
managed_browser = ManagedBrowser(
|
||||
browser_type=browser_type,
|
||||
user_data_dir=user_data_dir,
|
||||
headless=headless,
|
||||
logger=self.logger,
|
||||
debugging_port=debugging_port
|
||||
)
|
||||
|
||||
try:
|
||||
# Start the browser
|
||||
await managed_browser.start()
|
||||
|
||||
# Check if browser started successfully
|
||||
browser_process = managed_browser.browser_process
|
||||
if not browser_process:
|
||||
self.logger.error("Failed to start browser process.", tag="BUILTIN")
|
||||
return None
|
||||
|
||||
# Get CDP URL
|
||||
cdp_url = f"http://localhost:{debugging_port}"
|
||||
|
||||
# Try to verify browser is responsive by fetching version info
|
||||
import aiohttp
|
||||
json_url = f"{cdp_url}/json/version"
|
||||
config_json = None
|
||||
|
||||
try:
|
||||
async with aiohttp.ClientSession() as session:
|
||||
for _ in range(10): # Try multiple times
|
||||
try:
|
||||
async with session.get(json_url) as response:
|
||||
if response.status == 200:
|
||||
config_json = await response.json()
|
||||
break
|
||||
except Exception:
|
||||
pass
|
||||
await asyncio.sleep(0.5)
|
||||
except Exception as e:
|
||||
self.logger.warning(f"Could not verify browser: {str(e)}", tag="BUILTIN")
|
||||
|
||||
# Save browser info
|
||||
browser_info = {
|
||||
'pid': browser_process.pid,
|
||||
'cdp_url': cdp_url,
|
||||
'user_data_dir': user_data_dir,
|
||||
'browser_type': browser_type,
|
||||
'debugging_port': debugging_port,
|
||||
'start_time': time.time(),
|
||||
'config': config_json
|
||||
}
|
||||
|
||||
with open(self.builtin_config_file, 'w') as f:
|
||||
json.dump(browser_info, f, indent=2)
|
||||
|
||||
# Detach from the browser process - don't keep any references
|
||||
# This is important to allow the Python script to exit while the browser continues running
|
||||
# We'll just record the PID and other info, and the browser will run independently
|
||||
managed_browser.browser_process = None
|
||||
|
||||
self.logger.success(f"Builtin browser launched at CDP URL: {cdp_url}", tag="BUILTIN")
|
||||
return cdp_url
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f"Error launching builtin browser: {str(e)}", tag="BUILTIN")
|
||||
if managed_browser:
|
||||
await managed_browser.cleanup()
|
||||
return None
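Because the builtin browser detaches and keeps running after this call returns, a separate client can attach to the reported CDP endpoint. A sketch using Playwright's `connect_over_cdp` (Playwright here is an illustrative choice, not something this module prescribes):

```python
import asyncio

from playwright.async_api import async_playwright
from crawl4ai.browser_profiler import BrowserProfiler  # assumed import path

async def main():
    profiler = BrowserProfiler()
    cdp_url = await profiler.launch_builtin_browser(headless=True)
    if cdp_url is None:
        return  # launch failed; see the BUILTIN log messages

    async with async_playwright() as p:
        # Attach to the already-running Chromium over CDP
        browser = await p.chromium.connect_over_cdp(cdp_url)
        page = await browser.new_page()
        await page.goto("https://example.com")
        print(await page.title())
        await browser.close()

asyncio.run(main())
```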
|
||||
|
||||
def get_builtin_browser_info(self) -> Optional[Dict[str, Any]]:
|
||||
"""
|
||||
Get information about the builtin browser.
|
||||
|
||||
Returns:
|
||||
dict: Browser information or None if no builtin browser is configured
|
||||
"""
|
||||
if not os.path.exists(self.builtin_config_file):
|
||||
return None
|
||||
|
||||
try:
|
||||
with open(self.builtin_config_file, 'r') as f:
|
||||
browser_info = json.load(f)
|
||||
|
||||
# Check if the browser is still running
|
||||
if not self._is_browser_running(browser_info.get('pid')):
|
||||
self.logger.warning("Builtin browser is not running", tag="BUILTIN")
|
||||
return None
|
||||
|
||||
return browser_info
|
||||
except Exception as e:
|
||||
self.logger.error(f"Error reading builtin browser config: {str(e)}", tag="BUILTIN")
|
||||
return None
|
||||
|
||||
def _is_browser_running(self, pid: Optional[int]) -> bool:
|
||||
"""Check if a process with the given PID is running"""
|
||||
if not pid:
|
||||
return False
|
||||
|
||||
try:
|
||||
# Check if the process exists
|
||||
if sys.platform == "win32":
|
||||
process = subprocess.run(["tasklist", "/FI", f"PID eq {pid}"],
|
||||
capture_output=True, text=True)
|
||||
return str(pid) in process.stdout
|
||||
else:
|
||||
# Unix-like systems
|
||||
os.kill(pid, 0) # This doesn't actually kill the process, just checks if it exists
|
||||
return True
|
||||
except (ProcessLookupError, PermissionError, OSError):
|
||||
return False
|
||||
|
||||
async def kill_builtin_browser(self) -> bool:
|
||||
"""
|
||||
Kill the builtin browser if it's running.
|
||||
|
||||
Returns:
|
||||
bool: True if the browser was killed, False otherwise
|
||||
"""
|
||||
browser_info = self.get_builtin_browser_info()
|
||||
if not browser_info:
|
||||
self.logger.warning("No builtin browser found", tag="BUILTIN")
|
||||
return False
|
||||
|
||||
pid = browser_info.get('pid')
|
||||
if not pid:
|
||||
return False
|
||||
|
||||
try:
|
||||
if sys.platform == "win32":
|
||||
subprocess.run(["taskkill", "/F", "/PID", str(pid)], check=True)
|
||||
else:
|
||||
os.kill(pid, signal.SIGTERM)
|
||||
# Wait for termination
|
||||
for _ in range(5):
|
||||
if not self._is_browser_running(pid):
|
||||
break
|
||||
await asyncio.sleep(0.5)
|
||||
else:
|
||||
# Force kill if still running
|
||||
os.kill(pid, signal.SIGKILL)
|
||||
|
||||
# Remove config file
|
||||
if os.path.exists(self.builtin_config_file):
|
||||
os.unlink(self.builtin_config_file)
|
||||
|
||||
self.logger.success("Builtin browser terminated", tag="BUILTIN")
|
||||
return True
|
||||
except Exception as e:
|
||||
self.logger.error(f"Error killing builtin browser: {str(e)}", tag="BUILTIN")
|
||||
return False
|
||||
|
||||
async def get_builtin_browser_status(self) -> Dict[str, Any]:
|
||||
"""
|
||||
Get status information about the builtin browser.
|
||||
|
||||
Returns:
|
||||
dict: Status information with running, cdp_url, and info fields
|
||||
"""
|
||||
browser_info = self.get_builtin_browser_info()
|
||||
|
||||
if not browser_info:
|
||||
return {
|
||||
'running': False,
|
||||
'cdp_url': None,
|
||||
'info': None
|
||||
}
|
||||
|
||||
return {
|
||||
'running': True,
|
||||
'cdp_url': browser_info.get('cdp_url'),
|
||||
'info': browser_info
|
||||
}
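Putting the builtin-browser helpers together, a small lifecycle sketch built only from the methods above:

```python
import asyncio

async def builtin_lifecycle():
    profiler = BrowserProfiler()

    status = await profiler.get_builtin_browser_status()
    if not status["running"]:
        await profiler.launch_builtin_browser(headless=True)

    info = profiler.get_builtin_browser_info()
    if info:
        print("CDP endpoint:", info["cdp_url"], "| pid:", info["pid"])

    # Tear the background browser down when finished
    await profiler.kill_builtin_browser()

asyncio.run(builtin_lifecycle())
```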
|
||||
|
||||
@@ -4,7 +4,6 @@ from collections import Counter
import string
from .model_loader import load_nltk_punkt


# Define the abstract base class for chunking strategies
class ChunkingStrategy(ABC):
    """
@@ -72,6 +71,7 @@ class NlpSentenceChunking(ChunkingStrategy):
        """
        Initialize the NlpSentenceChunking object.
        """
        from crawl4ai.legacy.model_loader import load_nltk_punkt
        load_nltk_punkt()

    def chunk(self, text: str) -> list:
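For reference, a minimal sketch of driving the strategy directly (the import path is an assumption based on the package layout in this diff):

```python
from crawl4ai.chunking_strategy import NlpSentenceChunking  # assumed module path

chunker = NlpSentenceChunking()  # __init__ loads the NLTK punkt model
chunks = chunker.chunk("First sentence. Second sentence. And a third one.")
print(chunks)  # roughly one chunk per sentence
```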
||||
1499  crawl4ai/cli.py
File diff suppressed because it is too large. Load Diff

837  crawl4ai/components/crawler_monitor.py  (Normal file)
@@ -0,0 +1,837 @@
|
||||
import time
|
||||
import uuid
|
||||
import threading
|
||||
import psutil
|
||||
from datetime import datetime, timedelta
|
||||
from typing import Dict, Optional, List
|
||||
|
||||
from rich.console import Console
|
||||
from rich.layout import Layout
|
||||
from rich.panel import Panel
|
||||
from rich.table import Table
|
||||
from rich.text import Text
|
||||
from rich.live import Live
|
||||
from rich import box
|
||||
from ..models import CrawlStatus
|
||||
|
||||
class TerminalUI:
|
||||
"""Terminal user interface for CrawlerMonitor using rich library."""
|
||||
|
||||
def __init__(self, refresh_rate: float = 1.0, max_width: int = 120):
|
||||
"""
|
||||
Initialize the terminal UI.
|
||||
|
||||
Args:
|
||||
refresh_rate: How often to refresh the UI (in seconds)
|
||||
max_width: Maximum width of the UI in characters
|
||||
"""
|
||||
self.console = Console(width=max_width)
|
||||
self.layout = Layout()
|
||||
self.refresh_rate = refresh_rate
|
||||
self.stop_event = threading.Event()
|
||||
self.ui_thread = None
|
||||
self.monitor = None # Will be set by CrawlerMonitor
|
||||
self.max_width = max_width
|
||||
|
||||
# Setup layout - vertical layout (top to bottom)
|
||||
self.layout.split(
|
||||
Layout(name="header", size=3),
|
||||
Layout(name="pipeline_status", size=10),
|
||||
Layout(name="task_details", ratio=1),
|
||||
Layout(name="footer", size=3) # Increased footer size to fit all content
|
||||
)
|
||||
|
||||
def start(self, monitor):
|
||||
"""Start the UI thread."""
|
||||
self.monitor = monitor
|
||||
self.stop_event.clear()
|
||||
self.ui_thread = threading.Thread(target=self._ui_loop)
|
||||
self.ui_thread.daemon = True
|
||||
self.ui_thread.start()
|
||||
|
||||
def stop(self):
|
||||
"""Stop the UI thread."""
|
||||
if self.ui_thread and self.ui_thread.is_alive():
|
||||
self.stop_event.set()
|
||||
# Only try to join if we're not in the UI thread
|
||||
# This prevents "cannot join current thread" errors
|
||||
if threading.current_thread() != self.ui_thread:
|
||||
self.ui_thread.join(timeout=5.0)
|
||||
|
||||
def _ui_loop(self):
|
||||
"""Main UI rendering loop."""
|
||||
import sys
|
||||
import select
|
||||
import termios
|
||||
import tty
|
||||
|
||||
# Setup terminal for non-blocking input
|
||||
old_settings = termios.tcgetattr(sys.stdin)
|
||||
try:
|
||||
tty.setcbreak(sys.stdin.fileno())
|
||||
|
||||
# Use Live display to render the UI
|
||||
with Live(self.layout, refresh_per_second=1/self.refresh_rate, screen=True) as live:
|
||||
self.live = live # Store the live display for updates
|
||||
|
||||
# Main UI loop
|
||||
while not self.stop_event.is_set():
|
||||
self._update_display()
|
||||
|
||||
# Check for key press (non-blocking)
|
||||
if select.select([sys.stdin], [], [], 0)[0]:
|
||||
key = sys.stdin.read(1)
|
||||
# Check for 'q' to quit
|
||||
if key == 'q':
|
||||
# Signal stop but don't call monitor.stop() from UI thread
|
||||
# as it would cause the thread to try to join itself
|
||||
self.stop_event.set()
|
||||
self.monitor.is_running = False
|
||||
break
|
||||
|
||||
time.sleep(self.refresh_rate)
|
||||
|
||||
# Just check if the monitor was stopped
|
||||
if not self.monitor.is_running:
|
||||
break
|
||||
finally:
|
||||
# Restore terminal settings
|
||||
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_settings)
|
||||
|
||||
def _update_display(self):
|
||||
"""Update the terminal display with current statistics."""
|
||||
if not self.monitor:
|
||||
return
|
||||
|
||||
# Update crawler status panel
|
||||
self.layout["header"].update(self._create_status_panel())
|
||||
|
||||
# Update pipeline status panel and task details panel
|
||||
self.layout["pipeline_status"].update(self._create_pipeline_panel())
|
||||
self.layout["task_details"].update(self._create_task_details_panel())
|
||||
|
||||
# Update footer
|
||||
self.layout["footer"].update(self._create_footer())
|
||||
|
||||
def _create_status_panel(self) -> Panel:
|
||||
"""Create the crawler status panel."""
|
||||
summary = self.monitor.get_summary()
|
||||
|
||||
# Format memory status with icon
|
||||
memory_status = self.monitor.get_memory_status()
|
||||
memory_icon = "🟢" # Default NORMAL
|
||||
if memory_status == "PRESSURE":
|
||||
memory_icon = "🟠"
|
||||
elif memory_status == "CRITICAL":
|
||||
memory_icon = "🔴"
|
||||
|
||||
# Get current memory usage
|
||||
current_memory = psutil.Process().memory_info().rss / (1024 * 1024) # MB
|
||||
memory_percent = (current_memory / psutil.virtual_memory().total) * 100
|
||||
|
||||
# Format runtime
|
||||
runtime = self.monitor._format_time(time.time() - self.monitor.start_time if self.monitor.start_time else 0)
|
||||
|
||||
# Create the status text
|
||||
status_text = Text()
|
||||
status_text.append(f"Web Crawler Dashboard | Runtime: {runtime} | Memory: {memory_percent:.1f}% {memory_icon}\n")
|
||||
status_text.append(f"Status: {memory_status} | URLs: {summary['urls_completed']}/{summary['urls_total']} | ")
|
||||
status_text.append(f"Peak Mem: {summary['peak_memory_percent']:.1f}% at {self.monitor._format_time(summary['peak_memory_time'])}")
|
||||
|
||||
return Panel(status_text, title="Crawler Status", border_style="blue")
|
||||
|
||||
def _create_pipeline_panel(self) -> Panel:
|
||||
"""Create the pipeline status panel."""
|
||||
summary = self.monitor.get_summary()
|
||||
queue_stats = self.monitor.get_queue_stats()
|
||||
|
||||
# Create a table for status counts
|
||||
table = Table(show_header=True, box=None)
|
||||
table.add_column("Status", style="cyan")
|
||||
table.add_column("Count", justify="right")
|
||||
table.add_column("Percentage", justify="right")
|
||||
table.add_column("Stat", style="cyan")
|
||||
table.add_column("Value", justify="right")
|
||||
|
||||
# Calculate overall progress
|
||||
progress = f"{summary['urls_completed']}/{summary['urls_total']}"
|
||||
progress_percent = f"{summary['completion_percentage']:.1f}%"
|
||||
|
||||
# Add rows for each status
|
||||
table.add_row(
|
||||
"Overall Progress",
|
||||
progress,
|
||||
progress_percent,
|
||||
"Est. Completion",
|
||||
summary.get('estimated_completion_time', "N/A")
|
||||
)
|
||||
|
||||
# Add rows for each status
|
||||
status_counts = summary['status_counts']
|
||||
total = summary['urls_total'] or 1 # Avoid division by zero
|
||||
|
||||
# Status rows
|
||||
table.add_row(
|
||||
"Completed",
|
||||
str(status_counts.get(CrawlStatus.COMPLETED.name, 0)),
|
||||
f"{status_counts.get(CrawlStatus.COMPLETED.name, 0) / total * 100:.1f}%",
|
||||
"Avg. Time/URL",
|
||||
f"{summary.get('avg_task_duration', 0):.2f}s"
|
||||
)
|
||||
|
||||
table.add_row(
|
||||
"Failed",
|
||||
str(status_counts.get(CrawlStatus.FAILED.name, 0)),
|
||||
f"{status_counts.get(CrawlStatus.FAILED.name, 0) / total * 100:.1f}%",
|
||||
"Concurrent Tasks",
|
||||
str(status_counts.get(CrawlStatus.IN_PROGRESS.name, 0))
|
||||
)
|
||||
|
||||
table.add_row(
|
||||
"In Progress",
|
||||
str(status_counts.get(CrawlStatus.IN_PROGRESS.name, 0)),
|
||||
f"{status_counts.get(CrawlStatus.IN_PROGRESS.name, 0) / total * 100:.1f}%",
|
||||
"Queue Size",
|
||||
str(queue_stats['total_queued'])
|
||||
)
|
||||
|
||||
table.add_row(
|
||||
"Queued",
|
||||
str(status_counts.get(CrawlStatus.QUEUED.name, 0)),
|
||||
f"{status_counts.get(CrawlStatus.QUEUED.name, 0) / total * 100:.1f}%",
|
||||
"Max Wait Time",
|
||||
f"{queue_stats['highest_wait_time']:.1f}s"
|
||||
)
|
||||
|
||||
# Requeued is a special case as it's not a status
|
||||
requeued_count = summary.get('requeued_count', 0)
|
||||
table.add_row(
|
||||
"Requeued",
|
||||
str(requeued_count),
|
||||
f"{summary.get('requeue_rate', 0):.1f}%",
|
||||
"Avg Wait Time",
|
||||
f"{queue_stats['avg_wait_time']:.1f}s"
|
||||
)
|
||||
|
||||
# Add empty row for spacing
|
||||
table.add_row(
|
||||
"",
|
||||
"",
|
||||
"",
|
||||
"Requeue Rate",
|
||||
f"{summary.get('requeue_rate', 0):.1f}%"
|
||||
)
|
||||
|
||||
return Panel(table, title="Pipeline Status", border_style="green")
|
||||
|
||||
def _create_task_details_panel(self) -> Panel:
|
||||
"""Create the task details panel."""
|
||||
# Create a table for task details
|
||||
table = Table(show_header=True, expand=True)
|
||||
table.add_column("Task ID", style="cyan", no_wrap=True, width=10)
|
||||
table.add_column("URL", style="blue", ratio=3)
|
||||
table.add_column("Status", style="green", width=15)
|
||||
table.add_column("Memory", justify="right", width=8)
|
||||
table.add_column("Peak", justify="right", width=8)
|
||||
table.add_column("Duration", justify="right", width=10)
|
||||
|
||||
# Get all task stats
|
||||
task_stats = self.monitor.get_all_task_stats()
|
||||
|
||||
# Add summary row
|
||||
active_tasks = sum(1 for stats in task_stats.values()
|
||||
if stats['status'] == CrawlStatus.IN_PROGRESS.name)
|
||||
|
||||
total_memory = sum(stats['memory_usage'] for stats in task_stats.values())
|
||||
total_peak = sum(stats['peak_memory'] for stats in task_stats.values())
|
||||
|
||||
# Summary row with separators
|
||||
table.add_row(
|
||||
"SUMMARY",
|
||||
f"Total: {len(task_stats)}",
|
||||
f"Active: {active_tasks}",
|
||||
f"{total_memory:.1f}",
|
||||
f"{total_peak:.1f}",
|
||||
"N/A"
|
||||
)
|
||||
|
||||
# Add a separator
|
||||
table.add_row("—" * 10, "—" * 20, "—" * 10, "—" * 8, "—" * 8, "—" * 10)
|
||||
|
||||
# Status icons
|
||||
status_icons = {
|
||||
CrawlStatus.QUEUED.name: "⏳",
|
||||
CrawlStatus.IN_PROGRESS.name: "🔄",
|
||||
CrawlStatus.COMPLETED.name: "✅",
|
||||
CrawlStatus.FAILED.name: "❌"
|
||||
}
|
||||
|
||||
# Calculate how many rows we can display based on available space
|
||||
# We can display more rows now that we have a dedicated panel
|
||||
display_count = min(len(task_stats), 20) # Display up to 20 tasks
|
||||
|
||||
# Add rows for each task
|
||||
for task_id, stats in sorted(
|
||||
list(task_stats.items())[:display_count],
|
||||
# Sort: 1. IN_PROGRESS first, 2. QUEUED, 3. COMPLETED/FAILED by recency
|
||||
key=lambda x: (
|
||||
0 if x[1]['status'] == CrawlStatus.IN_PROGRESS.name else
|
||||
1 if x[1]['status'] == CrawlStatus.QUEUED.name else
|
||||
2,
|
||||
-1 * (x[1].get('end_time', 0) or 0) # Most recent first
|
||||
)
|
||||
):
|
||||
# Truncate task_id and URL for display
|
||||
short_id = task_id[:8]
|
||||
url = stats['url']
|
||||
if len(url) > 50: # Allow longer URLs in the dedicated panel
|
||||
url = url[:47] + "..."
|
||||
|
||||
# Format status with icon
|
||||
status = f"{status_icons.get(stats['status'], '?')} {stats['status']}"
|
||||
|
||||
# Add row
|
||||
table.add_row(
|
||||
short_id,
|
||||
url,
|
||||
status,
|
||||
f"{stats['memory_usage']:.1f}",
|
||||
f"{stats['peak_memory']:.1f}",
|
||||
stats['duration'] if 'duration' in stats else "0:00"
|
||||
)
|
||||
|
||||
return Panel(table, title="Task Details", border_style="yellow")
|
||||
|
||||
def _create_footer(self) -> Panel:
|
||||
"""Create the footer panel."""
|
||||
from rich.columns import Columns
|
||||
from rich.align import Align
|
||||
|
||||
memory_status = self.monitor.get_memory_status()
|
||||
memory_icon = "🟢" # Default NORMAL
|
||||
if memory_status == "PRESSURE":
|
||||
memory_icon = "🟠"
|
||||
elif memory_status == "CRITICAL":
|
||||
memory_icon = "🔴"
|
||||
|
||||
# Left section - memory status
|
||||
left_text = Text()
|
||||
left_text.append("Memory Status: ", style="bold")
|
||||
status_style = "green" if memory_status == "NORMAL" else "yellow" if memory_status == "PRESSURE" else "red bold"
|
||||
left_text.append(f"{memory_icon} {memory_status}", style=status_style)
|
||||
|
||||
# Center section - copyright
|
||||
center_text = Text("© Crawl4AI 2025 | Made by UncleCode", style="cyan italic")
|
||||
|
||||
# Right section - quit instruction
|
||||
right_text = Text()
|
||||
right_text.append("Press ", style="bold")
|
||||
right_text.append("q", style="white on blue")
|
||||
right_text.append(" to quit", style="bold")
|
||||
|
||||
# Create columns with the three sections
|
||||
footer_content = Columns(
|
||||
[
|
||||
Align.left(left_text),
|
||||
Align.center(center_text),
|
||||
Align.right(right_text)
|
||||
],
|
||||
expand=True
|
||||
)
|
||||
|
||||
# Create a more visible footer panel
|
||||
return Panel(
|
||||
footer_content,
|
||||
border_style="white",
|
||||
padding=(0, 1) # Add padding for better visibility
|
||||
)
|
||||
|
||||
|
||||
class CrawlerMonitor:
|
||||
"""
|
||||
Comprehensive monitoring and visualization system for tracking web crawler operations in real-time.
|
||||
Provides a terminal-based dashboard that displays task statuses, memory usage, queue statistics,
|
||||
and performance metrics.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
urls_total: int = 0,
|
||||
refresh_rate: float = 1.0,
|
||||
enable_ui: bool = True,
|
||||
max_width: int = 120
|
||||
):
|
||||
"""
|
||||
Initialize the CrawlerMonitor.
|
||||
|
||||
Args:
|
||||
urls_total: Total number of URLs to be crawled
|
||||
refresh_rate: How often to refresh the UI (in seconds)
|
||||
enable_ui: Whether to display the terminal UI
|
||||
max_width: Maximum width of the UI in characters
|
||||
"""
|
||||
# Core monitoring attributes
|
||||
self.stats = {} # Task ID -> stats dict
|
||||
self.memory_status = "NORMAL"
|
||||
self.start_time = None
|
||||
self.end_time = None
|
||||
self.is_running = False
|
||||
self.queue_stats = {
|
||||
"total_queued": 0,
|
||||
"highest_wait_time": 0.0,
|
||||
"avg_wait_time": 0.0
|
||||
}
|
||||
self.urls_total = urls_total
|
||||
self.urls_completed = 0
|
||||
self.peak_memory_percent = 0.0
|
||||
self.peak_memory_time = 0.0
|
||||
|
||||
# Status counts
|
||||
self.status_counts = {
|
||||
CrawlStatus.QUEUED.name: 0,
|
||||
CrawlStatus.IN_PROGRESS.name: 0,
|
||||
CrawlStatus.COMPLETED.name: 0,
|
||||
CrawlStatus.FAILED.name: 0
|
||||
}
|
||||
|
||||
# Requeue tracking
|
||||
self.requeued_count = 0
|
||||
|
||||
# Thread-safety
|
||||
self._lock = threading.RLock()
|
||||
|
||||
# Terminal UI
|
||||
self.enable_ui = enable_ui
|
||||
self.terminal_ui = TerminalUI(
|
||||
refresh_rate=refresh_rate,
|
||||
max_width=max_width
|
||||
) if enable_ui else None
|
||||
|
||||
def start(self):
|
||||
"""
|
||||
Start the monitoring session.
|
||||
|
||||
- Initializes the start_time
|
||||
- Sets is_running to True
|
||||
- Starts the terminal UI if enabled
|
||||
"""
|
||||
with self._lock:
|
||||
self.start_time = time.time()
|
||||
self.is_running = True
|
||||
|
||||
# Start the terminal UI
|
||||
if self.enable_ui and self.terminal_ui:
|
||||
self.terminal_ui.start(self)
|
||||
|
||||
def stop(self):
|
||||
"""
|
||||
Stop the monitoring session.
|
||||
|
||||
- Records end_time
|
||||
- Sets is_running to False
|
||||
- Stops the terminal UI
|
||||
- Generates final summary statistics
|
||||
"""
|
||||
with self._lock:
|
||||
self.end_time = time.time()
|
||||
self.is_running = False
|
||||
|
||||
# Stop the terminal UI
|
||||
if self.enable_ui and self.terminal_ui:
|
||||
self.terminal_ui.stop()
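A compact sketch of the intended call sequence from the caller's side, using only the methods defined in this class (import paths assumed; timing and memory numbers illustrative):

```python
import time
import uuid

from crawl4ai.components.crawler_monitor import CrawlerMonitor  # assumed path
from crawl4ai.models import CrawlStatus  # assumed path

monitor = CrawlerMonitor(urls_total=1, enable_ui=False)  # UI off for scripted use
monitor.start()

task_id = str(uuid.uuid4())
monitor.add_task(task_id, "https://example.com")
monitor.update_task(task_id, status=CrawlStatus.IN_PROGRESS, start_time=time.time())
# ... the actual crawl happens here ...
monitor.update_task(task_id, status=CrawlStatus.COMPLETED,
                    end_time=time.time(), memory_usage=85.0, peak_memory=120.0)

print(monitor.get_summary()["completion_percentage"])  # 100.0
monitor.stop()
```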
|
||||
|
||||
def add_task(self, task_id: str, url: str):
|
||||
"""
|
||||
Register a new task with the monitor.
|
||||
|
||||
Args:
|
||||
task_id: Unique identifier for the task
|
||||
url: URL being crawled
|
||||
|
||||
The task is initialized with:
|
||||
- status: QUEUED
|
||||
- url: The URL to crawl
|
||||
- enqueue_time: Current time
|
||||
- memory_usage: 0
|
||||
- peak_memory: 0
|
||||
- wait_time: 0
|
||||
- retry_count: 0
|
||||
"""
|
||||
with self._lock:
|
||||
self.stats[task_id] = {
|
||||
"task_id": task_id,
|
||||
"url": url,
|
||||
"status": CrawlStatus.QUEUED.name,
|
||||
"enqueue_time": time.time(),
|
||||
"start_time": None,
|
||||
"end_time": None,
|
||||
"memory_usage": 0.0,
|
||||
"peak_memory": 0.0,
|
||||
"error_message": "",
|
||||
"wait_time": 0.0,
|
||||
"retry_count": 0,
|
||||
"duration": "0:00",
|
||||
"counted_requeue": False
|
||||
}
|
||||
|
||||
# Update status counts
|
||||
self.status_counts[CrawlStatus.QUEUED.name] += 1
|
||||
|
||||
def update_task(
|
||||
self,
|
||||
task_id: str,
|
||||
status: Optional[CrawlStatus] = None,
|
||||
start_time: Optional[float] = None,
|
||||
end_time: Optional[float] = None,
|
||||
memory_usage: Optional[float] = None,
|
||||
peak_memory: Optional[float] = None,
|
||||
error_message: Optional[str] = None,
|
||||
retry_count: Optional[int] = None,
|
||||
wait_time: Optional[float] = None
|
||||
):
|
||||
"""
|
||||
Update statistics for a specific task.
|
||||
|
||||
Args:
|
||||
task_id: Unique identifier for the task
|
||||
status: New status (QUEUED, IN_PROGRESS, COMPLETED, FAILED)
|
||||
start_time: When task execution started
|
||||
end_time: When task execution ended
|
||||
memory_usage: Current memory usage in MB
|
||||
peak_memory: Maximum memory usage in MB
|
||||
error_message: Error description if failed
|
||||
retry_count: Number of retry attempts
|
||||
wait_time: Time spent in queue
|
||||
|
||||
Updates task statistics and updates status counts.
|
||||
If status changes, decrements old status count and
|
||||
increments new status count.
|
||||
"""
|
||||
with self._lock:
|
||||
# Check if task exists
|
||||
if task_id not in self.stats:
|
||||
return
|
||||
|
||||
task_stats = self.stats[task_id]
|
||||
|
||||
# Update status counts if status is changing
|
||||
old_status = task_stats["status"]
|
||||
if status and status.name != old_status:
|
||||
self.status_counts[old_status] -= 1
|
||||
self.status_counts[status.name] += 1
|
||||
|
||||
# Track completion
|
||||
if status == CrawlStatus.COMPLETED:
|
||||
self.urls_completed += 1
|
||||
|
||||
# Track requeues
|
||||
if old_status in [CrawlStatus.COMPLETED.name, CrawlStatus.FAILED.name] and not task_stats.get("counted_requeue", False):
|
||||
self.requeued_count += 1
|
||||
task_stats["counted_requeue"] = True
|
||||
|
||||
# Update task statistics
|
||||
if status:
|
||||
task_stats["status"] = status.name
|
||||
if start_time is not None:
|
||||
task_stats["start_time"] = start_time
|
||||
if end_time is not None:
|
||||
task_stats["end_time"] = end_time
|
||||
if memory_usage is not None:
|
||||
task_stats["memory_usage"] = memory_usage
|
||||
|
||||
# Update peak memory if necessary
|
||||
# memory_usage is tracked in MB, while virtual_memory().total is in bytes
current_percent = (memory_usage * 1024 * 1024 / psutil.virtual_memory().total) * 100
|
||||
if current_percent > self.peak_memory_percent:
|
||||
self.peak_memory_percent = current_percent
|
||||
self.peak_memory_time = time.time()
|
||||
|
||||
if peak_memory is not None:
|
||||
task_stats["peak_memory"] = peak_memory
|
||||
if error_message is not None:
|
||||
task_stats["error_message"] = error_message
|
||||
if retry_count is not None:
|
||||
task_stats["retry_count"] = retry_count
|
||||
if wait_time is not None:
|
||||
task_stats["wait_time"] = wait_time
|
||||
|
||||
# Calculate duration
|
||||
if task_stats["start_time"]:
|
||||
end = task_stats["end_time"] or time.time()
|
||||
duration = end - task_stats["start_time"]
|
||||
task_stats["duration"] = self._format_time(duration)
|
||||
|
||||
    def update_memory_status(self, status: str):
        """
        Update the current memory status.

        Args:
            status: Memory status (NORMAL, PRESSURE, CRITICAL, or custom)

        Also updates the UI to reflect the new status.
        """
        with self._lock:
            self.memory_status = status

    def update_queue_statistics(
        self,
        total_queued: int,
        highest_wait_time: float,
        avg_wait_time: float
    ):
        """
        Update statistics related to the task queue.

        Args:
            total_queued: Number of tasks currently in queue
            highest_wait_time: Longest wait time of any queued task
            avg_wait_time: Average wait time across all queued tasks
        """
        with self._lock:
            self.queue_stats = {
                "total_queued": total_queued,
                "highest_wait_time": highest_wait_time,
                "avg_wait_time": avg_wait_time
            }

    def get_task_stats(self, task_id: str) -> Dict:
        """
        Get statistics for a specific task.

        Args:
            task_id: Unique identifier for the task

        Returns:
            Dictionary containing all task statistics
        """
        with self._lock:
            return self.stats.get(task_id, {}).copy()

    def get_all_task_stats(self) -> Dict[str, Dict]:
        """
        Get statistics for all tasks.

        Returns:
            Dictionary mapping task_ids to their statistics
        """
        with self._lock:
            return self.stats.copy()

    def get_memory_status(self) -> str:
        """
        Get the current memory status.

        Returns:
            Current memory status string
        """
        with self._lock:
            return self.memory_status

    def get_queue_stats(self) -> Dict:
        """
        Get current queue statistics.

        Returns:
            Dictionary with queue statistics including:
            - total_queued: Number of tasks in queue
            - highest_wait_time: Longest wait time
            - avg_wait_time: Average wait time
        """
        with self._lock:
            return self.queue_stats.copy()

    def get_summary(self) -> Dict:
        """
        Get a summary of all crawler statistics.

        Returns:
            Dictionary containing:
            - runtime: Total runtime in seconds
            - urls_total: Total URLs to process
            - urls_completed: Number of completed URLs
            - completion_percentage: Percentage complete
            - status_counts: Count of tasks in each status
            - memory_status: Current memory status
            - peak_memory_percent: Highest memory usage
            - peak_memory_time: When peak memory occurred
            - avg_task_duration: Average task processing time
            - estimated_completion_time: Projected finish time
            - requeue_rate: Percentage of tasks requeued
        """
        with self._lock:
            # Calculate runtime
            current_time = time.time()
            runtime = current_time - (self.start_time or current_time)

            # Calculate completion percentage
            completion_percentage = 0
            if self.urls_total > 0:
                completion_percentage = (self.urls_completed / self.urls_total) * 100

            # Calculate average task duration for completed tasks
            completed_tasks = [
                task for task in self.stats.values()
                if task["status"] == CrawlStatus.COMPLETED.name and task.get("start_time") and task.get("end_time")
            ]

            avg_task_duration = 0
            if completed_tasks:
                total_duration = sum(task["end_time"] - task["start_time"] for task in completed_tasks)
                avg_task_duration = total_duration / len(completed_tasks)

            # Calculate requeue rate
            requeue_rate = 0
            if len(self.stats) > 0:
                requeue_rate = (self.requeued_count / len(self.stats)) * 100

            # Calculate estimated completion time
            estimated_completion_time = "N/A"
            if avg_task_duration > 0 and self.urls_total > 0 and self.urls_completed > 0:
                remaining_tasks = self.urls_total - self.urls_completed
                estimated_seconds = remaining_tasks * avg_task_duration
                estimated_completion_time = self._format_time(estimated_seconds)

            return {
                "runtime": runtime,
                "urls_total": self.urls_total,
                "urls_completed": self.urls_completed,
                "completion_percentage": completion_percentage,
                "status_counts": self.status_counts.copy(),
                "memory_status": self.memory_status,
                "peak_memory_percent": self.peak_memory_percent,
                "peak_memory_time": self.peak_memory_time,
                "avg_task_duration": avg_task_duration,
                "estimated_completion_time": estimated_completion_time,
                "requeue_rate": requeue_rate,
                "requeued_count": self.requeued_count
            }
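
    # Illustrative get_summary() output shape (example values only, not real output):
    #   {"runtime": 42.7, "urls_total": 100, "urls_completed": 25,
    #    "completion_percentage": 25.0, "memory_status": "NORMAL", ...}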

    def render(self):
        """
        Render the terminal UI.

        This is the main UI rendering loop that:
        1. Updates all statistics
        2. Formats the display
        3. Renders the ASCII interface
        4. Handles keyboard input

        Note: The actual rendering is handled by the TerminalUI class
        which uses the rich library's Live display.
        """
        if self.enable_ui and self.terminal_ui:
            # Force an update of the UI
            if hasattr(self.terminal_ui, '_update_display'):
                self.terminal_ui._update_display()

    def _format_time(self, seconds: float) -> str:
        """
        Format time in hours:minutes:seconds.

        Args:
            seconds: Time in seconds

        Returns:
            Formatted time string (e.g., "1:23:45")
        """
        delta = timedelta(seconds=int(seconds))
        hours, remainder = divmod(delta.seconds, 3600)
        minutes, seconds = divmod(remainder, 60)

        if hours > 0:
            return f"{hours}:{minutes:02}:{seconds:02}"
        else:
            return f"{minutes}:{seconds:02}"
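
    # Illustrative outputs of _format_time (not in the original file):
    #   _format_time(3725) -> "1:02:05"
    #   _format_time(125)  -> "2:05"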

    def _calculate_estimated_completion(self) -> str:
        """
        Calculate estimated completion time based on current progress.

        Returns:
            Formatted time string
        """
        summary = self.get_summary()
        return summary.get("estimated_completion_time", "N/A")


# Example code for testing
if __name__ == "__main__":
    # Initialize the monitor
    monitor = CrawlerMonitor(urls_total=100)

    # Start monitoring
    monitor.start()

    try:
        # Simulate some tasks
        for i in range(20):
            task_id = str(uuid.uuid4())
            url = f"https://example.com/page{i}"
            monitor.add_task(task_id, url)

            # Simulate 20% of tasks already running
            if i < 4:
                monitor.update_task(
                    task_id=task_id,
                    status=CrawlStatus.IN_PROGRESS,
                    start_time=time.time() - 30,  # Started 30 seconds ago
                    memory_usage=10.5
                )

            # Simulate 10% of tasks completed
            if i >= 4 and i < 6:
                start_time = time.time() - 60
                end_time = time.time() - 15
                monitor.update_task(
                    task_id=task_id,
                    status=CrawlStatus.IN_PROGRESS,
                    start_time=start_time,
                    memory_usage=8.2
                )
                monitor.update_task(
                    task_id=task_id,
                    status=CrawlStatus.COMPLETED,
                    end_time=end_time,
                    memory_usage=0,
                    peak_memory=15.7
                )

            # Simulate 5% of tasks failing
            if i >= 6 and i < 7:
                start_time = time.time() - 45
                end_time = time.time() - 20
                monitor.update_task(
                    task_id=task_id,
                    status=CrawlStatus.IN_PROGRESS,
                    start_time=start_time,
                    memory_usage=12.3
                )
                monitor.update_task(
                    task_id=task_id,
                    status=CrawlStatus.FAILED,
                    end_time=end_time,
                    memory_usage=0,
                    peak_memory=18.2,
                    error_message="Connection timeout"
                )

        # Simulate memory pressure
        monitor.update_memory_status("PRESSURE")

        # Simulate queue statistics
        monitor.update_queue_statistics(
            total_queued=16,  # 20 - 4 (in progress)
            highest_wait_time=120.5,
            avg_wait_time=60.2
        )

        # Keep the monitor running for a demonstration
        print("Crawler Monitor is running. Press 'q' to exit.")
        while monitor.is_running:
            time.sleep(0.1)

    except KeyboardInterrupt:
        print("\nExiting crawler monitor...")
    finally:
        # Stop the monitor
        monitor.stop()
        print("Crawler monitor exited successfully.")

crawl4ai/config.py
@@ -4,7 +4,8 @@ from dotenv import load_dotenv
load_dotenv()  # Load environment variables from .env file

# Default provider, ONLY used when the extraction strategy is LLMExtractionStrategy
DEFAULT_PROVIDER = "openai/gpt-4o-mini"
DEFAULT_PROVIDER = "openai/gpt-4o"
DEFAULT_PROVIDER_API_KEY = "OPENAI_API_KEY"
MODEL_REPO_BRANCH = "new-release-0.0.2"
# Provider-model dictionary, ONLY used when the extraction strategy is LLMExtractionStrategy
PROVIDER_MODELS = {
@@ -15,10 +16,26 @@ PROVIDER_MODELS = {
    "openai/gpt-4o": os.getenv("OPENAI_API_KEY"),
    "openai/o1-mini": os.getenv("OPENAI_API_KEY"),
    "openai/o1-preview": os.getenv("OPENAI_API_KEY"),
    "openai/o3-mini": os.getenv("OPENAI_API_KEY"),
    "openai/o3-mini-high": os.getenv("OPENAI_API_KEY"),
    "anthropic/claude-3-haiku-20240307": os.getenv("ANTHROPIC_API_KEY"),
    "anthropic/claude-3-opus-20240229": os.getenv("ANTHROPIC_API_KEY"),
    "anthropic/claude-3-sonnet-20240229": os.getenv("ANTHROPIC_API_KEY"),
    "anthropic/claude-3-5-sonnet-20240620": os.getenv("ANTHROPIC_API_KEY"),
    "gemini/gemini-pro": os.getenv("GEMINI_API_KEY"),
    "gemini/gemini-1.5-pro": os.getenv("GEMINI_API_KEY"),
    "gemini/gemini-2.0-flash": os.getenv("GEMINI_API_KEY"),
    "gemini/gemini-2.0-flash-exp": os.getenv("GEMINI_API_KEY"),
    "gemini/gemini-2.0-flash-lite-preview-02-05": os.getenv("GEMINI_API_KEY"),
    "deepseek/deepseek-chat": os.getenv("DEEPSEEK_API_KEY"),
}
PROVIDER_MODELS_PREFIXES = {
    "ollama": "no-token-needed",  # Any model from Ollama needs no API token
    "groq": os.getenv("GROQ_API_KEY"),
    "openai": os.getenv("OPENAI_API_KEY"),
    "anthropic": os.getenv("ANTHROPIC_API_KEY"),
    "gemini": os.getenv("GEMINI_API_KEY"),
    "deepseek": os.getenv("DEEPSEEK_API_KEY"),
}
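
# Illustrative helper (not part of config.py): one plausible way the two tables
# above could be consulted, resolving a token by exact model key first and then
# by provider prefix.
def resolve_api_token(provider: str):
    if provider in PROVIDER_MODELS:
        return PROVIDER_MODELS[provider]
    prefix = provider.split("/", 1)[0]
    return PROVIDER_MODELS_PREFIXES.get(prefix)
# e.g. resolve_api_token("groq/llama-3.1-70b") falls back to the "groq" prefix.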

# Chunk token threshold
@@ -84,3 +101,46 @@ SHOW_DEPRECATION_WARNINGS = True
SCREENSHOT_HEIGHT_TRESHOLD = 10000
PAGE_TIMEOUT = 60000
DOWNLOAD_PAGE_TIMEOUT = 60000

# Global user settings with descriptions and default values
USER_SETTINGS = {
    "DEFAULT_LLM_PROVIDER": {
        "default": "openai/gpt-4o",
        "description": "Default LLM provider in 'company/model' format (e.g., 'openai/gpt-4o', 'anthropic/claude-3-sonnet')",
        "type": "string"
    },
    "DEFAULT_LLM_PROVIDER_TOKEN": {
        "default": "",
        "description": "API token for the default LLM provider",
        "type": "string",
        "secret": True
    },
    "VERBOSE": {
        "default": False,
        "description": "Enable verbose output for all commands",
        "type": "boolean"
    },
    "BROWSER_HEADLESS": {
        "default": True,
        "description": "Run browser in headless mode by default",
        "type": "boolean"
    },
    "BROWSER_TYPE": {
        "default": "chromium",
        "description": "Default browser type (chromium or firefox)",
        "type": "string",
        "options": ["chromium", "firefox"]
    },
    "CACHE_MODE": {
        "default": "bypass",
        "description": "Default cache mode (bypass, use, or refresh)",
        "type": "string",
        "options": ["bypass", "use", "refresh"]
    },
    "USER_AGENT_MODE": {
        "default": "default",
        "description": "Default user agent mode (default, random, or mobile)",
        "type": "string",
        "options": ["default", "random", "mobile"]
    }
}
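
# Minimal sketch (not part of config.py) of reading one of these settings;
# `overrides` stands in for whatever persisted user config the CLI loads.
def get_setting(name: str, overrides: dict = None):
    entry = USER_SETTINGS[name]
    return (overrides or {}).get(name, entry["default"])
# e.g. get_setting("BROWSER_TYPE") -> "chromium"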

crawl4ai/content_filter_strategy.py
@@ -1,3 +1,4 @@
import inspect
import re
import time
from bs4 import BeautifulSoup, Tag
@@ -5,25 +6,47 @@ from typing import List, Tuple, Dict, Optional
from rank_bm25 import BM25Okapi
from collections import deque
from bs4 import NavigableString, Comment
from .utils import clean_tokens, perform_completion_with_backoff, escape_json_string, sanitize_html, get_home_folder, extract_xml_data

from .utils import (
    clean_tokens,
    perform_completion_with_backoff,
    escape_json_string,
    sanitize_html,
    get_home_folder,
    extract_xml_data,
    merge_chunks,
)
from .types import LLMConfig
from .config import DEFAULT_PROVIDER, OVERLAP_RATE, WORD_TOKEN_RATE
from abc import ABC, abstractmethod
import math
from snowballstemmer import stemmer
from .config import DEFAULT_PROVIDER, OVERLAP_RATE, WORD_TOKEN_RATE
from .models import TokenUsage
from .prompts import PROMPT_FILTER_CONTENT
import os
import json
import hashlib
from pathlib import Path
from concurrent.futures import ThreadPoolExecutor, as_completed
from concurrent.futures import ThreadPoolExecutor
from .async_logger import AsyncLogger, LogLevel
from colorama import Fore, Style, init
from colorama import Fore, Style


class RelevantContentFilter(ABC):
    """Abstract base class for content filtering strategies"""

    def __init__(self, user_query: str = None):
    def __init__(
        self,
        user_query: str = None,
        verbose: bool = False,
        logger: Optional[AsyncLogger] = None,
    ):
        """
        Initializes the RelevantContentFilter class with an optional user query.

        Args:
            user_query (str): User query for filtering (optional).
            verbose (bool): Enable verbose logging (default: False).
        """
        self.user_query = user_query
        self.included_tags = {
            # Primary structure
@@ -92,6 +115,8 @@ class RelevantContentFilter(ABC):
            r"nav|footer|header|sidebar|ads|comment|promo|advert|social|share", re.I
        )
        self.min_word_count = 2
        self.verbose = False
        self.logger = logger

    @abstractmethod
    def filter_content(self, html: str) -> List[str]:
@@ -353,6 +378,7 @@ class RelevantContentFilter(ABC):
        except Exception:
            return str(tag)  # Fallback to original if anything fails


class BM25ContentFilter(RelevantContentFilter):
    """
    Content filtering using BM25 algorithm with priority tag handling.
@@ -495,6 +521,7 @@ class BM25ContentFilter(RelevantContentFilter):

        return [self.clean_element(tag) for _, _, tag in selected_candidates]


class PruningContentFilter(RelevantContentFilter):
    """
    Content filtering using pruning algorithm with dynamic threshold.
@@ -741,110 +768,131 @@ class PruningContentFilter(RelevantContentFilter):
            class_id_score -= 0.5
        return class_id_score


class LLMContentFilter(RelevantContentFilter):
    """Content filtering using LLMs to generate relevant markdown."""
    """Content filtering using LLMs to generate relevant markdown.

    How it works:
    1. Extracts page metadata with fallbacks.
    2. Extracts text chunks from the body element.
    3. Applies LLMs to generate markdown for each chunk.
    4. Filters out chunks below the threshold.
    5. Sorts chunks by score in descending order.
    6. Returns the top N chunks.

    Attributes:
        llm_config (LLMConfig): LLM configuration object.
        instruction (str): Instruction for LLM markdown generation.
        chunk_token_threshold (int): Chunk token threshold for splitting (default: 1e9).
        overlap_rate (float): Overlap rate for chunking (default: 0.5).
        word_token_rate (float): Word token rate for chunking (default: 0.2).
        verbose (bool): Enable verbose logging (default: False).
        logger (AsyncLogger): Custom logger for LLM operations (optional).
    """
    _UNWANTED_PROPS = {
        'provider': 'Instead, use llm_config=LLMConfig(provider="...")',
        'api_token': 'Instead, use llm_config=LLMConfig(api_token="...")',
        'base_url': 'Instead, use llm_config=LLMConfig(base_url="...")',
        'api_base': 'Instead, use llm_config=LLMConfig(base_url="...")',
    }

    def __init__(
        self,
        provider: str = DEFAULT_PROVIDER,
        api_token: Optional[str] = None,
        llm_config: "LLMConfig" = None,
        instruction: str = None,
        chunk_token_threshold: int = int(1e9),
        overlap_rate: float = OVERLAP_RATE,
        word_token_rate: float = WORD_TOKEN_RATE,
        # char_token_rate: float = WORD_TOKEN_RATE * 5,
        # chunk_mode: str = "char",
        verbose: bool = False,
        logger: Optional[AsyncLogger] = None,
        ignore_cache: bool = True,
        # Deprecated properties
        provider: str = DEFAULT_PROVIDER,
        api_token: Optional[str] = None,
        base_url: Optional[str] = None,
        api_base: Optional[str] = None,
        extra_args: Dict = None,
        verbose: bool = False,
        logger: Optional[AsyncLogger] = None,
    ):
        super().__init__(None)
        self.provider = provider
        self.api_token = (
            api_token
            or PROVIDER_MODELS.get(provider, "no-token")
            or os.getenv("OPENAI_API_KEY")
        )
        self.api_token = api_token
        self.base_url = base_url or api_base
        self.llm_config = llm_config
        self.instruction = instruction
        self.chunk_token_threshold = chunk_token_threshold
        self.overlap_rate = overlap_rate
        self.word_token_rate = word_token_rate
        self.base_url = base_url
        self.api_base = api_base or base_url
        self.word_token_rate = word_token_rate or WORD_TOKEN_RATE
        # self.chunk_mode: str = chunk_mode
        # self.char_token_rate = char_token_rate or word_token_rate / 5
        # self.token_rate = word_token_rate if chunk_mode == "word" else self.char_token_rate
        self.token_rate = word_token_rate or WORD_TOKEN_RATE
        self.extra_args = extra_args or {}
        self.ignore_cache = ignore_cache
        self.verbose = verbose

        # Setup logger with custom styling for LLM operations
        if logger:
            self.logger = logger
        elif verbose:
            self.logger = AsyncLogger(
                verbose=True,
                verbose=verbose,
                icons={
                    **AsyncLogger.DEFAULT_ICONS,
                    "LLM": "★",    # Star for LLM operations
                    "CHUNK": "◈",  # Diamond for chunks
                    "CACHE": "⚡",  # Lightning for cache operations
                },
                colors={
                    **AsyncLogger.DEFAULT_COLORS,
                    LogLevel.INFO: Fore.MAGENTA + Style.DIM,  # Dimmed purple for LLM ops
                }
                    LogLevel.INFO: Fore.MAGENTA
                    + Style.DIM,  # Dimmed purple for LLM ops
                },
            )
        else:
            self.logger = None

        self.usages = []
        self.total_usage = TokenUsage()

    def __setattr__(self, name, value):
        """Handle attribute setting."""
        # TODO: Planning to set properties dynamically based on the __init__ signature
        sig = inspect.signature(self.__init__)
        all_params = sig.parameters  # Dictionary of parameter names and their details

        if name in self._UNWANTED_PROPS and value is not all_params[name].default:
            raise AttributeError(f"Setting '{name}' is deprecated. {self._UNWANTED_PROPS[name]}")

        super().__setattr__(name, value)
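
    # Illustrative effect of the guard above (hypothetical call):
    #   LLMContentFilter(provider="groq/llama3")
    #   -> AttributeError: Setting 'provider' is deprecated.
    #      Instead, use llm_config=LLMConfig(provider="...")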

    def _get_cache_key(self, html: str, instruction: str) -> str:
        """Generate a unique cache key based on HTML and instruction"""
        content = f"{html}{instruction}"
        return hashlib.md5(content.encode()).hexdigest()

    def _merge_chunks(self, text: str) -> List[str]:
        """Split text into chunks with overlap"""
        # Calculate tokens and sections
        total_tokens = len(text.split()) * self.word_token_rate
        num_sections = max(1, math.floor(total_tokens / self.chunk_token_threshold))
        adjusted_chunk_threshold = total_tokens / num_sections
        """Split text into chunks with overlap using char or word mode."""
        ov = int(self.chunk_token_threshold * self.overlap_rate)
        sections = merge_chunks(
            docs=[text],
            target_size=self.chunk_token_threshold,
            overlap=ov,
            word_token_ratio=self.word_token_rate,
        )
        return sections

        # Split into words
        words = text.split()
        chunks = []
        current_chunk = []
        current_token_count = 0

        for word in words:
            word_tokens = len(word) * self.word_token_rate
            if current_token_count + word_tokens <= adjusted_chunk_threshold:
                current_chunk.append(word)
                current_token_count += word_tokens
            else:
                # Add overlap if not the last chunk
                if chunks and self.overlap_rate > 0:
                    overlap_size = int(len(current_chunk) * self.overlap_rate)
                    current_chunk.extend(current_chunk[-overlap_size:])

                chunks.append(" ".join(current_chunk))
                current_chunk = [word]
                current_token_count = word_tokens

        if current_chunk:
            chunks.append(" ".join(current_chunk))

        return chunks

    def filter_content(self, html: str, ignore_cache: bool = False) -> List[str]:
    def filter_content(self, html: str, ignore_cache: bool = True) -> List[str]:
        if not html or not isinstance(html, str):
            return []

        if self.logger:
            self.logger.info(
                "Starting LLM content filtering process",
                "Starting LLM markdown content filtering process",
                tag="LLM",
                params={"provider": self.provider},
                colors={"provider": Fore.CYAN}
                params={"provider": self.llm_config.provider},
                colors={"provider": Fore.CYAN},
            )

        # Cache handling
@@ -853,65 +901,88 @@ class LLMContentFilter(RelevantContentFilter):
        cache_key = self._get_cache_key(html, self.instruction or "")
        cache_file = cache_dir / f"{cache_key}.json"

        # if ignore_cache == None:
        ignore_cache = self.ignore_cache

        if not ignore_cache and cache_file.exists():
            if self.logger:
                self.logger.info("Found cached result", tag="CACHE")
                self.logger.info("Found cached markdown result", tag="CACHE")
            try:
                with cache_file.open('r') as f:
                with cache_file.open("r") as f:
                    cached_data = json.load(f)
                    usage = TokenUsage(**cached_data['usage'])
                    usage = TokenUsage(**cached_data["usage"])
                    self.usages.append(usage)
                    self.total_usage.completion_tokens += usage.completion_tokens
                    self.total_usage.prompt_tokens += usage.prompt_tokens
                    self.total_usage.total_tokens += usage.total_tokens
                    return cached_data['blocks']
                    return cached_data["blocks"]
            except Exception as e:
                if self.logger:
                    self.logger.error(f"Cache read error: {str(e)}", tag="CACHE")
                    self.logger.error(
                        f"LLM markdown: Cache read error: {str(e)}", tag="CACHE"
                    )

        # Split into chunks
        html_chunks = self._merge_chunks(html)
        if self.logger:
            self.logger.info(
                "Split content into {chunk_count} chunks",
                "LLM markdown: Split content into {chunk_count} chunks",
                tag="CHUNK",
                params={"chunk_count": len(html_chunks)},
                colors={"chunk_count": Fore.YELLOW}
                colors={"chunk_count": Fore.YELLOW},
            )

        extracted_content = []

        start_time = time.time()

        # Process chunks in parallel
        with ThreadPoolExecutor(max_workers=4) as executor:
            futures = []
            for i, chunk in enumerate(html_chunks):
                if self.logger:
                    self.logger.debug(
                        "Processing chunk {chunk_num}/{total_chunks}",
                        "LLM markdown: Processing chunk {chunk_num}/{total_chunks}",
                        tag="CHUNK",
                        params={
                            "chunk_num": i + 1,
                            "total_chunks": len(html_chunks)
                        }
                        params={"chunk_num": i + 1, "total_chunks": len(html_chunks)},
                    )

                prompt_variables = {
                    "HTML": escape_json_string(sanitize_html(chunk)),
                    "REQUEST": self.instruction or "Convert this HTML into clean, relevant markdown, removing any noise or irrelevant content."
                    "REQUEST": self.instruction
                    or "Convert this HTML into clean, relevant markdown, removing any noise or irrelevant content.",
                }

                prompt = PROMPT_FILTER_CONTENT
                for var, value in prompt_variables.items():
                    prompt = prompt.replace("{" + var + "}", value)

                def _proceed_with_chunk(
                    provider: str,
                    prompt: str,
                    api_token: str,
                    base_url: Optional[str] = None,
                    extra_args: Dict = {},
                ) -> List[str]:
                    if self.logger:
                        self.logger.info(
                            "LLM Markdown: Processing chunk {chunk_num}",
                            tag="CHUNK",
                            params={"chunk_num": i + 1},
                        )
                    return perform_completion_with_backoff(
                        provider,
                        prompt,
                        api_token,
                        base_url=base_url,
                        extra_args=extra_args,
                    )

                future = executor.submit(
                    perform_completion_with_backoff,
                    self.provider,
                    _proceed_with_chunk,
                    self.llm_config.provider,
                    prompt,
                    self.api_token,
                    base_url=self.api_base,
                    extra_args=self.extra_args
                    self.llm_config.api_token,
                    self.llm_config.base_url,
                    self.extra_args,
                )
                futures.append((i, future))

@@ -920,59 +991,61 @@ class LLMContentFilter(RelevantContentFilter):
            for i, future in sorted(futures):
                try:
                    response = future.result()

                    # Track usage
                    usage = TokenUsage(
                        completion_tokens=response.usage.completion_tokens,
                        prompt_tokens=response.usage.prompt_tokens,
                        total_tokens=response.usage.total_tokens,
                        completion_tokens_details=response.usage.completion_tokens_details.__dict__
                        if response.usage.completion_tokens_details else {},
                        prompt_tokens_details=response.usage.prompt_tokens_details.__dict__
                        if response.usage.prompt_tokens_details else {},
                        completion_tokens_details=(
                            response.usage.completion_tokens_details.__dict__
                            if response.usage.completion_tokens_details
                            else {}
                        ),
                        prompt_tokens_details=(
                            response.usage.prompt_tokens_details.__dict__
                            if response.usage.prompt_tokens_details
                            else {}
                        ),
                    )
                    self.usages.append(usage)
                    self.total_usage.completion_tokens += usage.completion_tokens
                    self.total_usage.prompt_tokens += usage.prompt_tokens
                    self.total_usage.total_tokens += usage.total_tokens

                    blocks = extract_xml_data(["content"], response.choices[0].message.content)["content"]
                    blocks = extract_xml_data(
                        ["content"], response.choices[0].message.content
                    )["content"]
                    if blocks:
                        ordered_results.append(blocks)
                        if self.logger:
                            self.logger.success(
                                "Successfully processed chunk {chunk_num}",
                                "LLM markdown: Successfully processed chunk {chunk_num}",
                                tag="CHUNK",
                                params={"chunk_num": i + 1}
                                params={"chunk_num": i + 1},
                            )
                except Exception as e:
                    if self.logger:
                        self.logger.error(
                            "Error processing chunk {chunk_num}: {error}",
                            "LLM markdown: Error processing chunk {chunk_num}: {error}",
                            tag="CHUNK",
                            params={
                                "chunk_num": i + 1,
                                "error": str(e)
                            }
                            params={"chunk_num": i + 1, "error": str(e)},
                        )

        end_time = time.time()
        if self.logger:
            self.logger.success(
                "Completed processing in {time:.2f}s",
                "LLM markdown: Completed processing in {time:.2f}s",
                tag="LLM",
                params={"time": end_time - start_time},
                colors={"time": Fore.YELLOW}
                colors={"time": Fore.YELLOW},
            )

        result = ordered_results if ordered_results else []

        # Cache the final result
        cache_data = {
            'blocks': result,
            'usage': self.total_usage.__dict__
        }
        with cache_file.open('w') as f:
        cache_data = {"blocks": result, "usage": self.total_usage.__dict__}
        with cache_file.open("w") as f:
            json.dump(cache_data, f)
        if self.logger:
            self.logger.info("Cached results for future use", tag="CACHE")
@@ -996,4 +1069,4 @@ class LLMContentFilter(RelevantContentFilter):
        print(
            f"{i:<10} {usage.completion_tokens:>12,} "
            f"{usage.prompt_tokens:>12,} {usage.total_tokens:>12,}"
        )
            )

crawl4ai/content_scraping_strategy.py
@@ -28,6 +28,7 @@ from lxml import etree
from lxml import html as lhtml
from typing import List
from .models import ScrapingResult, MediaItem, Link, Media, Links
import copy

# Pre-compile regular expressions for Open Graph and Twitter metadata
OG_REGEX = re.compile(r"^og:")
@@ -48,7 +49,7 @@ def parse_srcset(s: str) -> List[Dict]:
    if len(parts) >= 1:
        url = parts[0]
        width = (
            parts[1].rstrip("w")
            parts[1].rstrip("w").split('.')[0]
            if len(parts) > 1 and parts[1].endswith("w")
            else None
        )
@@ -128,7 +129,8 @@ class WebScrapingStrategy(ContentScrapingStrategy):
        Returns:
            ScrapingResult: A structured result containing the scraped content.
        """
        raw_result = self._scrap(url, html, is_async=False, **kwargs)
        actual_url = kwargs.get("redirected_url", url)
        raw_result = self._scrap(actual_url, html, is_async=False, **kwargs)
        if raw_result is None:
            return ScrapingResult(
                cleaned_html="",
@@ -155,6 +157,7 @@ class WebScrapingStrategy(ContentScrapingStrategy):
                for aud in raw_result.get("media", {}).get("audios", [])
                if aud
            ],
            tables=raw_result.get("media", {}).get("tables", [])
        )

        # Convert links
@@ -193,6 +196,153 @@ class WebScrapingStrategy(ContentScrapingStrategy):
        """
        return await asyncio.to_thread(self._scrap, url, html, **kwargs)

    def is_data_table(self, table: Tag, **kwargs) -> bool:
        """
        Determine if a table element is a data table (not a layout table).

        Args:
            table (Tag): BeautifulSoup Tag representing a table element
            **kwargs: Additional keyword arguments including table_score_threshold

        Returns:
            bool: True if the table is a data table, False otherwise
        """
        score = 0

        # Check for thead and tbody
        has_thead = len(table.select('thead')) > 0
        has_tbody = len(table.select('tbody')) > 0
        if has_thead:
            score += 2
        if has_tbody:
            score += 1

        # Check for th elements
        th_count = len(table.select('th'))
        if th_count > 0:
            score += 2
            if has_thead or len(table.select('tr:first-child th')) > 0:
                score += 1

        # Check for nested tables
        if len(table.select('table')) > 0:
            score -= 3

        # Role attribute check
        role = table.get('role', '').lower()
        if role in {'presentation', 'none'}:
            score -= 3

        # Column consistency
        rows = table.select('tr')
        if not rows:
            return False

        col_counts = [len(row.select('td, th')) for row in rows]
        avg_cols = sum(col_counts) / len(col_counts)
        variance = sum((c - avg_cols) ** 2 for c in col_counts) / len(col_counts)
        if variance < 1:
            score += 2

        # Caption and summary
        if table.select('caption'):
            score += 2
        if table.has_attr('summary') and table['summary']:
            score += 1

        # Text density
        total_text = sum(len(cell.get_text().strip()) for row in rows for cell in row.select('td, th'))
        total_tags = sum(1 for _ in table.descendants if isinstance(_, Tag))
        text_ratio = total_text / (total_tags + 1e-5)
        if text_ratio > 20:
            score += 3
        elif text_ratio > 10:
            score += 2

        # Data attributes
        data_attrs = sum(1 for attr in table.attrs if attr.startswith('data-'))
        score += data_attrs * 0.5

        # Size check
        if avg_cols >= 2 and len(rows) >= 2:
            score += 2

        threshold = kwargs.get('table_score_threshold', 7)
        return score >= threshold

    def extract_table_data(self, table: Tag) -> dict:
        """
        Extract structured data from a table element.

        Args:
            table (Tag): BeautifulSoup Tag representing a table element

        Returns:
            dict: Dictionary containing table data (headers, rows, caption, summary)
        """
        caption_elem = table.select_one('caption')
        caption = caption_elem.get_text().strip() if caption_elem else ""
        summary = table.get('summary', '').strip()

        # Extract headers with colspan handling
        headers = []
        thead_rows = table.select('thead tr')
        if thead_rows:
            header_cells = thead_rows[0].select('th')
            for cell in header_cells:
                text = cell.get_text().strip()
                colspan = int(cell.get('colspan', 1))
                headers.extend([text] * colspan)
        else:
            first_row = table.select('tr:first-child')
            if first_row:
                for cell in first_row[0].select('th, td'):
                    text = cell.get_text().strip()
                    colspan = int(cell.get('colspan', 1))
                    headers.extend([text] * colspan)

        # Extract rows with colspan handling
        rows = []
        all_rows = table.select('tr')
        thead = table.select_one('thead')
        tbody_rows = []

        if thead:
            thead_rows = thead.select('tr')
            tbody_rows = [row for row in all_rows if row not in thead_rows]
        else:
            if all_rows and all_rows[0].select('th'):
                tbody_rows = all_rows[1:]
            else:
                tbody_rows = all_rows

        for row in tbody_rows:
            # for row in table.select('tr:not(:has(ancestor::thead))'):
            row_data = []
            for cell in row.select('td'):
                text = cell.get_text().strip()
                colspan = int(cell.get('colspan', 1))
                row_data.extend([text] * colspan)
            if row_data:
                rows.append(row_data)

        # Align rows with headers
        max_columns = len(headers) if headers else (max(len(row) for row in rows) if rows else 0)
        aligned_rows = []
        for row in rows:
            aligned = row[:max_columns] + [''] * (max_columns - len(row))
            aligned_rows.append(aligned)

        if not headers:
            headers = [f"Column {i+1}" for i in range(max_columns)]

        return {
            "headers": headers,
            "rows": aligned_rows,
            "caption": caption,
            "summary": summary,
        }
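
    # Illustrative shape of the returned dict for a simple two-column table:
    #   {"headers": ["Name", "Price"],
    #    "rows": [["Widget", "9.99"], ["Gadget", "19.99"]],
    #    "caption": "", "summary": ""}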

    def flatten_nested_elements(self, node):
        """
        Flatten nested elements in an HTML tree.
@@ -431,7 +581,7 @@ class WebScrapingStrategy(ContentScrapingStrategy):
        Returns:
            dict: A dictionary containing the processed element information.
        """
        media = {"images": [], "videos": [], "audios": []}
        media = {"images": [], "videos": [], "audios": [], "tables": []}
        internal_links_dict = {}
        external_links_dict = {}
        self._process_element(
@@ -471,6 +621,9 @@ class WebScrapingStrategy(ContentScrapingStrategy):
            return False

        keep_element = False
        # Special case for table elements - always preserve structure
        if element.name in ["tr", "td", "th"]:
            keep_element = True

        exclude_domains = kwargs.get("exclude_domains", [])
        # exclude_social_media_domains = kwargs.get('exclude_social_media_domains', set(SOCIAL_MEDIA_DOMAINS))
@@ -529,6 +682,9 @@ class WebScrapingStrategy(ContentScrapingStrategy):
                    if normalized_href not in external_links_dict:
                        external_links_dict[normalized_href] = link_data
                else:
                    if kwargs.get("exclude_internal_links", False):
                        element.decompose()
                        return False
                    if normalized_href not in internal_links_dict:
                        internal_links_dict[normalized_href] = link_data

@@ -629,7 +785,7 @@ class WebScrapingStrategy(ContentScrapingStrategy):

        try:
            self.remove_unwanted_attributes(
                element, IMPORTANT_ATTRS, kwargs.get("keep_data_attributes", False)
                element, IMPORTANT_ATTRS + kwargs.get("keep_attrs", []), kwargs.get("keep_data_attributes", False)
            )
        except Exception as e:
            # print('Error removing unwanted attributes:', str(e))
@@ -685,6 +841,7 @@ class WebScrapingStrategy(ContentScrapingStrategy):
        html: str,
        word_count_threshold: int = MIN_WORD_THRESHOLD,
        css_selector: str = None,
        target_elements: List[str] = None,
        **kwargs,
    ) -> Dict[str, Any]:
        """
@@ -707,7 +864,15 @@ class WebScrapingStrategy(ContentScrapingStrategy):
        parser_type = kwargs.get("parser", "lxml")
        soup = BeautifulSoup(html, parser_type)
        body = soup.body
        if body is None:
            raise Exception("'<body>' tag is not found in fetched html. Consider adding wait_for=\"css:body\" to wait for body tag to be loaded into DOM.")
        base_domain = get_base_domain(url)

        # Early removal of all images if exclude_all_images is set
        # This happens before any processing to minimize memory usage
        if kwargs.get("exclude_all_images", False):
            for img in body.find_all('img'):
                img.decompose()

        try:
            meta = extract_metadata("", soup)
@@ -739,22 +904,20 @@ class WebScrapingStrategy(ContentScrapingStrategy):
            for element in body.select(excluded_selector):
                element.extract()

        if css_selector:
            selected_elements = body.select(css_selector)
            if not selected_elements:
                return {
                    "markdown": "",
                    "cleaned_html": "",
                    "success": True,
                    "media": {"images": [], "videos": [], "audios": []},
                    "links": {"internal": [], "external": []},
                    "metadata": {},
                    "message": f"No elements found for CSS selector: {css_selector}",
                }
                # raise InvalidCSSSelectorError(f"Invalid CSS selector, No elements found for CSS selector: {css_selector}")
            body = soup.new_tag("div")
            for el in selected_elements:
                body.append(el)
        content_element = None
        if target_elements:
            try:
                for_content_targeted_element = []
                for target_element in target_elements:
                    for_content_targeted_element.extend(body.select(target_element))
                content_element = soup.new_tag("div")
                for el in for_content_targeted_element:
                    content_element.append(copy.deepcopy(el))
            except Exception as e:
                self._log("error", f"Error with target element detection: {str(e)}", "SCRAPE")
                return None
        else:
            content_element = body

        kwargs["exclude_social_media_domains"] = set(
            kwargs.get("exclude_social_media_domains", []) + SOCIAL_MEDIA_DOMAINS
@@ -794,6 +957,15 @@ class WebScrapingStrategy(ContentScrapingStrategy):
            if result is not None
            for img in result
        ]

        # Process tables if not excluded
        excluded_tags = set(kwargs.get("excluded_tags", []) or [])
        if 'table' not in excluded_tags:
            tables = body.find_all('table')
            for table in tables:
                if self.is_data_table(table, **kwargs):
                    table_data = self.extract_table_data(table)
                    media["tables"].append(table_data)

        body = self.flatten_nested_elements(body)
        base64_pattern = re.compile(r'data:image/[^;]+;base64,([^"]+)')
@@ -805,7 +977,7 @@ class WebScrapingStrategy(ContentScrapingStrategy):

        str_body = ""
        try:
            str_body = body.encode_contents().decode("utf-8")
            str_body = content_element.encode_contents().decode("utf-8")
        except Exception:
            # Reset body to the original HTML
            success = False
@@ -844,7 +1016,6 @@ class WebScrapingStrategy(ContentScrapingStrategy):
        cleaned_html = str_body.replace("\n\n", "\n").replace("  ", " ")

        return {
            # **markdown_content,
            "cleaned_html": cleaned_html,
            "success": success,
            "media": media,
@@ -1127,6 +1298,9 @@ class LXMLWebScrapingStrategy(WebScrapingStrategy):
            "source",
            "track",
            "wbr",
            "tr",
            "td",
            "th",
        }

        for el in reversed(list(root.iterdescendants())):
@@ -1184,12 +1358,125 @@ class LXMLWebScrapingStrategy(WebScrapingStrategy):

        return root
    def is_data_table(self, table: etree.Element, **kwargs) -> bool:
        score = 0
        # Check for thead and tbody
        has_thead = len(table.xpath(".//thead")) > 0
        has_tbody = len(table.xpath(".//tbody")) > 0
        if has_thead:
            score += 2
        if has_tbody:
            score += 1

        # Check for th elements
        th_count = len(table.xpath(".//th"))
        if th_count > 0:
            score += 2
            if has_thead or table.xpath(".//tr[1]/th"):
                score += 1

        # Check for nested tables
        if len(table.xpath(".//table")) > 0:
            score -= 3

        # Role attribute check
        role = table.get("role", "").lower()
        if role in {"presentation", "none"}:
            score -= 3

        # Column consistency
        rows = table.xpath(".//tr")
        if not rows:
            return False
        col_counts = [len(row.xpath(".//td|.//th")) for row in rows]
        avg_cols = sum(col_counts) / len(col_counts)
        variance = sum((c - avg_cols) ** 2 for c in col_counts) / len(col_counts)
        if variance < 1:
            score += 2

        # Caption and summary
        if table.xpath(".//caption"):
            score += 2
        if table.get("summary"):
            score += 1

        # Text density
        total_text = sum(len(''.join(cell.itertext()).strip()) for row in rows for cell in row.xpath(".//td|.//th"))
        total_tags = sum(1 for _ in table.iterdescendants())
        text_ratio = total_text / (total_tags + 1e-5)
        if text_ratio > 20:
            score += 3
        elif text_ratio > 10:
            score += 2

        # Data attributes
        data_attrs = sum(1 for attr in table.attrib if attr.startswith('data-'))
        score += data_attrs * 0.5

        # Size check
        if avg_cols >= 2 and len(rows) >= 2:
            score += 2

        threshold = kwargs.get("table_score_threshold", 7)
        return score >= threshold

    def extract_table_data(self, table: etree.Element) -> dict:
        caption = table.xpath(".//caption/text()")
        caption = caption[0].strip() if caption else ""
        summary = table.get("summary", "").strip()

        # Extract headers with colspan handling
        headers = []
        thead_rows = table.xpath(".//thead/tr")
        if thead_rows:
            header_cells = thead_rows[0].xpath(".//th")
            for cell in header_cells:
                text = cell.text_content().strip()
                colspan = int(cell.get("colspan", 1))
                headers.extend([text] * colspan)
        else:
            first_row = table.xpath(".//tr[1]")
            if first_row:
                for cell in first_row[0].xpath(".//th|.//td"):
                    text = cell.text_content().strip()
                    colspan = int(cell.get("colspan", 1))
                    headers.extend([text] * colspan)

        # Extract rows with colspan handling
        rows = []
        for row in table.xpath(".//tr[not(ancestor::thead)]"):
            row_data = []
            for cell in row.xpath(".//td"):
                text = cell.text_content().strip()
                colspan = int(cell.get("colspan", 1))
                row_data.extend([text] * colspan)
            if row_data:
                rows.append(row_data)

        # Align rows with headers
        max_columns = len(headers) if headers else (max(len(row) for row in rows) if rows else 0)
        aligned_rows = []
        for row in rows:
            aligned = row[:max_columns] + [''] * (max_columns - len(row))
            aligned_rows.append(aligned)

        if not headers:
            headers = [f"Column {i+1}" for i in range(max_columns)]

        return {
            "headers": headers,
            "rows": aligned_rows,
            "caption": caption,
            "summary": summary,
        }

    def _scrap(
        self,
        url: str,
        html: str,
        word_count_threshold: int = MIN_WORD_THRESHOLD,
        css_selector: str = None,
        target_elements: List[str] = None,
        **kwargs,
    ) -> Dict[str, Any]:
        if not html:
@@ -1203,6 +1490,13 @@ class LXMLWebScrapingStrategy(WebScrapingStrategy):
            body = doc

        base_domain = get_base_domain(url)

        # Early removal of all images if exclude_all_images is set
        # This is more efficient in lxml as we remove elements before any processing
        if kwargs.get("exclude_all_images", False):
            for img in body.xpath('//img'):
                if img.getparent() is not None:
                    img.getparent().remove(img)

        # Add comment removal
        if kwargs.get("remove_comments", False):
@@ -1239,25 +1533,19 @@ class LXMLWebScrapingStrategy(WebScrapingStrategy):
            self._log("error", f"Error extracting metadata: {str(e)}", "SCRAPE")
            meta = {}

        # Handle CSS selector targeting
        if css_selector:
        content_element = None
        if target_elements:
            try:
                selected_elements = body.cssselect(css_selector)
                if not selected_elements:
                    return {
                        "markdown": "",
                        "cleaned_html": "",
                        "success": True,
                        "media": {"images": [], "videos": [], "audios": []},
                        "links": {"internal": [], "external": []},
                        "metadata": meta,
                        "message": f"No elements found for CSS selector: {css_selector}",
                    }
                body = lhtml.Element("div")
                body.extend(selected_elements)
                for_content_targeted_element = []
                for target_element in target_elements:
                    for_content_targeted_element.extend(body.cssselect(target_element))
                content_element = lhtml.Element("div")
                content_element.extend(copy.deepcopy(for_content_targeted_element))
            except Exception as e:
                self._log("error", f"Error with CSS selector: {str(e)}", "SCRAPE")
                self._log("error", f"Error with target element detection: {str(e)}", "SCRAPE")
                return None
        else:
            content_element = body

        # Remove script and style tags
        for tag in ["script", "style", "link", "meta", "noscript"]:
@@ -1281,7 +1569,7 @@ class LXMLWebScrapingStrategy(WebScrapingStrategy):
                form.getparent().remove(form)

        # Process content
        media = {"images": [], "videos": [], "audios": []}
        media = {"images": [], "videos": [], "audios": [], "tables": []}
        internal_links_dict = {}
        external_links_dict = {}

@@ -1295,6 +1583,13 @@ class LXMLWebScrapingStrategy(WebScrapingStrategy):
            **kwargs,
        )

        if 'table' not in excluded_tags:
            tables = body.xpath(".//table")
            for table in tables:
                if self.is_data_table(table, **kwargs):
                    table_data = self.extract_table_data(table)
                    media["tables"].append(table_data)

        # Handle only_text option
        if kwargs.get("only_text", False):
            for tag in ONLY_TEXT_ELIGIBLE_TAGS:
@@ -1314,14 +1609,15 @@ class LXMLWebScrapingStrategy(WebScrapingStrategy):
        # Remove empty elements
        self.remove_empty_elements_fast(body, 1)

        # Remvoe unneeded attributes
        # Remove unneeded attributes
        self.remove_unwanted_attributes_fast(
            body, keep_data_attributes=kwargs.get("keep_data_attributes", False)
        )

        # Generate output HTML
        cleaned_html = lhtml.tostring(
            body,
            # body,
            content_element,
            encoding="unicode",
            pretty_print=True,
            method="html",
@@ -1366,7 +1662,12 @@ class LXMLWebScrapingStrategy(WebScrapingStrategy):
        return {
            "cleaned_html": cleaned_html,
            "success": False,
            "media": {"images": [], "videos": [], "audios": []},
            "media": {
                "images": [],
                "videos": [],
                "audios": [],
                "tables": []
            },
            "links": {"internal": [], "external": []},
            "metadata": {},
        }
0  crawl4ai/crawlers/__init__.py  Normal file
0  crawl4ai/crawlers/amazon_product/__init__.py  Normal file
20  crawl4ai/crawlers/amazon_product/crawler.py  Normal file
@@ -0,0 +1,20 @@
import json

from crawl4ai.hub import BaseCrawler

__meta__ = {
    "version": "1.2.0",
    "tested_on": ["amazon.com"],
    "rate_limit": "50 RPM",
    "schema": {"product": ["name", "price"]}
}

class AmazonProductCrawler(BaseCrawler):
    async def run(self, url: str, **kwargs) -> str:
        try:
            self.logger.info(f"Crawling {url}")
            return '{"product": {"name": "Test Amazon Product"}}'
        except Exception as e:
            self.logger.error(f"Crawl failed: {str(e)}")
            return json.dumps({
                "error": str(e),
                "metadata": self.meta  # Include meta in error response
            })
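# Hedged usage sketch (the import path is illustrative):
#   import asyncio
#   from crawl4ai.crawlers.amazon_product.crawler import AmazonProductCrawler
#   print(asyncio.run(AmazonProductCrawler().run("https://www.amazon.com/dp/XXXXXXXXXX")))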
0  crawl4ai/crawlers/google_search/__init__.py  Normal file
131  crawl4ai/crawlers/google_search/crawler.py  Normal file
@@ -0,0 +1,131 @@
from crawl4ai import BrowserConfig, AsyncWebCrawler, CrawlerRunConfig, CacheMode
from crawl4ai.hub import BaseCrawler
from crawl4ai.utils import optimize_html, get_home_folder, preprocess_html_for_schema
from crawl4ai.extraction_strategy import JsonCssExtractionStrategy
from pathlib import Path
import json
import os
from typing import Dict


class GoogleSearchCrawler(BaseCrawler):
    __meta__ = {
        "version": "1.0.0",
        "tested_on": ["google.com/search*"],
        "rate_limit": "10 RPM",
        "description": "Crawls Google Search results (text + images)",
    }

    def __init__(self):
        super().__init__()
        self.js_script = (Path(__file__).parent / "script.js").read_text()

    async def run(self, url="", query: str = "", search_type: str = "text", schema_cache_path=None, **kwargs) -> str:
        """Crawl Google Search results for a query"""
        url = f"https://www.google.com/search?q={query}&gl=sg&hl=en" if search_type == "text" else f"https://www.google.com/search?q={query}&gl=sg&hl=en&tbs=qdr:d&udm=2"
        if kwargs.get("page_start", 1) > 1:
            url = f"{url}&start={kwargs['page_start'] * 10}"
        if kwargs.get("page_length", 1) > 1:
            url = f"{url}&num={kwargs['page_length']}"

        browser_config = BrowserConfig(headless=True, verbose=True)
        async with AsyncWebCrawler(config=browser_config) as crawler:
            config = CrawlerRunConfig(
                cache_mode=kwargs.get("cache_mode", CacheMode.BYPASS),
                keep_attrs=["id", "class"],
                keep_data_attributes=True,
                delay_before_return_html=kwargs.get(
                    "delay", 2 if search_type == "image" else 1),
                js_code=self.js_script if search_type == "image" else None,
            )

            result = await crawler.arun(url=url, config=config)
            if not result.success:
                return json.dumps({"error": result.error})

            if search_type == "image":
                if result.js_execution_result.get("success", False) is False:
                    return json.dumps({"error": result.js_execution_result.get("error", "Unknown error")})
                if "results" in result.js_execution_result:
                    image_result = result.js_execution_result['results'][0]
                    if image_result.get("success", False) is False:
                        return json.dumps({"error": image_result.get("error", "Unknown error")})
                    return json.dumps(image_result["result"], indent=4)

            # For text search, extract structured data
            schemas = await self._build_schemas(result.cleaned_html, schema_cache_path)
            extracted = {
                key: JsonCssExtractionStrategy(schema=schemas[key]).run(
                    url=url, sections=[result.html]
                )
                for key in schemas
            }
            return json.dumps(extracted, indent=4)

    async def _build_schemas(self, html: str, schema_cache_path: str = None) -> Dict[str, Dict]:
        """Build extraction schemas (organic, top stories, etc.)"""
        home_dir = get_home_folder() if not schema_cache_path else schema_cache_path
        os.makedirs(f"{home_dir}/schema", exist_ok=True)

        # cleaned_html = optimize_html(html, threshold=100)
        cleaned_html = preprocess_html_for_schema(html)

        organic_schema = None
        if os.path.exists(f"{home_dir}/schema/organic_schema.json"):
            with open(f"{home_dir}/schema/organic_schema.json", "r") as f:
                organic_schema = json.load(f)
        else:
            organic_schema = JsonCssExtractionStrategy.generate_schema(
                html=cleaned_html,
                target_json_example="""{
                    "title": "...",
                    "link": "...",
                    "snippet": "...",
                    "date": "1 hour ago",
                }""",
                query="""The given HTML is the crawled HTML from a Google search result. Please find the schema for an organic search item in the given HTML; I am interested in title, link, snippet text, and date."""
            )

            with open(f"{home_dir}/schema/organic_schema.json", "w") as f:
                f.write(json.dumps(organic_schema))

        top_stories_schema = None
        if os.path.exists(f"{home_dir}/schema/top_stories_schema.json"):
            with open(f"{home_dir}/schema/top_stories_schema.json", "r") as f:
                top_stories_schema = json.load(f)
        else:
            top_stories_schema = JsonCssExtractionStrategy.generate_schema(
                html=cleaned_html,
                target_json_example="""{
                    "title": "...",
                    "link": "...",
                    "source": "Insider Monkey",
                    "date": "1 hour ago",
                }""",
                query="""The given HTML is the crawled HTML from a Google search result. Please find the schema for a Top Story item in the given HTML; I am interested in title, link, source, date and imageUrl."""
            )

            with open(f"{home_dir}/schema/top_stories_schema.json", "w") as f:
                f.write(json.dumps(top_stories_schema))

        suggested_query_schema = None
        if os.path.exists(f"{home_dir}/schema/suggested_query_schema.json"):
            with open(f"{home_dir}/schema/suggested_query_schema.json", "r") as f:
                suggested_query_schema = json.load(f)
        else:
            suggested_query_schema = JsonCssExtractionStrategy.generate_schema(
                html=cleaned_html,
                target_json_example="""{
                    "query": "A for Apple",
                }""",
                query="""The given HTML contains the crawled HTML from Google search results. Please find the schema for each suggested query in the section "People also search for" within the given HTML. I am interested in the queries only."""
            )
            with open(f"{home_dir}/schema/suggested_query_schema.json", "w") as f:
                f.write(json.dumps(suggested_query_schema))

        return {
            "organic_schema": organic_schema,
            "top_stories_schema": top_stories_schema,
            "suggested_query_schema": suggested_query_schema,
        }
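# Hedged usage sketch (the import path is illustrative):
#   import asyncio
#   from crawl4ai.crawlers.google_search.crawler import GoogleSearchCrawler
#   print(asyncio.run(GoogleSearchCrawler().run(query="crawl4ai", search_type="text")))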
115  crawl4ai/crawlers/google_search/script.js  Normal file
@@ -0,0 +1,115 @@
|
||||
(() => {
    // Function to extract image data from Google Images page
    function extractImageData() {
        const keys = Object.keys(window.W_jd);
        let allImageData = [];
        let currentPosition = 0;

        // Get the symbol we'll use (from first valid entry)
        let targetSymbol;
        for (let key of keys) {
            try {
                const symbols = Object.getOwnPropertySymbols(window.W_jd[key]);
                if (symbols.length > 0) {
                    targetSymbol = symbols[0];
                    break;
                }
            } catch (e) {
                continue;
            }
        }

        if (!targetSymbol) return [];

        // Iterate through ALL keys
        for (let key of keys) {
            try {
                const o1 = window.W_jd[key][targetSymbol];
                if (!o1) continue;
                const data = Object.values(o1)[0];
                // const data = window.W_jd[key][targetSymbol]?.Ws;
                // Check if this is a valid image data entry
                if (data && Array.isArray(data[1])) {
                    const processedData = processImageEntry(data, currentPosition);
                    if (processedData) {
                        allImageData.push(processedData);
                        currentPosition++;
                    }
                }
            } catch (e) {
                continue;
            }
        }

        return allImageData;
    }

    function processImageEntry(entry, position) {
        const imageData = entry[1];
        if (!Array.isArray(imageData)) return null;

        // Extract the image ID
        const imageId = imageData[1];
        if (!imageId) return null;

        // Find the corresponding DOM element
        const domElement = document.querySelector(`[data-docid="${imageId}"]`);
        if (!domElement) return null;

        // Extract data from the array structure
        const [
            _,
            id,
            thumbnailInfo,
            imageInfo,
            __,
            ___,
            rgb,
            ____,
            _____,
            metadata
        ] = imageData;

        // Ensure we have the required data
        if (!thumbnailInfo || !imageInfo) return null;

        // Extract metadata from DOM
        const title = domElement?.querySelector('.toI8Rb')?.textContent?.trim();
        const source = domElement?.querySelector('.guK3rf')?.textContent?.trim();
        const link = domElement?.querySelector('a.EZAeBe')?.href;

        if (!link) return null;

        // Build Google Image URL
        const googleUrl = buildGoogleImageUrl(imageInfo[0], link, imageId, imageInfo[1], imageInfo[2]);

        return {
            title,
            imageUrl: imageInfo[0],
            imageWidth: imageInfo[2],
            imageHeight: imageInfo[1],
            thumbnailUrl: thumbnailInfo[0],
            thumbnailWidth: thumbnailInfo[2],
            thumbnailHeight: thumbnailInfo[1],
            source,
            domain: metadata['2000']?.[1] || new URL(link).hostname,
            link,
            googleUrl,
            position: position + 1
        };
    }

    function buildGoogleImageUrl(imgUrl, refUrl, tbnid, height, width) {
        const params = new URLSearchParams({
            imgurl: imgUrl,
            tbnid: tbnid,
            imgrefurl: refUrl,
            docid: tbnid,
            w: width.toString(),
            h: height.toString(),
        });

        return `https://www.google.com/imgres?${params.toString()}`;
    }

    return extractImageData();
})();
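
The script is an IIFE that returns the extracted image list, so it can be evaluated directly in a page context. A hedged, standalone harness using Playwright's page.evaluate (Crawl4AI drives Playwright under the hood); the search URL is hypothetical, whether evaluate treats the IIFE string as a plain expression depends on Playwright's heuristics, and the window.W_jd internals the script relies on are Google-specific and may change at any time:

import asyncio
from pathlib import Path

from playwright.async_api import async_playwright

async def main():
    script = Path("crawl4ai/crawlers/google_search/script.js").read_text()
    async with async_playwright() as p:
        browser = await p.chromium.launch()
        page = await browser.new_page()
        # Hypothetical Google Images results page for the query "crawl4ai".
        await page.goto("https://www.google.com/search?q=crawl4ai&udm=2")
        images = await page.evaluate(script)  # the IIFE returns the extracted list
        print(f"extracted {len(images)} images")
        await browser.close()

asyncio.run(main())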
47
crawl4ai/deep_crawling/__init__.py
Normal file
@@ -0,0 +1,47 @@
# deep_crawling/__init__.py
from .base_strategy import DeepCrawlDecorator, DeepCrawlStrategy
from .bfs_strategy import BFSDeepCrawlStrategy
from .bff_strategy import BestFirstCrawlingStrategy
from .dfs_strategy import DFSDeepCrawlStrategy
from .filters import (
    FilterChain,
    ContentTypeFilter,
    DomainFilter,
    URLFilter,
    URLPatternFilter,
    FilterStats,
    ContentRelevanceFilter,
    SEOFilter
)
from .scorers import (
    KeywordRelevanceScorer,
    URLScorer,
    CompositeScorer,
    DomainAuthorityScorer,
    FreshnessScorer,
    PathDepthScorer,
    ContentTypeScorer
)

__all__ = [
    "DeepCrawlDecorator",
    "DeepCrawlStrategy",
    "BFSDeepCrawlStrategy",
    "BestFirstCrawlingStrategy",
    "DFSDeepCrawlStrategy",
    "FilterChain",
    "ContentTypeFilter",
    "DomainFilter",
    "URLFilter",
    "URLPatternFilter",
    "FilterStats",
    "ContentRelevanceFilter",
    "SEOFilter",
    "KeywordRelevanceScorer",
    "URLScorer",
    "CompositeScorer",
    "DomainAuthorityScorer",
    "FreshnessScorer",
    "PathDepthScorer",
    "ContentTypeScorer",
]
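
With these re-exports, a deep crawl can be assembled entirely from the package root. A short sketch; the DomainFilter and KeywordRelevanceScorer constructor keyword names are assumptions, not confirmed by this diff:

from crawl4ai.deep_crawling import (
    BFSDeepCrawlStrategy,
    FilterChain,
    DomainFilter,
    KeywordRelevanceScorer,
)

strategy = BFSDeepCrawlStrategy(
    max_depth=2,
    filter_chain=FilterChain([DomainFilter(allowed_domains=["docs.crawl4ai.com"])]),  # assumed kwarg
    url_scorer=KeywordRelevanceScorer(keywords=["tutorial", "api"]),  # assumed kwarg
)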
159
crawl4ai/deep_crawling/base_strategy.py
Normal file
@@ -0,0 +1,159 @@
from __future__ import annotations

from abc import ABC, abstractmethod
from typing import AsyncGenerator, Optional, Set, List, Dict
from functools import wraps
from contextvars import ContextVar
from ..types import AsyncWebCrawler, CrawlerRunConfig, CrawlResult, RunManyReturn


class DeepCrawlDecorator:
    """Decorator that adds deep crawling capability to the arun method."""
    deep_crawl_active = ContextVar("deep_crawl_active", default=False)

    def __init__(self, crawler: AsyncWebCrawler):
        self.crawler = crawler

    def __call__(self, original_arun):
        @wraps(original_arun)
        async def wrapped_arun(url: str, config: CrawlerRunConfig = None, **kwargs):
            # Run the deep-crawl strategy only when one is configured and not
            # already active; otherwise fall through to the original method to
            # avoid recursion.
            if config and config.deep_crawl_strategy and not self.deep_crawl_active.get():
                token = self.deep_crawl_active.set(True)
                # Await the arun call to get the actual result object.
                result_obj = await config.deep_crawl_strategy.arun(
                    crawler=self.crawler,
                    start_url=url,
                    config=config
                )
                if config.stream:
                    async def result_wrapper():
                        try:
                            async for result in result_obj:
                                yield result
                        finally:
                            self.deep_crawl_active.reset(token)
                    return result_wrapper()
                else:
                    try:
                        return result_obj
                    finally:
                        self.deep_crawl_active.reset(token)
            return await original_arun(url, config=config, **kwargs)
        return wrapped_arun


class DeepCrawlStrategy(ABC):
    """
    Abstract base class for deep crawling strategies.

    Core functions:
    - arun: Main entry point that returns an async generator of CrawlResults.
    - shutdown: Clean up resources.
    - can_process_url: Validate a URL and decide whether to process it.
    - link_discovery: Extract and process links from a CrawlResult.
    """

    @abstractmethod
    async def _arun_batch(
        self,
        start_url: str,
        crawler: AsyncWebCrawler,
        config: CrawlerRunConfig,
    ) -> List[CrawlResult]:
        """
        Batch (non-streaming) mode:
        Processes one BFS level at a time, then returns all the results.
        """
        pass

    @abstractmethod
    async def _arun_stream(
        self,
        start_url: str,
        crawler: AsyncWebCrawler,
        config: CrawlerRunConfig,
    ) -> AsyncGenerator[CrawlResult, None]:
        """
        Streaming mode:
        Processes one BFS level at a time and yields results immediately as they arrive.
        """
        pass

    async def arun(
        self,
        start_url: str,
        crawler: AsyncWebCrawler,
        config: Optional[CrawlerRunConfig] = None,
    ) -> RunManyReturn:
        """
        Traverse from the given URL using the specified crawler.

        Args:
            start_url (str): The URL from which to start crawling.
            crawler (AsyncWebCrawler): The crawler instance to use.
            config (Optional[CrawlerRunConfig]): Crawler run configuration.

        Returns:
            Union[CrawlResultT, List[CrawlResultT], AsyncGenerator[CrawlResultT, None]]
        """
        if config is None:
            raise ValueError("CrawlerRunConfig must be provided")

        if config.stream:
            return self._arun_stream(start_url, crawler, config)
        else:
            return await self._arun_batch(start_url, crawler, config)

    def __call__(self, start_url: str, crawler: AsyncWebCrawler, config: CrawlerRunConfig):
        return self.arun(start_url, crawler, config)

    @abstractmethod
    async def shutdown(self) -> None:
        """
        Clean up resources used by the deep crawl strategy.
        """
        pass

    @abstractmethod
    async def can_process_url(self, url: str, depth: int) -> bool:
        """
        Validate the URL format and apply custom filtering logic.

        Args:
            url (str): The URL to validate.
            depth (int): The current depth in the crawl.

        Returns:
            bool: True if the URL should be processed, False otherwise.
        """
        pass

    @abstractmethod
    async def link_discovery(
        self,
        result: CrawlResult,
        source_url: str,
        current_depth: int,
        visited: Set[str],
        next_level: List[tuple],
        depths: Dict[str, int],
    ) -> None:
        """
        Extract and process links from the given crawl result.

        This method should:
        - Validate each extracted URL using can_process_url.
        - Optionally score URLs.
        - Append valid URLs (and their parent references) to the next_level list.
        - Update the depths dictionary with the new depth for each URL.

        Args:
            result (CrawlResult): The result from a crawl operation.
            source_url (str): The URL from which this result was obtained.
            current_depth (int): The depth at which the source URL was processed.
            visited (Set[str]): Set of already visited URLs.
            next_level (List[tuple]): List of tuples (url, parent_url) for the next BFS level.
            depths (Dict[str, int]): Mapping of URLs to their current depth.
        """
        pass
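
The decorator above is the hook that turns a plain arun into a deep crawl whenever config.deep_crawl_strategy is set. A sketch of the wiring it implies; the library may install the decorator itself, so doing it manually here is shown only to make the contract visible:

import asyncio

from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
from crawl4ai.deep_crawling import DeepCrawlDecorator, BFSDeepCrawlStrategy

async def main():
    async with AsyncWebCrawler() as crawler:
        crawler.arun = DeepCrawlDecorator(crawler)(crawler.arun)
        config = CrawlerRunConfig(
            deep_crawl_strategy=BFSDeepCrawlStrategy(max_depth=1),
            stream=True,
        )
        # In stream mode the wrapped arun returns an async generator.
        async for result in await crawler.arun("https://docs.crawl4ai.com", config=config):
            print(result.metadata.get("depth"), result.url)

asyncio.run(main())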
257
crawl4ai/deep_crawling/bff_strategy.py
Normal file
@@ -0,0 +1,257 @@
# best_first_crawling_strategy.py
import asyncio
import logging
from datetime import datetime
from typing import AsyncGenerator, Optional, Set, Dict, List, Tuple
from urllib.parse import urlparse

from ..models import TraversalStats
from .filters import FilterChain
from .scorers import URLScorer
from . import DeepCrawlStrategy

from ..types import AsyncWebCrawler, CrawlerRunConfig, CrawlResult, RunManyReturn
from ..utils import normalize_url_for_deep_crawl

from math import inf as infinity

# Configurable batch size for processing items from the priority queue
BATCH_SIZE = 10


class BestFirstCrawlingStrategy(DeepCrawlStrategy):
    """
    Best-First Crawling Strategy using a priority queue.

    This strategy prioritizes URLs based on their score, ensuring that higher-value
    pages are crawled first. It reimplements the core traversal loop to use a priority
    queue while keeping URL validation and link discovery consistent with our design.

    Core methods:
    - arun: Returns either a list (batch mode) or an async generator (stream mode).
    - _arun_best_first: Core generator that uses a priority queue to yield CrawlResults.
    - can_process_url: Validates URLs and applies filtering (inherited behavior).
    - link_discovery: Extracts and validates links from a CrawlResult.
    """
    def __init__(
        self,
        max_depth: int,
        filter_chain: FilterChain = FilterChain(),
        url_scorer: Optional[URLScorer] = None,
        include_external: bool = False,
        max_pages: int = infinity,
        logger: Optional[logging.Logger] = None,
    ):
        self.max_depth = max_depth
        self.filter_chain = filter_chain
        self.url_scorer = url_scorer
        self.include_external = include_external
        self.max_pages = max_pages
        self.logger = logger or logging.getLogger(__name__)
        self.stats = TraversalStats(start_time=datetime.now())
        self._cancel_event = asyncio.Event()
        self._pages_crawled = 0

    async def can_process_url(self, url: str, depth: int) -> bool:
        """
        Validate the URL format and apply filtering.
        For the starting URL (depth 0), filtering is bypassed.
        """
        try:
            parsed = urlparse(url)
            if not parsed.scheme or not parsed.netloc:
                raise ValueError("Missing scheme or netloc")
            if parsed.scheme not in ("http", "https"):
                raise ValueError("Invalid scheme")
            if "." not in parsed.netloc:
                raise ValueError("Invalid domain")
        except Exception as e:
            self.logger.warning(f"Invalid URL: {url}, error: {e}")
            return False

        if depth != 0 and not await self.filter_chain.apply(url):
            return False

        return True

    async def link_discovery(
        self,
        result: CrawlResult,
        source_url: str,
        current_depth: int,
        visited: Set[str],
        next_links: List[Tuple[str, Optional[str]]],
        depths: Dict[str, int],
    ) -> None:
        """
        Extract links from the crawl result, validate them, and append new URLs
        (with their parent references) to next_links.
        Also updates the depths dictionary.
        """
        new_depth = current_depth + 1
        if new_depth > self.max_depth:
            return

        # If we've reached the max pages limit, don't discover new links
        remaining_capacity = self.max_pages - self._pages_crawled
        if remaining_capacity <= 0:
            self.logger.info(f"Max pages limit ({self.max_pages}) reached, stopping link discovery")
            return

        # Retrieve internal links; include external links if enabled.
        links = result.links.get("internal", [])
        if self.include_external:
            links += result.links.get("external", [])

        # Collect valid links first so we can cap them against remaining capacity.
        valid_links = []
        for link in links:
            url = link.get("href")
            base_url = normalize_url_for_deep_crawl(url, source_url)
            if base_url in visited:
                continue
            if not await self.can_process_url(url, new_depth):
                self.stats.urls_skipped += 1
                continue

            valid_links.append(base_url)

        # If we have more valid links than capacity, limit them
        if len(valid_links) > remaining_capacity:
            valid_links = valid_links[:remaining_capacity]
            self.logger.info(f"Limiting to {remaining_capacity} URLs due to max_pages limit")

        # Record the new depths and add to next_links
        for url in valid_links:
            depths[url] = new_depth
            next_links.append((url, source_url))

    async def _arun_best_first(
        self,
        start_url: str,
        crawler: AsyncWebCrawler,
        config: CrawlerRunConfig,
    ) -> AsyncGenerator[CrawlResult, None]:
        """
        Core best-first crawl method using a priority queue.

        The queue items are tuples of (score, depth, url, parent_url). Lower scores
        are treated as higher priority. URLs are processed in batches for efficiency.
        """
        queue: asyncio.PriorityQueue = asyncio.PriorityQueue()
        # Push the initial URL with score 0 and depth 0.
        await queue.put((0, 0, start_url, None))
        visited: Set[str] = set()
        depths: Dict[str, int] = {start_url: 0}

        while not queue.empty() and not self._cancel_event.is_set():
            # Stop if we've reached the max pages limit
            if self._pages_crawled >= self.max_pages:
                self.logger.info(f"Max pages limit ({self.max_pages}) reached, stopping crawl")
                break

            batch: List[Tuple[float, int, str, Optional[str]]] = []
            # Retrieve up to BATCH_SIZE items from the priority queue.
            for _ in range(BATCH_SIZE):
                if queue.empty():
                    break
                item = await queue.get()
                score, depth, url, parent_url = item
                if url in visited:
                    continue
                visited.add(url)
                batch.append(item)

            if not batch:
                continue

            # Process the current batch of URLs.
            urls = [item[2] for item in batch]
            batch_config = config.clone(deep_crawl_strategy=None, stream=True)
            stream_gen = await crawler.arun_many(urls=urls, config=batch_config)
            async for result in stream_gen:
                result_url = result.url
                # Find the corresponding tuple from the batch.
                corresponding = next((item for item in batch if item[2] == result_url), None)
                if not corresponding:
                    continue
                score, depth, url, parent_url = corresponding
                result.metadata = result.metadata or {}
                result.metadata["depth"] = depth
                result.metadata["parent_url"] = parent_url
                result.metadata["score"] = score

                # Count only successful crawls toward max_pages limit
                if result.success:
                    self._pages_crawled += 1

                yield result

                # Only discover links from successful crawls
                if result.success:
                    # Discover new links from this result
                    new_links: List[Tuple[str, Optional[str]]] = []
                    await self.link_discovery(result, result_url, depth, visited, new_links, depths)

                    for new_url, new_parent in new_links:
                        new_depth = depths.get(new_url, depth + 1)
                        new_score = self.url_scorer.score(new_url) if self.url_scorer else 0
                        await queue.put((new_score, new_depth, new_url, new_parent))

        # End of crawl.

    async def _arun_batch(
        self,
        start_url: str,
        crawler: AsyncWebCrawler,
        config: CrawlerRunConfig,
    ) -> List[CrawlResult]:
        """
        Best-first crawl in batch mode.

        Aggregates all CrawlResults into a list.
        """
        results: List[CrawlResult] = []
        async for result in self._arun_best_first(start_url, crawler, config):
            results.append(result)
        return results

    async def _arun_stream(
        self,
        start_url: str,
        crawler: AsyncWebCrawler,
        config: CrawlerRunConfig,
    ) -> AsyncGenerator[CrawlResult, None]:
        """
        Best-first crawl in streaming mode.

        Yields CrawlResults as they become available.
        """
        async for result in self._arun_best_first(start_url, crawler, config):
            yield result

    async def arun(
        self,
        start_url: str,
        crawler: AsyncWebCrawler,
        config: Optional[CrawlerRunConfig] = None,
    ) -> "RunManyReturn":
        """
        Main entry point for best-first crawling.

        Returns either a list (batch mode) or an async generator (stream mode)
        of CrawlResults.
        """
        if config is None:
            raise ValueError("CrawlerRunConfig must be provided")
        if config.stream:
            return self._arun_stream(start_url, crawler, config)
        else:
            return await self._arun_batch(start_url, crawler, config)

    async def shutdown(self) -> None:
        """
        Signal cancellation and clean up resources.
        """
        self._cancel_event.set()
        self.stats.end_time = datetime.now()
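
Note that asyncio.PriorityQueue pops the smallest tuple first, so lower scores are dequeued first, exactly as the _arun_best_first docstring states. A hedged usage sketch; the KeywordRelevanceScorer constructor keyword is an assumption:

import asyncio

from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
from crawl4ai.deep_crawling import BestFirstCrawlingStrategy, KeywordRelevanceScorer

async def main():
    strategy = BestFirstCrawlingStrategy(
        max_depth=2,
        url_scorer=KeywordRelevanceScorer(keywords=["crawl", "async"]),  # assumed kwarg
        max_pages=25,
    )
    config = CrawlerRunConfig(deep_crawl_strategy=strategy, stream=True)
    async with AsyncWebCrawler() as crawler:
        async for result in await crawler.arun("https://docs.crawl4ai.com", config=config):
            print(result.metadata["score"], result.url)

asyncio.run(main())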
245
crawl4ai/deep_crawling/bfs_strategy.py
Normal file
@@ -0,0 +1,245 @@
# bfs_deep_crawl_strategy.py
import asyncio
import logging
from datetime import datetime
from typing import AsyncGenerator, Optional, Set, Dict, List, Tuple
from urllib.parse import urlparse

from ..models import TraversalStats
from .filters import FilterChain
from .scorers import URLScorer
from . import DeepCrawlStrategy
from ..types import AsyncWebCrawler, CrawlerRunConfig, CrawlResult
from ..utils import normalize_url_for_deep_crawl, efficient_normalize_url_for_deep_crawl
from math import inf as infinity


class BFSDeepCrawlStrategy(DeepCrawlStrategy):
    """
    Breadth-First Search deep crawling strategy.

    Core functions:
    - arun: Main entry point; splits execution into batch or stream modes.
    - link_discovery: Extracts, filters, and (if needed) scores the outgoing URLs.
    - can_process_url: Validates URL format and applies the filter chain.
    """
    def __init__(
        self,
        max_depth: int,
        filter_chain: FilterChain = FilterChain(),
        url_scorer: Optional[URLScorer] = None,
        include_external: bool = False,
        score_threshold: float = -infinity,
        max_pages: int = infinity,
        logger: Optional[logging.Logger] = None,
    ):
        self.max_depth = max_depth
        self.filter_chain = filter_chain
        self.url_scorer = url_scorer
        self.include_external = include_external
        self.score_threshold = score_threshold
        self.max_pages = max_pages
        self.logger = logger or logging.getLogger(__name__)
        self.stats = TraversalStats(start_time=datetime.now())
        self._cancel_event = asyncio.Event()
        self._pages_crawled = 0

    async def can_process_url(self, url: str, depth: int) -> bool:
        """
        Validates the URL and applies the filter chain.
        For the start URL (depth 0) filtering is bypassed.
        """
        try:
            parsed = urlparse(url)
            if not parsed.scheme or not parsed.netloc:
                raise ValueError("Missing scheme or netloc")
            if parsed.scheme not in ("http", "https"):
                raise ValueError("Invalid scheme")
            if "." not in parsed.netloc:
                raise ValueError("Invalid domain")
        except Exception as e:
            self.logger.warning(f"Invalid URL: {url}, error: {e}")
            return False

        if depth != 0 and not await self.filter_chain.apply(url):
            return False

        return True

    async def link_discovery(
        self,
        result: CrawlResult,
        source_url: str,
        current_depth: int,
        visited: Set[str],
        next_level: List[Tuple[str, Optional[str]]],
        depths: Dict[str, int],
    ) -> None:
        """
        Extracts links from the crawl result, validates and scores them, and
        prepares the next level of URLs.
        Each valid URL is appended to next_level as a tuple (url, parent_url)
        and its depth is tracked.
        """
        next_depth = current_depth + 1
        if next_depth > self.max_depth:
            return

        # If we've reached the max pages limit, don't discover new links
        remaining_capacity = self.max_pages - self._pages_crawled
        if remaining_capacity <= 0:
            self.logger.info(f"Max pages limit ({self.max_pages}) reached, stopping link discovery")
            return

        # Get internal links and, if enabled, external links.
        links = result.links.get("internal", [])
        if self.include_external:
            links += result.links.get("external", [])

        valid_links = []

        # First collect all valid links
        for link in links:
            url = link.get("href")
            # Strip URL fragments to avoid duplicate crawling
            # base_url = url.split('#')[0] if url else url
            base_url = normalize_url_for_deep_crawl(url, source_url)
            if base_url in visited:
                continue
            if not await self.can_process_url(url, next_depth):
                self.stats.urls_skipped += 1
                continue

            # Score the URL if a scorer is provided
            score = self.url_scorer.score(base_url) if self.url_scorer else 0

            # Skip URLs with scores below the threshold
            if score < self.score_threshold:
                self.logger.debug(f"URL {url} skipped: score {score} below threshold {self.score_threshold}")
                self.stats.urls_skipped += 1
                continue

            visited.add(base_url)
            valid_links.append((base_url, score))

        # If we have more valid links than capacity, sort by score and take the top ones
        if len(valid_links) > remaining_capacity:
            if self.url_scorer:
                # Sort by score in descending order
                valid_links.sort(key=lambda x: x[1], reverse=True)
            # Take only as many as we have capacity for
            valid_links = valid_links[:remaining_capacity]
            self.logger.info(f"Limiting to {remaining_capacity} URLs due to max_pages limit")

        # Process the final selected links
        for url, score in valid_links:
            # Attach the score to metadata if needed
            if score:
                result.metadata = result.metadata or {}
                result.metadata["score"] = score
            next_level.append((url, source_url))
            depths[url] = next_depth

    async def _arun_batch(
        self,
        start_url: str,
        crawler: AsyncWebCrawler,
        config: CrawlerRunConfig,
    ) -> List[CrawlResult]:
        """
        Batch (non-streaming) mode:
        Processes one BFS level at a time, then returns all the results.
        """
        visited: Set[str] = set()
        # current_level holds tuples: (url, parent_url)
        current_level: List[Tuple[str, Optional[str]]] = [(start_url, None)]
        depths: Dict[str, int] = {start_url: 0}

        results: List[CrawlResult] = []

        while current_level and not self._cancel_event.is_set():
            next_level: List[Tuple[str, Optional[str]]] = []
            urls = [url for url, _ in current_level]

            # Clone the config to disable deep crawling recursion and enforce batch mode.
            batch_config = config.clone(deep_crawl_strategy=None, stream=False)
            batch_results = await crawler.arun_many(urls=urls, config=batch_config)

            # Update pages crawled counter - count only successful crawls
            successful_results = [r for r in batch_results if r.success]
            self._pages_crawled += len(successful_results)

            for result in batch_results:
                url = result.url
                depth = depths.get(url, 0)
                result.metadata = result.metadata or {}
                result.metadata["depth"] = depth
                parent_url = next((parent for (u, parent) in current_level if u == url), None)
                result.metadata["parent_url"] = parent_url
                results.append(result)

                # Only discover links from successful crawls
                if result.success:
                    # Link discovery will handle the max pages limit internally
                    await self.link_discovery(result, url, depth, visited, next_level, depths)

            current_level = next_level

        return results

    async def _arun_stream(
        self,
        start_url: str,
        crawler: AsyncWebCrawler,
        config: CrawlerRunConfig,
    ) -> AsyncGenerator[CrawlResult, None]:
        """
        Streaming mode:
        Processes one BFS level at a time and yields results immediately as they arrive.
        """
        visited: Set[str] = set()
        current_level: List[Tuple[str, Optional[str]]] = [(start_url, None)]
        depths: Dict[str, int] = {start_url: 0}

        while current_level and not self._cancel_event.is_set():
            next_level: List[Tuple[str, Optional[str]]] = []
            urls = [url for url, _ in current_level]
            visited.update(urls)

            stream_config = config.clone(deep_crawl_strategy=None, stream=True)
            stream_gen = await crawler.arun_many(urls=urls, config=stream_config)

            # Keep track of processed results for this batch
            results_count = 0
            async for result in stream_gen:
                url = result.url
                depth = depths.get(url, 0)
                result.metadata = result.metadata or {}
                result.metadata["depth"] = depth
                parent_url = next((parent for (u, parent) in current_level if u == url), None)
                result.metadata["parent_url"] = parent_url

                # Count only successful crawls
                if result.success:
                    self._pages_crawled += 1

                results_count += 1
                yield result

                # Only discover links from successful crawls
                if result.success:
                    # Link discovery will handle the max pages limit internally
                    await self.link_discovery(result, url, depth, visited, next_level, depths)

            # If we didn't get results back (e.g. due to errors), the URLs have
            # already been marked as visited above, so we just log and move on
            # instead of getting stuck in an infinite loop; these URLs do not
            # count toward the max_pages limit.
            if results_count == 0 and urls:
                self.logger.warning(f"No results returned for {len(urls)} URLs, marking as visited")

            current_level = next_level

    async def shutdown(self) -> None:
        """
        Clean up resources and signal cancellation of the crawl.
        """
        self._cancel_event.set()
        self.stats.end_time = datetime.now()
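
score_threshold gives BFS a cheap relevance gate: any discovered link scoring below the threshold is skipped in link_discovery before it is ever queued. A short sketch; the scorer's constructor keyword is an assumption:

from crawl4ai.deep_crawling import BFSDeepCrawlStrategy, KeywordRelevanceScorer

strategy = BFSDeepCrawlStrategy(
    max_depth=3,
    url_scorer=KeywordRelevanceScorer(keywords=["pricing", "docs"]),  # assumed kwarg
    score_threshold=0.2,  # links scoring below 0.2 are skipped in link_discovery
    max_pages=100,
)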
432
crawl4ai/deep_crawling/crazy.py
Normal file
@@ -0,0 +1,432 @@
from __future__ import annotations
# I just got crazy, trying to write K&R C but in Python. Right now I feel like I'm in a quantum state.
# I probably won't use this; I just want to leave it here. A century later, the future human race will be like, "WTF?"

# ------ Imports That Will Make You Question Reality ------ #
from functools import wraps
from contextvars import ContextVar
import inspect

from crawl4ai import CacheMode
from crawl4ai.async_configs import CrawlerRunConfig
from crawl4ai.models import CrawlResult, TraversalStats
from crawl4ai.deep_crawling.filters import FilterChain
from crawl4ai.async_webcrawler import AsyncWebCrawler
import time
import logging
from urllib.parse import urlparse

from abc import ABC, abstractmethod
from collections import deque
import asyncio
from typing import (
    AsyncGenerator,
    Dict,
    List,
    TypeVar,
    Generic,
    Tuple,
    Callable,
    Awaitable,
    Union,
)
from functools import lru_cache
import mmh3
from bitarray import bitarray
import numpy as np
from heapq import heappush, heappop

# ------ Type Algebra Mastery ------ #
CrawlResultT = TypeVar("CrawlResultT", bound="CrawlResult")
PriorityT = TypeVar("PriorityT")
P = TypeVar("P")

# ------ Hyperscalar Context Management ------ #
deep_crawl_ctx = ContextVar("deep_crawl_stack", default=deque())

# ------ Algebraic Crawler Monoid ------ #
class TraversalContext:
    __slots__ = ('visited', 'frontier', 'depths', 'priority_fn', 'current_depth')

    def __init__(self,
                 priority_fn: Callable[[str], Awaitable[float]] = lambda _: 1.0):
        self.visited: BloomFilter = BloomFilter(10**6, 0.01)  # 1M items, 1% FP
        self.frontier: PriorityQueue = PriorityQueue()
        self.depths: Dict[str, int] = {}
        self.priority_fn = priority_fn
        self.current_depth = 0

    def clone_for_level(self) -> TraversalContext:
        """Monadic context propagation"""
        new_ctx = TraversalContext(self.priority_fn)
        new_ctx.visited = self.visited.copy()
        new_ctx.depths = self.depths.copy()
        new_ctx.current_depth = self.current_depth
        return new_ctx


class PriorityQueue(Generic[PriorityT]):
    """Fibonacci heap-inspired priority queue with O(1) amortized operations"""
    __slots__ = ('_heap', '_index')

    def __init__(self):
        self._heap: List[Tuple[PriorityT, float, P]] = []
        self._index: Dict[P, int] = {}

    def insert(self, priority: PriorityT, item: P) -> None:
        tiebreaker = time.time()  # Ensure FIFO for equal priorities
        heappush(self._heap, (priority, tiebreaker, item))
        self._index[item] = len(self._heap) - 1

    def extract(self, top_n=1) -> List[P]:
        items = []
        for _ in range(top_n):
            if not self._heap:
                break
            priority, _, item = heappop(self._heap)
            del self._index[item]
            items.append(item)
        if not items:
            raise IndexError("Priority queue empty")
        return items

    def is_empty(self) -> bool:
        return not bool(self._heap)


class BloomFilter:
    """Optimal Bloom filter using murmur3 hash avalanche"""
    __slots__ = ('size', 'hashes', 'bits')

    def __init__(self, capacity: int, error_rate: float):
        self.size = self._optimal_size(capacity, error_rate)
        self.hashes = self._optimal_hashes(capacity, self.size)
        self.bits = bitarray(self.size)
        self.bits.setall(False)

    @staticmethod
    def _optimal_size(n: int, p: float) -> int:
        m = -(n * np.log(p)) / (np.log(2) ** 2)
        return int(np.ceil(m))

    @staticmethod
    def _optimal_hashes(n: int, m: int) -> int:
        k = (m / n) * np.log(2)
        return int(np.ceil(k))

    def add(self, item: str) -> None:
        for seed in range(self.hashes):
            digest = mmh3.hash(item, seed) % self.size
            self.bits[digest] = True

    def __contains__(self, item: str) -> bool:
        return all(
            self.bits[mmh3.hash(item, seed) % self.size]
            for seed in range(self.hashes)
        )

    def copy(self) -> BloomFilter:
        new = object.__new__(BloomFilter)
        new.size = self.size
        new.hashes = self.hashes
        new.bits = self.bits.copy()
        return new

    def __len__(self) -> int:
        """
        Estimates the number of items in the filter using the
        count of set bits and the formula:
            n = -m/k * ln(1 - X/m)
        where:
            m = size of bit array
            k = number of hash functions
            X = count of set bits
        """
        set_bits = self.bits.count(True)
        if set_bits == 0:
            return 0

        # Use the inverse bloom filter formula to estimate cardinality
        return int(
            -(self.size / self.hashes) *
            np.log(1 - set_bits / self.size)
        )

    def bit_count(self) -> int:
        """Returns the raw count of set bits in the filter"""
        return self.bits.count(True)

    def __repr__(self) -> str:
        return f"BloomFilter(est_items={len(self)}, bits={self.bit_count()}/{self.size})"


# ------ Hyper-Optimal Deep Crawl Core ------ #
class DeepCrawlDecorator:
    """Metaprogramming marvel: Zero-cost deep crawl abstraction"""
    def __init__(self, crawler: AsyncWebCrawler):
        self.crawler = crawler

    def __call__(self, original_arun: Callable) -> Callable:
        @wraps(original_arun)
        async def quantum_arun(url: str, config: CrawlerRunConfig = None, **kwargs):
            stack = deep_crawl_ctx.get()
            if config and config.deep_crawl_strategy and not stack:
                stack.append(self.crawler)
                try:
                    deep_crawl_ctx.set(stack)
                    async for result in config.deep_crawl_strategy.traverse(
                        start_url=url,
                        crawler=self.crawler,
                        config=config
                    ):
                        yield result
                finally:
                    stack.pop()
                    deep_crawl_ctx.set(stack)
            else:
                result = await original_arun(url, config=config, **kwargs)
                yield result
        return quantum_arun


async def collect_results(url, crawler, config):
    # Restore arun to its original implementation if it was decorated.
    if id(getattr(crawler, "arun")) != id(getattr(crawler, "original_arun")):
        setattr(crawler, "arun", getattr(crawler, "original_arun"))

    ret = crawler.arun(url, config=config)
    # If arun is an async generator, iterate over it
    if inspect.isasyncgen(ret):
        return [r async for r in ret]
    # Otherwise, await the coroutine and normalize to a list
    result = await ret
    return result if isinstance(result, list) else [result]


async def collect_many_results(urls, crawler, config):
    # Restore arun to its original implementation if it was decorated.
    if id(getattr(crawler, "arun")) != id(getattr(crawler, "original_arun")):
        setattr(crawler, "arun", getattr(crawler, "original_arun"))
    ret = crawler.arun_many(urls, config=config)
    # If arun_many is an async generator, iterate over it
    if inspect.isasyncgen(ret):
        return [r async for r in ret]
    # Otherwise, await the coroutine and normalize to a list
    result = await ret
    return result if isinstance(result, list) else [result]


# ------ Deep Crawl Strategy Interface ------ #
CrawlResultT = TypeVar("CrawlResultT", bound=CrawlResult)
# In batch mode we return List[CrawlResult] and in stream mode an AsyncGenerator.
RunManyReturn = Union[CrawlResultT, List[CrawlResultT], AsyncGenerator[CrawlResultT, None]]


class DeepCrawlStrategy(ABC):
    """Abstract base class that will make Dijkstra smile"""
    @abstractmethod
    async def traverse(self,
                       start_url: str,
                       crawler: AsyncWebCrawler,
                       config: CrawlerRunConfig) -> RunManyReturn:
        """Traverse with O(1) memory complexity via generator fusion"""
        ...

    @abstractmethod
    def precompute_priority(self, url: str) -> Awaitable[float]:
        """Quantum-inspired priority precomputation"""
        pass

    @abstractmethod
    async def link_hypercube(self, result: CrawlResult) -> AsyncGenerator[str, None]:
        """Hilbert-curve optimized link generation"""
        pass


# ------ BFS That Would Make Knuth Proud ------ #

def calculate_quantum_batch_size(
    depth: int,
    max_depth: int,
    frontier_size: int,
    visited_size: int
) -> int:
    """
    Calculates optimal batch size for URL processing using quantum-inspired mathematical principles.

    This function implements a sophisticated batch size calculation using:
    1. Golden Ratio (φ) based scaling for optimal irrationality
    2. Depth-aware amplitude modulation
    3. Harmonic series dampening
    4. Logarithmic growth control
    5. Dynamic frontier adaptation

    The formula follows the quantum harmonic oscillator principle:
        N = ⌈φ^(2d) * log₂(|V|) * H(d)⁻¹ * min(20, |F|/10)⌉
    where:
        φ = Golden Ratio ((1 + √5) / 2)
        d = depth factor (normalized remaining depth)
        |V| = size of visited set
        H(d) = d-th harmonic number
        |F| = frontier size

    Args:
        depth (int): Current traversal depth
        max_depth (int): Maximum allowed depth
        frontier_size (int): Current size of frontier queue
        visited_size (int): Number of URLs visited so far

    Returns:
        int: Optimal batch size bounded between 1 and 100

    Mathematical Properties:
        - Maintains O(log n) growth with respect to visited size
        - Provides φ-optimal distribution of resources
        - Ensures quantum-like state transitions between depths
        - Harmonically dampened to prevent exponential explosion
    """
    # Golden ratio φ = (1 + √5) / 2
    φ = (1 + 5 ** 0.5) / 2

    # Calculate normalized depth factor [0, 1]
    depth_factor = (max_depth - depth) / max_depth if depth < max_depth else 0

    # Compute harmonic number for current depth
    harmonic = sum(1/k for k in range(1, depth + 2))

    # Calculate quantum batch size
    batch_size = int(np.ceil(
        (φ ** (depth_factor * 2)) *            # Golden ratio scaling
        np.log2(visited_size + 2) *            # Logarithmic growth factor
        (1 / harmonic) *                       # Harmonic dampening
        max(1, min(20, frontier_size / 10))    # Frontier-aware scaling
    ))

    # Enforce practical bounds
    return max(1, min(100, batch_size))


class BFSDeepCrawlStrategy(DeepCrawlStrategy):
    """Breadth-First Search with Einstein-Rosen bridge optimization"""
    __slots__ = ('max_depth', 'filter_chain', 'priority_fn', 'stats', '_cancel')

    def __init__(self,
                 max_depth: int,
                 filter_chain: FilterChain = FilterChain(),
                 priority_fn: Callable[[str], Awaitable[float]] = lambda url: 1.0,
                 logger: logging.Logger = None):
        self.max_depth = max_depth
        self.filter_chain = filter_chain
        self.priority_fn = priority_fn
        self.stats = TraversalStats()
        self._cancel = asyncio.Event()
        self.semaphore = asyncio.Semaphore(1000)

    async def traverse(self,
                       start_url: str,
                       crawler: AsyncWebCrawler,
                       config: CrawlerRunConfig) -> RunManyReturn:
        """Non-blocking BFS with O(b^d) time complexity awareness"""
        ctx = TraversalContext(self.priority_fn)
        ctx.frontier.insert(self.priority_fn(start_url), (start_url, None, 0))
        ctx.visited.add(start_url)
        ctx.depths[start_url] = 0

        while not ctx.frontier.is_empty() and not self._cancel.is_set():
            # Use the batch-size formula above to find the top_n value
            top_n = calculate_quantum_batch_size(
                depth=ctx.current_depth,
                max_depth=self.max_depth,
                frontier_size=len(ctx.frontier._heap),
                visited_size=len(ctx.visited)
            )

            urls = ctx.frontier.extract(top_n=top_n)
            if urls:
                ctx.current_depth = urls[0][2]

            async with self.semaphore:
                results = await collect_many_results([url for (url, parent, depth) in urls], crawler, config)
                for ix, result in enumerate(results):
                    url, parent, depth = result.url, urls[ix][1], urls[ix][2]
                    result.metadata['depth'] = depth
                    result.metadata['parent'] = parent
                    yield result

                    if depth < self.max_depth:
                        async for link in self.link_hypercube(result):
                            if link not in ctx.visited:
                                priority = self.priority_fn(link)
                                ctx.frontier.insert(priority, (link, url, depth + 1))
                                ctx.visited.add(link)
                                ctx.depths[link] = depth + 1

    @lru_cache(maxsize=65536)
    async def validate_url(self, url: str) -> bool:
        """Memoized URL validation with λ-calculus purity"""
        try:
            parsed = urlparse(url)
            return (parsed.scheme in {'http', 'https'}
                    and '.' in parsed.netloc
                    and await self.filter_chain.apply(url))
        except Exception:
            return False

    async def link_hypercube(self, result: CrawlResult) -> AsyncGenerator[str, None]:
        """Hilbert-ordered link generation with O(1) yield latency"""
        links = (link['href'] for link in result.links.get('internal', []))
        validated = filter(self.validate_url, links)
        for link in sorted(validated, key=lambda x: -self.priority_fn(x)):
            yield link

    def __aiter__(self) -> AsyncGenerator[CrawlResult, None]:
        """Native async iterator interface"""
        return self.traverse()

    async def __anext__(self) -> CrawlResult:
        """True async iterator protocol implementation"""
        result = await self.traverse().__anext__()
        if result:
            return result
        raise StopAsyncIteration

    async def precompute_priority(self, url):
        return super().precompute_priority(url)

    async def shutdown(self):
        self._cancel.set()


# ------ Usage That Will Drop Jaws ------ #
async def main():
    """Quantum crawl example"""
    strategy = BFSDeepCrawlStrategy(
        max_depth=2,
        priority_fn=lambda url: 1.0 / (len(url) + 1e-9),  # Inverse length priority
        # filter_chain=FilterChain(...)
    )

    config: CrawlerRunConfig = CrawlerRunConfig(
        deep_crawl_strategy=strategy,
        stream=False,
        verbose=True,
        cache_mode=CacheMode.BYPASS
    )

    async with AsyncWebCrawler() as crawler:
        run_decorator = DeepCrawlDecorator(crawler)
        setattr(crawler, "original_arun", crawler.arun)
        crawler.arun = run_decorator(crawler.arun)
        start_time = time.perf_counter()
        async for result in crawler.arun("https://docs.crawl4ai.com", config=config):
            print(f"🌀 {result.url} (Depth: {result.metadata['depth']})")
        print(f"Deep crawl completed in {time.perf_counter() - start_time:.2f}s")


if __name__ == "__main__":
    asyncio.run(main())
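
For all its framing, calculate_quantum_batch_size is deterministic arithmetic. A quick worked check of the formula at the start of a crawl (depth 0 of 2, 50 URLs queued, nothing visited yet):

# depth_factor = (2 - 0) / 2 = 1.0, so φ^(2 * 1.0) = φ² ≈ 2.618
# harmonic = H(1) = 1.0, and log2(0 + 2) = 1.0
# frontier factor = max(1, min(20, 50 / 10)) = 5.0
# batch = ceil(2.618 * 1.0 * 1.0 * 5.0) = ceil(13.09) = 14
print(calculate_quantum_batch_size(depth=0, max_depth=2, frontier_size=50, visited_size=0))  # -> 14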
102
crawl4ai/deep_crawling/dfs_strategy.py
Normal file
@@ -0,0 +1,102 @@
# dfs_deep_crawl_strategy.py
from typing import AsyncGenerator, Optional, Set, Dict, List, Tuple

from ..models import CrawlResult
from .bfs_strategy import BFSDeepCrawlStrategy  # noqa
from ..types import AsyncWebCrawler, CrawlerRunConfig

class DFSDeepCrawlStrategy(BFSDeepCrawlStrategy):
    """
    Depth-First Search (DFS) deep crawling strategy.

    Inherits URL validation and link discovery from BFSDeepCrawlStrategy.
    Overrides _arun_batch and _arun_stream to use a stack (LIFO) for DFS traversal.
    """
    async def _arun_batch(
        self,
        start_url: str,
        crawler: AsyncWebCrawler,
        config: CrawlerRunConfig,
    ) -> List[CrawlResult]:
        """
        Batch (non-streaming) DFS mode.
        Uses a stack to traverse URLs in DFS order, aggregating CrawlResults into a list.
        """
        visited: Set[str] = set()
        # Stack items: (url, parent_url, depth)
        stack: List[Tuple[str, Optional[str], int]] = [(start_url, None, 0)]
        depths: Dict[str, int] = {start_url: 0}
        results: List[CrawlResult] = []

        while stack and not self._cancel_event.is_set():
            url, parent, depth = stack.pop()
            if url in visited or depth > self.max_depth:
                continue
            visited.add(url)

            # Clone config to disable recursive deep crawling.
            batch_config = config.clone(deep_crawl_strategy=None, stream=False)
            url_results = await crawler.arun_many(urls=[url], config=batch_config)

            for result in url_results:
                result.metadata = result.metadata or {}
                result.metadata["depth"] = depth
                result.metadata["parent_url"] = parent
                if self.url_scorer:
                    result.metadata["score"] = self.url_scorer.score(url)
                results.append(result)

                # Count only successful crawls toward max_pages limit,
                # and only discover links from successful crawls.
                if result.success:
                    self._pages_crawled += 1

                    new_links: List[Tuple[str, Optional[str]]] = []
                    await self.link_discovery(result, url, depth, visited, new_links, depths)

                    # Push new links in reverse order so the first discovered is processed next.
                    for new_url, new_parent in reversed(new_links):
                        new_depth = depths.get(new_url, depth + 1)
                        stack.append((new_url, new_parent, new_depth))
        return results

    async def _arun_stream(
        self,
        start_url: str,
        crawler: AsyncWebCrawler,
        config: CrawlerRunConfig,
    ) -> AsyncGenerator[CrawlResult, None]:
        """
        Streaming DFS mode.
        Uses a stack to traverse URLs in DFS order and yields CrawlResults as they become available.
        """
        visited: Set[str] = set()
        stack: List[Tuple[str, Optional[str], int]] = [(start_url, None, 0)]
        depths: Dict[str, int] = {start_url: 0}

        while stack and not self._cancel_event.is_set():
            url, parent, depth = stack.pop()
            if url in visited or depth > self.max_depth:
                continue
            visited.add(url)

            stream_config = config.clone(deep_crawl_strategy=None, stream=True)
            stream_gen = await crawler.arun_many(urls=[url], config=stream_config)
            async for result in stream_gen:
                result.metadata = result.metadata or {}
                result.metadata["depth"] = depth
                result.metadata["parent_url"] = parent
                if self.url_scorer:
                    result.metadata["score"] = self.url_scorer.score(url)
                yield result

                # Only count successful crawls toward max_pages limit
                # and only discover links from successful crawls
                if result.success:
                    self._pages_crawled += 1

                    new_links: List[Tuple[str, Optional[str]]] = []
                    await self.link_discovery(result, url, depth, visited, new_links, depths)
                    for new_url, new_parent in reversed(new_links):
                        new_depth = depths.get(new_url, depth + 1)
                        stack.append((new_url, new_parent, new_depth))
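
Because DFS pops from the end of the stack, it follows one branch down to max_depth before backtracking, which suits hierarchies like documentation trees. A minimal sketch:

from crawl4ai.deep_crawling import DFSDeepCrawlStrategy

# Inherits filtering, scoring, and max_pages handling from BFSDeepCrawlStrategy.
strategy = DFSDeepCrawlStrategy(max_depth=3, max_pages=50)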
666
crawl4ai/deep_crawling/filters.py
Normal file
@@ -0,0 +1,666 @@
from abc import ABC, abstractmethod
|
||||
from typing import List, Pattern, Set, Union
|
||||
from urllib.parse import urlparse
|
||||
from array import array
|
||||
import re
|
||||
import logging
|
||||
from functools import lru_cache
|
||||
import fnmatch
|
||||
from dataclasses import dataclass
|
||||
import weakref
|
||||
import math
|
||||
from collections import defaultdict
|
||||
from typing import Dict
|
||||
from ..utils import HeadPeekr
|
||||
import asyncio
|
||||
import inspect
|
||||
|
||||
|
||||
@dataclass
|
||||
class FilterStats:
|
||||
__slots__ = ("_counters",)
|
||||
|
||||
def __init__(self):
|
||||
# Use array of unsigned ints for atomic operations
|
||||
self._counters = array("I", [0, 0, 0]) # total, passed, rejected
|
||||
|
||||
@property
|
||||
def total_urls(self):
|
||||
return self._counters[0]
|
||||
|
||||
@property
|
||||
def passed_urls(self):
|
||||
return self._counters[1]
|
||||
|
||||
@property
|
||||
def rejected_urls(self):
|
||||
return self._counters[2]
|
||||
|
||||
|
||||
class URLFilter(ABC):
|
||||
"""Optimized base filter class"""
|
||||
|
||||
__slots__ = ("name", "stats", "_logger_ref")
|
||||
|
||||
def __init__(self, name: str = None):
|
||||
self.name = name or self.__class__.__name__
|
||||
self.stats = FilterStats()
|
||||
# Lazy logger initialization using weakref
|
||||
self._logger_ref = None
|
||||
|
||||
@property
|
||||
def logger(self):
|
||||
if self._logger_ref is None or self._logger_ref() is None:
|
||||
logger = logging.getLogger(f"urlfilter.{self.name}")
|
||||
self._logger_ref = weakref.ref(logger)
|
||||
return self._logger_ref()
|
||||
|
||||
@abstractmethod
|
||||
def apply(self, url: str) -> bool:
|
||||
pass
|
||||
|
||||
def _update_stats(self, passed: bool):
|
||||
# Use direct array index for speed
|
||||
self.stats._counters[0] += 1 # total
|
||||
self.stats._counters[1] += passed # passed
|
||||
self.stats._counters[2] += not passed # rejected
|
||||
|
||||
|
||||
class FilterChain:
|
||||
"""Optimized filter chain"""
|
||||
|
||||
__slots__ = ("filters", "stats", "_logger_ref")
|
||||
|
||||
def __init__(self, filters: List[URLFilter] = None):
|
||||
self.filters = tuple(filters or []) # Immutable tuple for speed
|
||||
self.stats = FilterStats()
|
||||
self._logger_ref = None
|
||||
|
||||
@property
|
||||
def logger(self):
|
||||
if self._logger_ref is None or self._logger_ref() is None:
|
||||
logger = logging.getLogger("urlfilter.chain")
|
||||
self._logger_ref = weakref.ref(logger)
|
||||
return self._logger_ref()
|
||||
|
||||
def add_filter(self, filter_: URLFilter) -> "FilterChain":
|
||||
"""Add a filter to the chain"""
|
||||
self.filters.append(filter_)
|
||||
return self # Enable method chaining
|
||||
|
||||
async def apply(self, url: str) -> bool:
|
||||
"""Apply all filters concurrently when possible"""
|
||||
self.stats._counters[0] += 1 # Total processed URLs
|
||||
|
||||
tasks = []
|
||||
for f in self.filters:
|
||||
result = f.apply(url)
|
||||
|
||||
if inspect.isawaitable(result):
|
||||
tasks.append(result) # Collect async tasks
|
||||
elif not result: # Sync rejection
|
||||
self.stats._counters[2] += 1 # Sync rejected
|
||||
return False
|
||||
|
||||
if tasks:
|
||||
results = await asyncio.gather(*tasks)
|
||||
|
||||
# Count how many filters rejected
|
||||
rejections = results.count(False)
|
||||
self.stats._counters[2] += rejections
|
||||
|
||||
if not all(results):
|
||||
return False # Stop early if any filter rejected
|
||||
|
||||
self.stats._counters[1] += 1 # Passed
|
||||
return True
|
||||
|
||||
|
||||
class URLPatternFilter(URLFilter):
|
||||
"""Pattern filter balancing speed and completeness"""
|
||||
|
||||
__slots__ = (
|
||||
"_simple_suffixes",
|
||||
"_simple_prefixes",
|
||||
"_domain_patterns",
|
||||
"_path_patterns",
|
||||
"_reverse",
|
||||
)
|
||||
|
||||
PATTERN_TYPES = {
|
||||
"SUFFIX": 1, # *.html
|
||||
"PREFIX": 2, # /foo/*
|
||||
"DOMAIN": 3, # *.example.com
|
||||
"PATH": 4, # Everything else
|
||||
"REGEX": 5,
|
||||
}
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
patterns: Union[str, Pattern, List[Union[str, Pattern]]],
|
||||
use_glob: bool = True,
|
||||
reverse: bool = False,
|
||||
):
|
||||
super().__init__()
|
||||
self._reverse = reverse
|
||||
patterns = [patterns] if isinstance(patterns, (str, Pattern)) else patterns
|
||||
|
||||
self._simple_suffixes = set()
|
||||
self._simple_prefixes = set()
|
||||
self._domain_patterns = []
|
||||
self._path_patterns = []
|
||||
|
||||
for pattern in patterns:
|
||||
pattern_type = self._categorize_pattern(pattern)
|
||||
self._add_pattern(pattern, pattern_type)
|
||||
|
||||
def _categorize_pattern(self, pattern: str) -> int:
|
||||
"""Categorize pattern for specialized handling"""
|
||||
if not isinstance(pattern, str):
|
||||
return self.PATTERN_TYPES["PATH"]
|
||||
|
||||
# Check if it's a regex pattern
|
||||
if pattern.startswith("^") or pattern.endswith("$") or "\\d" in pattern:
|
||||
return self.PATTERN_TYPES["REGEX"]
|
||||
|
||||
if pattern.count("*") == 1:
|
||||
if pattern.startswith("*."):
|
||||
return self.PATTERN_TYPES["SUFFIX"]
|
||||
if pattern.endswith("/*"):
|
||||
return self.PATTERN_TYPES["PREFIX"]
|
||||
|
||||
if "://" in pattern and pattern.startswith("*."):
|
||||
return self.PATTERN_TYPES["DOMAIN"]
|
||||
|
||||
return self.PATTERN_TYPES["PATH"]
|
||||
|
||||
def _add_pattern(self, pattern: str, pattern_type: int):
|
||||
"""Add pattern to appropriate matcher"""
|
||||
if pattern_type == self.PATTERN_TYPES["REGEX"]:
|
||||
# For regex patterns, compile directly without glob translation
|
||||
if isinstance(pattern, str) and (
|
||||
pattern.startswith("^") or pattern.endswith("$") or "\\d" in pattern
|
||||
):
|
||||
self._path_patterns.append(re.compile(pattern))
|
||||
return
|
||||
elif pattern_type == self.PATTERN_TYPES["SUFFIX"]:
|
||||
self._simple_suffixes.add(pattern[2:])
|
||||
elif pattern_type == self.PATTERN_TYPES["PREFIX"]:
|
||||
self._simple_prefixes.add(pattern[:-2])
|
||||
elif pattern_type == self.PATTERN_TYPES["DOMAIN"]:
|
||||
self._domain_patterns.append(re.compile(pattern.replace("*.", r"[^/]+\.")))
|
||||
else:
|
||||
if isinstance(pattern, str):
|
||||
# Handle complex glob patterns
|
||||
if "**" in pattern:
|
||||
pattern = pattern.replace("**", ".*")
|
||||
if "{" in pattern:
|
||||
# Convert {a,b} to (a|b)
|
||||
pattern = re.sub(
|
||||
r"\{([^}]+)\}",
|
||||
lambda m: f'({"|".join(m.group(1).split(","))})',
|
||||
pattern,
|
||||
)
|
||||
pattern = fnmatch.translate(pattern)
|
||||
self._path_patterns.append(
|
||||
pattern if isinstance(pattern, Pattern) else re.compile(pattern)
|
||||
)
|
||||
|
||||
@lru_cache(maxsize=10000)
|
||||
def apply(self, url: str) -> bool:
|
||||
# Quick suffix check (*.html)
|
||||
if self._simple_suffixes:
|
||||
path = url.split("?")[0]
|
||||
if path.split("/")[-1].split(".")[-1] in self._simple_suffixes:
|
||||
result = True
|
||||
self._update_stats(result)
|
||||
return not result if self._reverse else result
|
||||
|
||||
# Domain check
|
||||
if self._domain_patterns:
|
||||
for pattern in self._domain_patterns:
|
||||
if pattern.match(url):
|
||||
result = True
|
||||
self._update_stats(result)
|
||||
return not result if self._reverse else result
|
||||
|
||||
# Prefix check (/foo/*)
|
||||
if self._simple_prefixes:
|
||||
path = url.split("?")[0]
|
||||
if any(path.startswith(p) for p in self._simple_prefixes):
|
||||
result = True
|
||||
self._update_stats(result)
|
||||
return not result if self._reverse else result
|
||||
|
||||
# Complex patterns
|
||||
if self._path_patterns:
|
||||
if any(p.search(url) for p in self._path_patterns):
|
||||
result = True
|
||||
self._update_stats(result)
|
||||
return not result if self._reverse else result
|
||||
|
||||
result = False
|
||||
self._update_stats(result)
|
||||
return not result if self._reverse else result
|
||||
|
||||
|
||||
class ContentTypeFilter(URLFilter):
    """Optimized content type filter using fast lookups"""

    __slots__ = ("allowed_types", "_ext_map", "_check_extension")

    # Fast extension to mime type mapping
    _MIME_MAP = {
        # Text Formats
        "txt": "text/plain",
        "html": "text/html",
        "htm": "text/html",
        "xhtml": "application/xhtml+xml",
        "css": "text/css",
        "csv": "text/csv",
        "ics": "text/calendar",
        "js": "application/javascript",
        # Images
        "bmp": "image/bmp",
        "gif": "image/gif",
        "jpeg": "image/jpeg",
        "jpg": "image/jpeg",
        "png": "image/png",
        "svg": "image/svg+xml",
        "tiff": "image/tiff",
        "ico": "image/x-icon",
        "webp": "image/webp",
        # Audio
        "mp3": "audio/mpeg",
        "wav": "audio/wav",
        "ogg": "audio/ogg",
        "m4a": "audio/mp4",
        "aac": "audio/aac",
        # Video
        "mp4": "video/mp4",
        "mpeg": "video/mpeg",
        "webm": "video/webm",
        "avi": "video/x-msvideo",
        "mov": "video/quicktime",
        "flv": "video/x-flv",
        "wmv": "video/x-ms-wmv",
        "mkv": "video/x-matroska",
        # Applications
        "json": "application/json",
        "xml": "application/xml",
        "pdf": "application/pdf",
        "zip": "application/zip",
        "gz": "application/gzip",
        "tar": "application/x-tar",
        "rar": "application/vnd.rar",
        "7z": "application/x-7z-compressed",
        "exe": "application/vnd.microsoft.portable-executable",
        "msi": "application/x-msdownload",
        # Fonts
        "woff": "font/woff",
        "woff2": "font/woff2",
        "ttf": "font/ttf",
        "otf": "font/otf",
        # Microsoft Office
        "doc": "application/msword",
        "dot": "application/msword",
        "docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
        "xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
        "xls": "application/vnd.ms-excel",
        "ppt": "application/vnd.ms-powerpoint",
        "pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
        # OpenDocument Formats
        "odt": "application/vnd.oasis.opendocument.text",
        "ods": "application/vnd.oasis.opendocument.spreadsheet",
        "odp": "application/vnd.oasis.opendocument.presentation",
        # Archives
        "tar.gz": "application/gzip",
        "tgz": "application/gzip",
        "bz2": "application/x-bzip2",
        # Others
        "rtf": "application/rtf",
        "apk": "application/vnd.android.package-archive",
        "epub": "application/epub+zip",
        "jar": "application/java-archive",
        "swf": "application/x-shockwave-flash",
        "midi": "audio/midi",
        "mid": "audio/midi",
        "ps": "application/postscript",
        "ai": "application/postscript",
        "eps": "application/postscript",
        # Custom or less common
        "bin": "application/octet-stream",
        "dmg": "application/x-apple-diskimage",
        "iso": "application/x-iso9660-image",
        "deb": "application/x-debian-package",
        "rpm": "application/x-rpm",
        "sqlite": "application/vnd.sqlite3",
        # Placeholder
        "unknown": "application/octet-stream",  # Fallback for unknown file types
    }

    @staticmethod
    @lru_cache(maxsize=1000)
    def _extract_extension(url: str) -> str:
        """Extracts file extension from a URL."""
        # Remove scheme (http://, https://) if present
        if "://" in url:
            url = url.split("://", 1)[-1]  # Get everything after '://'

        # Remove domain (everything up to the first '/')
        path_start = url.find("/")
        path = url[path_start:] if path_start != -1 else ""

        # Extract last filename in path
        filename = path.rsplit("/", 1)[-1] if "/" in path else ""

        # Extract and validate extension
        if "." not in filename:
            return ""

        return filename.rpartition(".")[-1].lower()

    def __init__(
        self,
        allowed_types: Union[str, List[str]],
        check_extension: bool = True,
        ext_map: Dict[str, str] = _MIME_MAP,
    ):
        super().__init__()
        # Normalize and store as frozenset for fast lookup
        self.allowed_types = frozenset(
            t.lower()
            for t in (
                allowed_types if isinstance(allowed_types, list) else [allowed_types]
            )
        )
        self._check_extension = check_extension

        # Pre-compute extension map for allowed types
        self._ext_map = frozenset(
            ext
            for ext, mime in self._MIME_MAP.items()
            if any(allowed in mime for allowed in self.allowed_types)
        )

    @lru_cache(maxsize=1000)
    def _check_url_cached(self, url: str) -> bool:
        """Cached URL checking"""
        if not self._check_extension:
            return True
        ext = self._extract_extension(url)
        if not ext:
            return True

        return ext in self._ext_map

    def apply(self, url: str) -> bool:
        """Fast extension check with caching"""
        result = self._check_url_cached(url)
        self._update_stats(result)
        return result

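Usage sketch for ContentTypeFilter: allowed types are matched as substrings of the MIME values in _MIME_MAP, so "text/html" admits the "html" and "htm" extensions, and URLs without any extension pass by default:

    content_filter = ContentTypeFilter(allowed_types=["text/html", "application/pdf"])
    content_filter.apply("https://example.com/report.pdf")  # True: "pdf" -> application/pdf
    content_filter.apply("https://example.com/logo.png")    # False: image/png not allowed
    content_filter.apply("https://example.com/about")       # True: no extension to check
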
class DomainFilter(URLFilter):
    """Optimized domain filter with fast lookups and caching"""

    __slots__ = ("_allowed_domains", "_blocked_domains", "_domain_cache")

    # Regex for fast domain extraction
    _DOMAIN_REGEX = re.compile(r"://([^/]+)")

    def __init__(
        self,
        allowed_domains: Union[str, List[str]] = None,
        blocked_domains: Union[str, List[str]] = None,
    ):
        super().__init__()

        # Convert inputs to frozensets for immutable, fast lookups
        self._allowed_domains = (
            frozenset(self._normalize_domains(allowed_domains))
            if allowed_domains
            else None
        )
        self._blocked_domains = (
            frozenset(self._normalize_domains(blocked_domains))
            if blocked_domains
            else frozenset()
        )

    @staticmethod
    def _normalize_domains(domains: Union[str, List[str]]) -> Set[str]:
        """Fast domain normalization"""
        if isinstance(domains, str):
            return {domains.lower()}
        return {d.lower() for d in domains}

    @staticmethod
    def _is_subdomain(domain: str, parent_domain: str) -> bool:
        """Check if domain is a subdomain of parent_domain"""
        return domain == parent_domain or domain.endswith(f".{parent_domain}")

    @staticmethod
    @lru_cache(maxsize=10000)
    def _extract_domain(url: str) -> str:
        """Ultra-fast domain extraction with regex and caching"""
        match = DomainFilter._DOMAIN_REGEX.search(url)
        return match.group(1).lower() if match else ""

    def apply(self, url: str) -> bool:
        """Optimized domain checking with early returns"""
        # Skip processing if no filters
        if not self._blocked_domains and self._allowed_domains is None:
            self._update_stats(True)
            return True

        domain = self._extract_domain(url)

        # Check for blocked domains, including subdomains
        for blocked in self._blocked_domains:
            if self._is_subdomain(domain, blocked):
                self._update_stats(False)
                return False

        # If no allowed domains specified, accept all non-blocked
        if self._allowed_domains is None:
            self._update_stats(True)
            return True

        # Check if domain matches any allowed domain (including subdomains)
        for allowed in self._allowed_domains:
            if self._is_subdomain(domain, allowed):
                self._update_stats(True)
                return True

        # No matches found
        self._update_stats(False)
        return False

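Usage sketch for DomainFilter: blocked domains are checked first, and _is_subdomain makes both lists cover subdomains:

    domain_filter = DomainFilter(
        allowed_domains=["example.com"],
        blocked_domains=["ads.example.com"],
    )
    domain_filter.apply("https://docs.example.com/guide")  # True: subdomain of an allowed domain
    domain_filter.apply("https://ads.example.com/banner")  # False: the block list wins
    domain_filter.apply("https://other.org/")              # False: not under an allowed domain
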
class ContentRelevanceFilter(URLFilter):
    """BM25-based relevance filter using head section content"""

    __slots__ = ("query_terms", "threshold", "k1", "b", "avgdl")

    def __init__(
        self,
        query: str,
        threshold: float,
        k1: float = 1.2,
        b: float = 0.75,
        avgdl: int = 1000,
    ):
        super().__init__(name="BM25RelevanceFilter")
        self.query_terms = self._tokenize(query)
        self.threshold = threshold
        self.k1 = k1  # TF saturation parameter
        self.b = b  # Length normalization parameter
        self.avgdl = avgdl  # Average document length (empirical value)

    async def apply(self, url: str) -> bool:
        head_content = await HeadPeekr.peek_html(url)
        if not head_content:
            self._update_stats(False)
            return False

        # Field extraction with weighting
        fields = {
            "title": HeadPeekr.get_title(head_content) or "",
            "meta": HeadPeekr.extract_meta_tags(head_content),
        }
        doc_text = self._build_document(fields)

        score = self._bm25(doc_text)
        decision = score >= self.threshold
        self._update_stats(decision)
        return decision

    def _build_document(self, fields: Dict) -> str:
        """Weighted document construction"""
        return " ".join(
            [
                fields["title"] * 3,  # Title weight
                fields["meta"].get("description", "") * 2,
                fields["meta"].get("keywords", ""),
                " ".join(fields["meta"].values()),
            ]
        )

    def _tokenize(self, text: str) -> List[str]:
        """Fast case-insensitive tokenization"""
        return text.lower().split()

    def _bm25(self, document: str) -> float:
        """Optimized BM25 implementation for head sections"""
        doc_terms = self._tokenize(document)
        doc_len = len(doc_terms)
        tf = defaultdict(int)

        for term in doc_terms:
            tf[term] += 1

        score = 0.0
        for term in set(self.query_terms):
            term_freq = tf[term]
            idf = math.log((1 + 1) / (term_freq + 0.5) + 1)  # Simplified IDF
            numerator = term_freq * (self.k1 + 1)
            denominator = term_freq + self.k1 * (
                1 - self.b + self.b * (doc_len / self.avgdl)
            )
            score += idf * (numerator / denominator)

        return score

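ContentRelevanceFilter.apply is async because it fetches the page head via HeadPeekr before scoring it with BM25. A sketch of driving it over a batch of URLs (the threshold here is an arbitrary illustration, not a recommended value):

    import asyncio

    async def keep_relevant(urls):
        relevance = ContentRelevanceFilter(query="python asyncio tutorial", threshold=1.0)
        verdicts = await asyncio.gather(*(relevance.apply(u) for u in urls))
        return [u for u, ok in zip(urls, verdicts) if ok]
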
class SEOFilter(URLFilter):
    """Quantitative SEO quality assessment filter using head section analysis"""

    __slots__ = ("threshold", "_weights", "_kw_patterns")

    # Based on SEMrush/Google ranking factors research
    DEFAULT_WEIGHTS = {
        "title_length": 0.15,
        "title_kw": 0.18,
        "meta_description": 0.12,
        "canonical": 0.10,
        "robot_ok": 0.20,  # Most critical factor
        "schema_org": 0.10,
        "url_quality": 0.15,
    }

    def __init__(
        self,
        threshold: float = 0.65,
        keywords: List[str] = None,
        weights: Dict[str, float] = None,
    ):
        super().__init__(name="SEOFilter")
        self.threshold = threshold
        self._weights = weights or self.DEFAULT_WEIGHTS
        self._kw_patterns = (
            re.compile(
                r"\b({})\b".format("|".join(map(re.escape, keywords or []))), re.I
            )
            if keywords
            else None
        )

    async def apply(self, url: str) -> bool:
        head_content = await HeadPeekr.peek_html(url)
        if not head_content:
            self._update_stats(False)
            return False

        meta = HeadPeekr.extract_meta_tags(head_content)
        title = HeadPeekr.get_title(head_content) or ""
        parsed_url = urlparse(url)

        scores = {
            "title_length": self._score_title_length(title),
            "title_kw": self._score_keyword_presence(title),
            "meta_description": self._score_meta_description(
                meta.get("description", "")
            ),
            "canonical": self._score_canonical(meta.get("canonical"), url),
            "robot_ok": 1.0 if "noindex" not in meta.get("robots", "") else 0.0,
            "schema_org": self._score_schema_org(head_content),
            "url_quality": self._score_url_quality(parsed_url),
        }

        total_score = sum(
            weight * scores[factor] for factor, weight in self._weights.items()
        )

        decision = total_score >= self.threshold
        self._update_stats(decision)
        return decision

    def _score_title_length(self, title: str) -> float:
        length = len(title)
        if 50 <= length <= 60:
            return 1.0
        if 40 <= length < 50 or 60 < length <= 70:
            return 0.7
        return 0.3  # Poor length

    def _score_keyword_presence(self, text: str) -> float:
        if not self._kw_patterns:
            return 0.0
        matches = len(self._kw_patterns.findall(text))
        return min(matches * 0.3, 1.0)  # Max 3 matches

    def _score_meta_description(self, desc: str) -> float:
        length = len(desc)
        if 140 <= length <= 160:
            return 1.0
        return 0.5 if 120 <= length <= 200 else 0.2

    def _score_canonical(self, canonical: str, original: str) -> float:
        if not canonical:
            return 0.5  # Neutral score
        return 1.0 if canonical == original else 0.2

    def _score_schema_org(self, html: str) -> float:
        # Detect any schema.org markup in head
        return (
            1.0
            if re.search(r'<script[^>]+type=["\']application/ld\+json', html)
            else 0.0
        )

    def _score_url_quality(self, parsed_url) -> float:
        score = 1.0
        path = parsed_url.path.lower()

        # Penalty factors
        if len(path) > 80:
            score *= 0.7
        if re.search(r"\d{4}", path):
            score *= 0.8  # Numbers in path
        if parsed_url.query:
            score *= 0.6  # URL parameters
        if "_" in path:
            score *= 0.9  # Underscores vs hyphens

        return score

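Usage sketch for SEOFilter, which is likewise async; the keyword list feeds _score_keyword_presence and the remaining factors are computed from the fetched head:

    import asyncio

    seo = SEOFilter(threshold=0.65, keywords=["crawler", "scraping"])
    ok = asyncio.run(seo.apply("https://example.com/web-scraping-guide"))
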
crawl4ai/deep_crawling/scorers.py (new file, 519 lines)
@@ -0,0 +1,519 @@
from abc import ABC, abstractmethod
from typing import List, Dict, Optional
from dataclasses import dataclass
from urllib.parse import urlparse, unquote
import re
import logging
from functools import lru_cache
from array import array
import ctypes
import platform

PLATFORM = platform.system()

# Pre-computed scores for common depth distances (used by PathDepthScorer)
_SCORE_LOOKUP = [1.0, 0.5, 0.3333333333333333, 0.25]

# Pre-computed scores for common year differences
_FRESHNESS_SCORES = [
    1.0,  # Current year
    0.9,  # Last year
    0.8,  # 2 years ago
    0.7,  # 3 years ago
    0.6,  # 4 years ago
    0.5,  # 5 years ago
]


class ScoringStats:
    __slots__ = ('_urls_scored', '_total_score', '_min_score', '_max_score')

    def __init__(self):
        self._urls_scored = 0
        self._total_score = 0.0
        self._min_score = None  # Lazy initialization
        self._max_score = None

    def update(self, score: float) -> None:
        """Optimized update with minimal operations"""
        self._urls_scored += 1
        self._total_score += score

        # Lazy min/max tracking - only if actually accessed
        if self._min_score is not None:
            if score < self._min_score:
                self._min_score = score
        if self._max_score is not None:
            if score > self._max_score:
                self._max_score = score

    def get_average(self) -> float:
        """Direct calculation instead of property"""
        return self._total_score / self._urls_scored if self._urls_scored else 0.0

    def get_min(self) -> float:
        """Lazy min calculation"""
        if self._min_score is None:
            self._min_score = self._total_score / self._urls_scored if self._urls_scored else 0.0
        return self._min_score

    def get_max(self) -> float:
        """Lazy max calculation"""
        if self._max_score is None:
            self._max_score = self._total_score / self._urls_scored if self._urls_scored else 0.0
        return self._max_score

class URLScorer(ABC):
    __slots__ = ('_weight', '_stats')

    def __init__(self, weight: float = 1.0):
        # Store weight directly as float32 for memory efficiency
        self._weight = ctypes.c_float(weight).value
        self._stats = ScoringStats()

    @abstractmethod
    def _calculate_score(self, url: str) -> float:
        """Calculate raw score for URL."""
        pass

    def score(self, url: str) -> float:
        """Calculate weighted score with minimal overhead."""
        score = self._calculate_score(url) * self._weight
        self._stats.update(score)
        return score

    @property
    def stats(self):
        """Access to scoring statistics."""
        return self._stats

    @property
    def weight(self):
        return self._weight

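URLScorer is the extension point: subclasses implement _calculate_score() and inherit weighting plus stats tracking from score(). A hypothetical minimal subclass:

    class HttpsScorer(URLScorer):
        """Toy scorer that prefers https:// URLs (illustration only)."""

        def _calculate_score(self, url: str) -> float:
            return 1.0 if url.startswith("https://") else 0.0

    scorer = HttpsScorer(weight=0.5)
    scorer.score("https://example.com")  # 1.0 * 0.5 = 0.5, also recorded in scorer.stats
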
class CompositeScorer(URLScorer):
    __slots__ = ('_scorers', '_normalize', '_weights_array', '_score_array')

    def __init__(self, scorers: List[URLScorer], normalize: bool = True):
        """Initialize composite scorer combining multiple scoring strategies.

        Optimized for:
        - Fast parallel scoring
        - Memory efficient score aggregation
        - Quick short-circuit conditions
        - Pre-allocated arrays

        Args:
            scorers: List of scoring strategies to combine
            normalize: Whether to normalize final score by scorer count
        """
        super().__init__(weight=1.0)
        self._scorers = scorers
        self._normalize = normalize

        # Pre-allocate arrays for scores and weights
        self._weights_array = array('f', [s.weight for s in scorers])
        self._score_array = array('f', [0.0] * len(scorers))

    @lru_cache(maxsize=10000)
    def _calculate_score(self, url: str) -> float:
        """Calculate combined score from all scoring strategies.

        Uses:
        1. Pre-allocated arrays for scores
        2. Short-circuit on zero scores
        3. Optimized normalization
        4. Vectorized operations where possible

        Args:
            url: URL to score

        Returns:
            Combined and optionally normalized score
        """
        total_score = 0.0
        scores = self._score_array

        # Get scores from all scorers
        for i, scorer in enumerate(self._scorers):
            # Use public score() method which applies weight
            scores[i] = scorer.score(url)
            total_score += scores[i]

        # Normalize if requested
        if self._normalize and self._scorers:
            count = len(self._scorers)
            return total_score / count

        return total_score

    def score(self, url: str) -> float:
        """Public scoring interface with stats tracking.

        Args:
            url: URL to score

        Returns:
            Final combined score
        """
        score = self._calculate_score(url)
        self.stats.update(score)
        return score

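A usage sketch combining two of the concrete scorers defined below; with normalize=True the weighted sum is divided by the number of scorers:

    composite = CompositeScorer(
        [
            KeywordRelevanceScorer(["python", "docs"], weight=1.0),
            PathDepthScorer(optimal_depth=2, weight=0.5),
        ],
        normalize=True,
    )
    composite.score("https://example.com/python/docs")
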
class KeywordRelevanceScorer(URLScorer):
    __slots__ = ('_weight', '_stats', '_keywords', '_case_sensitive')

    def __init__(self, keywords: List[str], weight: float = 1.0, case_sensitive: bool = False):
        super().__init__(weight=weight)
        self._case_sensitive = case_sensitive
        # Pre-process keywords once
        self._keywords = [k if case_sensitive else k.lower() for k in keywords]

    @lru_cache(maxsize=10000)
    def _url_bytes(self, url: str) -> bytes:
        """Cache decoded URL bytes"""
        return url.encode('utf-8') if self._case_sensitive else url.lower().encode('utf-8')

    def _calculate_score(self, url: str) -> float:
        """Fast string matching without regex or byte conversion"""
        if not self._case_sensitive:
            url = url.lower()

        matches = sum(1 for k in self._keywords if k in url)

        # Fast return paths
        if not matches:
            return 0.0
        if matches == len(self._keywords):
            return 1.0

        return matches / len(self._keywords)

class PathDepthScorer(URLScorer):
    __slots__ = ('_weight', '_stats', '_optimal_depth')  # Remove _url_cache

    def __init__(self, optimal_depth: int = 3, weight: float = 1.0):
        super().__init__(weight=weight)
        self._optimal_depth = optimal_depth

    @staticmethod
    @lru_cache(maxsize=10000)
    def _quick_depth(path: str) -> int:
        """Ultra fast path depth calculation.

        Examples:
        - "http://example.com" -> 0  # No path segments
        - "http://example.com/" -> 0  # Empty path
        - "http://example.com/a" -> 1
        - "http://example.com/a/b" -> 2
        """
        if not path or path == '/':
            return 0

        if '/' not in path:
            return 0

        depth = 0
        last_was_slash = True

        for c in path:
            if c == '/':
                if not last_was_slash:
                    depth += 1
                last_was_slash = True
            else:
                last_was_slash = False

        if not last_was_slash:
            depth += 1

        return depth

    @lru_cache(maxsize=10000)  # Cache the whole calculation
    def _calculate_score(self, url: str) -> float:
        pos = url.find('/', url.find('://') + 3)
        if pos == -1:
            depth = 0
        else:
            depth = self._quick_depth(url[pos:])

        # Use lookup table for common distances
        distance = depth - self._optimal_depth
        distance = distance if distance >= 0 else -distance  # Faster than abs()

        if distance < 4:
            return _SCORE_LOOKUP[distance]

        return 1.0 / (1.0 + distance)

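A worked example of the lookup-table fast path: with optimal_depth=3, a URL of depth 2 has distance 1, so the raw score is _SCORE_LOOKUP[1] == 0.5:

    depth_scorer = PathDepthScorer(optimal_depth=3)
    depth_scorer.score("https://example.com/a/b/c")  # depth 3, distance 0 -> 1.0
    depth_scorer.score("https://example.com/a/b")    # depth 2, distance 1 -> 0.5
    depth_scorer.score("https://example.com")        # depth 0, distance 3 -> 0.25
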
class ContentTypeScorer(URLScorer):
    __slots__ = ('_weight', '_exact_types', '_regex_types')

    def __init__(self, type_weights: Dict[str, float], weight: float = 1.0):
        """Initialize scorer with type weights map.

        Args:
            type_weights: Dict mapping file extensions/patterns to scores (e.g. {'.html$': 1.0})
            weight: Overall weight multiplier for this scorer
        """
        super().__init__(weight=weight)
        self._exact_types = {}  # Fast lookup for simple extensions
        self._regex_types = []  # Fallback for complex patterns

        # Split into exact vs regex matchers for performance
        for pattern, score in type_weights.items():
            if pattern.startswith('.') and pattern.endswith('$'):
                ext = pattern[1:-1]
                self._exact_types[ext] = score
            else:
                self._regex_types.append((re.compile(pattern), score))

        # Sort complex patterns by score for early exit
        self._regex_types.sort(key=lambda x: -x[1])

    @staticmethod
    @lru_cache(maxsize=10000)
    def _quick_extension(url: str) -> str:
        """Extract file extension ultra-fast without regex/splits.

        Handles:
        - Basic extensions: "example.html" -> "html"
        - Query strings: "page.php?id=1" -> "php"
        - Fragments: "doc.pdf#page=1" -> "pdf"
        - Path params: "file.jpg;width=100" -> "jpg"

        Args:
            url: URL to extract extension from

        Returns:
            Extension without dot, or empty string if none found
        """
        pos = url.rfind('.')
        if pos == -1:
            return ''

        # Find first non-alphanumeric char after extension
        end = len(url)
        for i in range(pos + 1, len(url)):
            c = url[i]
            # Stop at query string, fragment, path param or any non-alphanumeric
            if c in '?#;' or not c.isalnum():
                end = i
                break

        return url[pos + 1:end].lower()

    @lru_cache(maxsize=10000)
    def _calculate_score(self, url: str) -> float:
        """Calculate content type score for URL.

        Uses staged approach:
        1. Try exact extension match (fast path)
        2. Fall back to regex patterns if needed

        Args:
            url: URL to score

        Returns:
            Score between 0.0 and 1.0 * weight
        """
        # Fast path: direct extension lookup
        ext = self._quick_extension(url)
        if ext:
            score = self._exact_types.get(ext, None)
            if score is not None:
                return score

        # Slow path: regex patterns
        for pattern, score in self._regex_types:
            if pattern.search(url):
                return score

        return 0.0

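Usage sketch: keys of the form ".ext$" take the exact-extension fast path; anything else is compiled as a regex fallback:

    type_scorer = ContentTypeScorer({".html$": 1.0, ".pdf$": 0.8, r"/api/": 0.3})
    type_scorer.score("https://example.com/page.html")  # 1.0 via exact lookup
    type_scorer.score("https://example.com/api/items")  # 0.3 via regex fallback
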
class FreshnessScorer(URLScorer):
    __slots__ = ('_weight', '_date_pattern', '_current_year')

    def __init__(self, weight: float = 1.0, current_year: int = 2024):
        """Initialize freshness scorer.

        Extracts and scores dates from URLs using format:
        - YYYY/MM/DD
        - YYYY-MM-DD
        - YYYY_MM_DD
        - YYYY (year only)

        Args:
            weight: Score multiplier
            current_year: Year to calculate freshness against (default 2024)
        """
        super().__init__(weight=weight)
        self._current_year = current_year

        # Combined pattern for all date formats
        # Uses non-capturing groups (?:) and alternation
        self._date_pattern = re.compile(
            r'(?:/'              # Path separator
            r'|[-_])'            # or date separators
            r'((?:19|20)\d{2})'  # Year group (1900-2099)
            r'(?:'               # Optional month/day group
            r'(?:/|[-_])'        # Date separator
            r'(?:\d{2})'         # Month
            r'(?:'               # Optional day
            r'(?:/|[-_])'        # Date separator
            r'(?:\d{2})'         # Day
            r')?'                # Day is optional
            r')?'                # Month/day group is optional
        )

    @lru_cache(maxsize=10000)
    def _extract_year(self, url: str) -> Optional[int]:
        """Extract the most recent year from URL.

        Args:
            url: URL to extract year from

        Returns:
            Year as int or None if no valid year found
        """
        matches = self._date_pattern.finditer(url)
        latest_year = None

        # Find most recent year
        for match in matches:
            year = int(match.group(1))
            if (year <= self._current_year and  # Sanity check
                    (latest_year is None or year > latest_year)):
                latest_year = year

        return latest_year

    @lru_cache(maxsize=10000)
    def _calculate_score(self, url: str) -> float:
        """Calculate freshness score based on URL date.

        More recent years score higher. Uses pre-computed scoring
        table for common year differences.

        Args:
            url: URL to score

        Returns:
            Score between 0.0 and 1.0 * weight
        """
        year = self._extract_year(url)
        if year is None:
            return 0.5  # Default score

        # Use lookup table for common year differences
        year_diff = self._current_year - year
        if year_diff < len(_FRESHNESS_SCORES):
            return _FRESHNESS_SCORES[year_diff]

        # Fallback calculation for older content
        return max(0.1, 1.0 - year_diff * 0.1)

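Usage sketch: the most recent year found in the URL is mapped through _FRESHNESS_SCORES, and URLs without a recognizable date get the neutral default:

    fresh = FreshnessScorer(current_year=2024)
    fresh.score("https://example.com/blog/2024/01/15/post")  # 1.0 (current year)
    fresh.score("https://example.com/archive-2019-report")   # 0.5 (5 years old)
    fresh.score("https://example.com/about")                 # 0.5 (no date found)
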
class DomainAuthorityScorer(URLScorer):
    __slots__ = ('_weight', '_domain_weights', '_default_weight', '_top_domains')

    def __init__(
        self,
        domain_weights: Dict[str, float],
        default_weight: float = 0.5,
        weight: float = 1.0,
    ):
        """Initialize domain authority scorer.

        Args:
            domain_weights: Dict mapping domains to authority scores
            default_weight: Score for unknown domains
            weight: Overall scorer weight multiplier

        Example:
            {
                'python.org': 1.0,
                'github.com': 0.9,
                'medium.com': 0.7
            }
        """
        super().__init__(weight=weight)

        # Pre-process domains for faster lookup
        self._domain_weights = {
            domain.lower(): score
            for domain, score in domain_weights.items()
        }
        self._default_weight = default_weight

        # Cache top domains for fast path
        self._top_domains = {
            domain: score
            for domain, score in sorted(
                domain_weights.items(),
                key=lambda x: -x[1]
            )[:5]  # Keep top 5 highest scoring domains
        }

    @staticmethod
    @lru_cache(maxsize=10000)
    def _extract_domain(url: str) -> str:
        """Extract domain from URL ultra-fast.

        Handles:
        - Basic domains: "example.com"
        - Subdomains: "sub.example.com"
        - Ports: "example.com:8080"
        - IPv4: "192.168.1.1"

        Args:
            url: Full URL to extract domain from

        Returns:
            Lowercase domain without port
        """
        # Find domain start
        start = url.find('://')
        if start == -1:
            start = 0
        else:
            start += 3

        # Find domain end
        end = url.find('/', start)
        if end == -1:
            end = url.find('?', start)
            if end == -1:
                end = url.find('#', start)
                if end == -1:
                    end = len(url)

        # Extract domain and remove port
        domain = url[start:end]
        port_idx = domain.rfind(':')
        if port_idx != -1:
            domain = domain[:port_idx]

        return domain.lower()

    @lru_cache(maxsize=10000)
    def _calculate_score(self, url: str) -> float:
        """Calculate domain authority score.

        Uses staged approach:
        1. Check top domains (fastest)
        2. Check full domain weights
        3. Return default weight

        Args:
            url: URL to score

        Returns:
            Authority score between 0.0 and 1.0 * weight
        """
        domain = self._extract_domain(url)

        # Fast path: check top domains first
        score = self._top_domains.get(domain)
        if score is not None:
            return score

        # Regular path: check all domains
        return self._domain_weights.get(domain, self._default_weight)

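Usage sketch: only exact domain matches hit the weight table, so a subdomain such as docs.python.org falls back to default_weight:

    authority = DomainAuthorityScorer(
        {"python.org": 1.0, "github.com": 0.9},
        default_weight=0.4,
    )
    authority.score("https://python.org/about")    # 1.0 (known domain)
    authority.score("https://docs.python.org/3/")  # 0.4 (subdomain is not an exact match)
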
crawl4ai/docker_client.py (new file, 170 lines)
@@ -0,0 +1,170 @@
from typing import List, Optional, Union, AsyncGenerator, Dict, Any
import httpx
import json
from urllib.parse import urljoin
import asyncio

from .async_configs import BrowserConfig, CrawlerRunConfig
from .models import CrawlResult
from .async_logger import AsyncLogger, LogLevel


class Crawl4aiClientError(Exception):
    """Base exception for Crawl4ai Docker client errors."""
    pass


class ConnectionError(Crawl4aiClientError):
    """Raised when connection to the Docker server fails."""
    pass


class RequestError(Crawl4aiClientError):
    """Raised when the server returns an error response."""
    pass


class Crawl4aiDockerClient:
    """Client for interacting with Crawl4AI Docker server with token authentication."""

    def __init__(
        self,
        base_url: str = "http://localhost:8000",
        timeout: float = 30.0,
        verify_ssl: bool = True,
        verbose: bool = True,
        log_file: Optional[str] = None
    ):
        self.base_url = base_url.rstrip('/')
        self.timeout = timeout
        self.logger = AsyncLogger(log_file=log_file, log_level=LogLevel.DEBUG, verbose=verbose)
        self._http_client = httpx.AsyncClient(
            timeout=timeout,
            verify=verify_ssl,
            headers={"Content-Type": "application/json"}
        )
        self._token: Optional[str] = None

    async def authenticate(self, email: str) -> None:
        """Authenticate with the server and store the token."""
        url = urljoin(self.base_url, "/token")
        try:
            self.logger.info(f"Authenticating with email: {email}", tag="AUTH")
            response = await self._http_client.post(url, json={"email": email})
            response.raise_for_status()
            data = response.json()
            self._token = data["access_token"]
            self._http_client.headers["Authorization"] = f"Bearer {self._token}"
            self.logger.success("Authentication successful", tag="AUTH")
        except (httpx.RequestError, httpx.HTTPStatusError) as e:
            error_msg = f"Authentication failed: {str(e)}"
            self.logger.error(error_msg, tag="ERROR")
            raise ConnectionError(error_msg)

    async def _check_server(self) -> None:
        """Check if server is reachable, raising an error if not."""
        try:
            await self._http_client.get(urljoin(self.base_url, "/health"))
            self.logger.success(f"Connected to {self.base_url}", tag="READY")
        except httpx.RequestError as e:
            self.logger.error(f"Server unreachable: {str(e)}", tag="ERROR")
            raise ConnectionError(f"Cannot connect to server: {str(e)}")

    def _prepare_request(self, urls: List[str], browser_config: Optional[BrowserConfig] = None,
                         crawler_config: Optional[CrawlerRunConfig] = None) -> Dict[str, Any]:
        """Prepare request data from configs."""
        return {
            "urls": urls,
            "browser_config": browser_config.dump() if browser_config else {},
            "crawler_config": crawler_config.dump() if crawler_config else {}
        }

    async def _request(self, method: str, endpoint: str, **kwargs) -> httpx.Response:
        """Make an HTTP request with error handling."""
        url = urljoin(self.base_url, endpoint)
        try:
            response = await self._http_client.request(method, url, **kwargs)
            response.raise_for_status()
            return response
        except httpx.TimeoutException as e:
            raise ConnectionError(f"Request timed out: {str(e)}")
        except httpx.RequestError as e:
            raise ConnectionError(f"Failed to connect: {str(e)}")
        except httpx.HTTPStatusError as e:
            error_msg = (e.response.json().get("detail", str(e))
                         if "application/json" in e.response.headers.get("content-type", "")
                         else str(e))
            raise RequestError(f"Server error {e.response.status_code}: {error_msg}")

    async def crawl(
        self,
        urls: List[str],
        browser_config: Optional[BrowserConfig] = None,
        crawler_config: Optional[CrawlerRunConfig] = None
    ) -> Union[CrawlResult, List[CrawlResult], AsyncGenerator[CrawlResult, None]]:
        """Execute a crawl operation."""
        if not self._token:
            raise Crawl4aiClientError("Authentication required. Call authenticate() first.")
        await self._check_server()

        data = self._prepare_request(urls, browser_config, crawler_config)
        is_streaming = crawler_config and crawler_config.stream

        self.logger.info(f"Crawling {len(urls)} URLs {'(streaming)' if is_streaming else ''}", tag="CRAWL")

        if is_streaming:
            async def stream_results() -> AsyncGenerator[CrawlResult, None]:
                async with self._http_client.stream("POST", f"{self.base_url}/crawl/stream", json=data) as response:
                    response.raise_for_status()
                    async for line in response.aiter_lines():
                        if line.strip():
                            result = json.loads(line)
                            if "error" in result:
                                self.logger.error_status(url=result.get("url", "unknown"), error=result["error"])
                                continue
                            self.logger.url_status(url=result.get("url", "unknown"), success=True, timing=result.get("timing", 0.0))
                            if result.get("status") == "completed":
                                continue
                            else:
                                yield CrawlResult(**result)
            return stream_results()

        response = await self._request("POST", "/crawl", json=data)
        result_data = response.json()
        if not result_data.get("success", False):
            raise RequestError(f"Crawl failed: {result_data.get('msg', 'Unknown error')}")

        results = [CrawlResult(**r) for r in result_data.get("results", [])]
        self.logger.success(f"Crawl completed with {len(results)} results", tag="CRAWL")
        return results[0] if len(results) == 1 else results

    async def get_schema(self) -> Dict[str, Any]:
        """Retrieve configuration schemas."""
        if not self._token:
            raise Crawl4aiClientError("Authentication required. Call authenticate() first.")
        response = await self._request("GET", "/schema")
        return response.json()

    async def close(self) -> None:
        """Close the HTTP client session."""
        self.logger.info("Closing client", tag="CLOSE")
        await self._http_client.aclose()

    async def __aenter__(self) -> "Crawl4aiDockerClient":
        return self

    async def __aexit__(self, exc_type: Optional[type], exc_val: Optional[Exception], exc_tb: Optional[Any]) -> None:
        await self.close()


# Example usage
async def main():
    async with Crawl4aiDockerClient(verbose=True) as client:
        await client.authenticate("user@example.com")
        result = await client.crawl(["https://example.com"])
        print(result)
        schema = await client.get_schema()
        print(schema)


if __name__ == "__main__":
    asyncio.run(main())

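A streaming counterpart to the main() example above (a sketch; it assumes CrawlerRunConfig accepts the stream flag that crawl() checks):

    async def stream_demo():
        async with Crawl4aiDockerClient(verbose=True) as client:
            await client.authenticate("user@example.com")
            results = await client.crawl(
                ["https://example.com", "https://example.org"],
                crawler_config=CrawlerRunConfig(stream=True),
            )
            async for result in results:  # results is an async generator here
                print(result)
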
@@ -1,13 +1,14 @@
 from abc import ABC, abstractmethod
+import inspect
 from typing import Any, List, Dict, Optional
 from concurrent.futures import ThreadPoolExecutor, as_completed
 import json
 import time
 import os

-from .prompts import PROMPT_EXTRACT_BLOCKS, PROMPT_EXTRACT_BLOCKS_WITH_INSTRUCTION, PROMPT_EXTRACT_SCHEMA_WITH_INSTRUCTION, JSON_SCHEMA_BUILDER_XPATH
+from .prompts import PROMPT_EXTRACT_BLOCKS, PROMPT_EXTRACT_BLOCKS_WITH_INSTRUCTION, PROMPT_EXTRACT_SCHEMA_WITH_INSTRUCTION, JSON_SCHEMA_BUILDER_XPATH, PROMPT_EXTRACT_INFERRED_SCHEMA
 from .config import (
-    DEFAULT_PROVIDER, PROVIDER_MODELS,
+    DEFAULT_PROVIDER,
+    DEFAULT_PROVIDER_API_KEY,
     CHUNK_TOKEN_THRESHOLD,
     OVERLAP_RATE,
     WORD_TOKEN_RATE,
@@ -21,6 +22,7 @@ from .utils import (
     extract_xml_data,
     split_and_parse_json_objects,
     sanitize_input_encode,
+    merge_chunks,
 )
 from .models import *  # noqa: F403

@@ -34,8 +36,9 @@ from .model_loader import (
     calculate_batch_size
 )

+from .types import LLMConfig, create_llm_config

 from functools import partial
 import math
 import numpy as np
 import re
 from bs4 import BeautifulSoup
@@ -477,8 +480,7 @@ class LLMExtractionStrategy(ExtractionStrategy):
     A strategy that uses an LLM to extract meaningful content from the HTML.

     Attributes:
-        provider: The provider to use for extraction. It follows the format <provider_name>/<model_name>, e.g., "ollama/llama3.3".
-        api_token: The API token for the provider.
+        llm_config: The LLM configuration object.
         instruction: The instruction to use for the LLM model.
         schema: Pydantic model schema for structured data.
         extraction_type: "block" or "schema".
@@ -486,29 +488,41 @@ class LLMExtractionStrategy(ExtractionStrategy):
         overlap_rate: Overlap between chunks.
         word_token_rate: Word to token conversion rate.
         apply_chunking: Whether to apply chunking.
         base_url: The base URL for the API request.
         api_base: The base URL for the API request.
         extra_args: Additional arguments for the API request, such as temperature, max_tokens, etc.
         verbose: Whether to print verbose output.
         usages: List of individual token usages.
         total_usage: Accumulated token usage.
     """

+    _UNWANTED_PROPS = {
+        'provider' : 'Instead, use llm_config=LLMConfig(provider="...")',
+        'api_token' : 'Instead, use llm_config=LLMConfig(api_token="...")',
+        'base_url' : 'Instead, use llm_config=LLMConfig(base_url="...")',
+        'api_base' : 'Instead, use llm_config=LLMConfig(base_url="...")',
+    }
     def __init__(
         self,
-        provider: str = DEFAULT_PROVIDER,
-        api_token: Optional[str] = None,
+        llm_config: 'LLMConfig' = None,
         instruction: str = None,
         schema: Dict = None,
         extraction_type="block",
+        chunk_token_threshold=CHUNK_TOKEN_THRESHOLD,
+        overlap_rate=OVERLAP_RATE,
+        word_token_rate=WORD_TOKEN_RATE,
+        apply_chunking=True,
+        input_format: str = "markdown",
+        force_json_response=False,
+        verbose=False,
+        # Deprecated arguments
+        provider: str = DEFAULT_PROVIDER,
+        api_token: Optional[str] = None,
+        base_url: str = None,
+        api_base: str = None,
         **kwargs,
     ):
         """
         Initialize the strategy with clustering parameters.

         Args:
-            provider: The provider to use for extraction. It follows the format <provider_name>/<model_name>, e.g., "ollama/llama3.3".
-            api_token: The API token for the provider.
+            llm_config: The LLM configuration object.
             instruction: The instruction to use for the LLM model.
             schema: Pydantic model schema for structured data.
             extraction_type: "block" or "schema".
@@ -516,48 +530,59 @@ class LLMExtractionStrategy(ExtractionStrategy):
             overlap_rate: Overlap between chunks.
             word_token_rate: Word to token conversion rate.
             apply_chunking: Whether to apply chunking.
+            input_format: Content format to use for extraction.
+                Options: "markdown" (default), "html", "fit_markdown"
+            force_json_response: Whether to force a JSON response from the LLM.
+            verbose: Whether to print verbose output.
+
+            # Deprecated arguments, will be removed very soon
+            provider: The provider to use for extraction. It follows the format <provider_name>/<model_name>, e.g., "ollama/llama3.3".
+            api_token: The API token for the provider.
             base_url: The base URL for the API request.
             api_base: The base URL for the API request.
             extra_args: Additional arguments for the API request, such as temperature, max_tokens, etc.
             verbose: Whether to print verbose output.
             usages: List of individual token usages.
             total_usage: Accumulated token usage.

         """
-        super().__init__(**kwargs)
-        self.provider = provider
-        self.api_token = (
-            api_token
-            or PROVIDER_MODELS.get(provider, "no-token")
-            or os.getenv("OPENAI_API_KEY")
-        )
+        super().__init__(input_format=input_format, **kwargs)
+        self.llm_config = llm_config
+        if not self.llm_config:
+            self.llm_config = create_llm_config(
+                provider=DEFAULT_PROVIDER,
+                api_token=os.environ.get(DEFAULT_PROVIDER_API_KEY),
+            )
         self.instruction = instruction
         self.extract_type = extraction_type
         self.schema = schema
         if schema:
             self.extract_type = "schema"

-        self.chunk_token_threshold = kwargs.get(
-            "chunk_token_threshold", CHUNK_TOKEN_THRESHOLD
-        )
-        self.overlap_rate = kwargs.get("overlap_rate", OVERLAP_RATE)
-        self.word_token_rate = kwargs.get("word_token_rate", WORD_TOKEN_RATE)
-        self.apply_chunking = kwargs.get("apply_chunking", True)
-        self.base_url = kwargs.get("base_url", None)
-        self.api_base = kwargs.get("api_base", kwargs.get("base_url", None))
+        self.force_json_response = force_json_response
+        self.chunk_token_threshold = chunk_token_threshold or CHUNK_TOKEN_THRESHOLD
+        self.overlap_rate = overlap_rate
+        self.word_token_rate = word_token_rate
+        self.apply_chunking = apply_chunking
         self.extra_args = kwargs.get("extra_args", {})
         if not self.apply_chunking:
             self.chunk_token_threshold = 1e9

-        self.verbose = kwargs.get("verbose", False)
+        self.verbose = verbose
         self.usages = []  # Store individual usages
         self.total_usage = TokenUsage()  # Accumulated usage

-        if not self.api_token:
-            raise ValueError(
-                "API token must be provided for LLMExtractionStrategy. Update the config.py or set OPENAI_API_KEY environment variable."
-            )
+        self.provider = provider
+        self.api_token = api_token
+        self.base_url = base_url
+        self.api_base = api_base
+
+    def __setattr__(self, name, value):
+        """Handle attribute setting."""
+        # TODO: Planning to set properties dynamically based on the __init__ signature
+        sig = inspect.signature(self.__init__)
+        all_params = sig.parameters  # Dictionary of parameter names and their details
+
+        if name in self._UNWANTED_PROPS and value is not all_params[name].default:
+            raise AttributeError(f"Setting '{name}' is deprecated. {self._UNWANTED_PROPS[name]}")
+
+        super().__setattr__(name, value)

     def extract(self, url: str, ix: int, html: str) -> List[Dict[str, Any]]:
         """
         Extract meaningful blocks or chunks from the given HTML using an LLM.
@@ -590,115 +615,111 @@ class LLMExtractionStrategy(ExtractionStrategy):
                 prompt_with_variables = PROMPT_EXTRACT_BLOCKS_WITH_INSTRUCTION

             if self.extract_type == "schema" and self.schema:
-                variable_values["SCHEMA"] = json.dumps(self.schema, indent=2)
+                variable_values["SCHEMA"] = json.dumps(self.schema, indent=2)  # if type of self.schema is dict else self.schema
                 prompt_with_variables = PROMPT_EXTRACT_SCHEMA_WITH_INSTRUCTION

+            if self.extract_type == "schema" and not self.schema:
+                prompt_with_variables = PROMPT_EXTRACT_INFERRED_SCHEMA
+
             for variable in variable_values:
                 prompt_with_variables = prompt_with_variables.replace(
                     "{" + variable + "}", variable_values[variable]
                 )

-            response = perform_completion_with_backoff(
-                self.provider,
-                prompt_with_variables,
-                self.api_token,
-                base_url=self.api_base or self.base_url,
-                extra_args=self.extra_args,
-            )  # , json_response=self.extract_type == "schema")
+            response = perform_completion_with_backoff(
+                self.llm_config.provider,
+                prompt_with_variables,
+                self.llm_config.api_token,
+                base_url=self.llm_config.base_url,
+                json_response=self.force_json_response,
+                extra_args=self.extra_args,
+            )  # , json_response=self.extract_type == "schema")
             # Track usage
             usage = TokenUsage(
                 completion_tokens=response.usage.completion_tokens,
                 prompt_tokens=response.usage.prompt_tokens,
                 total_tokens=response.usage.total_tokens,
                 completion_tokens_details=response.usage.completion_tokens_details.__dict__
                 if response.usage.completion_tokens_details
                 else {},
                 prompt_tokens_details=response.usage.prompt_tokens_details.__dict__
                 if response.usage.prompt_tokens_details
                 else {},
             )
             self.usages.append(usage)

             # Update totals
             self.total_usage.completion_tokens += usage.completion_tokens
             self.total_usage.prompt_tokens += usage.prompt_tokens
             self.total_usage.total_tokens += usage.total_tokens

             try:
-                blocks = extract_xml_data(["blocks"], response.choices[0].message.content)[
-                    "blocks"
-                ]
-                blocks = json.loads(blocks)
+                response = response.choices[0].message.content
+                blocks = None
+
+                if self.force_json_response:
+                    blocks = json.loads(response)
+                    if isinstance(blocks, dict):
+                        # If it has only one key whose value is a list, assign that list to blocks, e.g.: {"news": [..]}
+                        if len(blocks) == 1 and isinstance(list(blocks.values())[0], list):
+                            blocks = list(blocks.values())[0]
+                        else:
+                            # If it has only one key whose value is not a list, wrap the object, e.g.: {"article_id": "1234", ...}
+                            blocks = [blocks]
+                    elif isinstance(blocks, list):
+                        # If it is already a list, use it as-is
+                        blocks = blocks
+                else:
+                    # blocks = extract_xml_data(["blocks"], response.choices[0].message.content)["blocks"]
+                    blocks = extract_xml_data(["blocks"], response)["blocks"]
+                    blocks = json.loads(blocks)
+
                 for block in blocks:
                     block["error"] = False
             except Exception:
                 parsed, unparsed = split_and_parse_json_objects(
                     response.choices[0].message.content
                 )
                 blocks = parsed
                 if unparsed:
                     blocks.append(
                         {"index": 0, "error": True, "tags": ["error"], "content": unparsed}
                     )

             if self.verbose:
                 print(
                     "[LOG] Extracted",
                     len(blocks),
                     "blocks from URL:",
                     url,
                     "block index:",
                     ix,
                 )
             return blocks
         except Exception as e:
             if self.verbose:
                 print(f"[LOG] Error in LLM extraction: {e}")
             # Add error information to extracted_content
             return [
                 {
                     "index": ix,
                     "error": True,
                     "tags": ["error"],
                     "content": str(e),
                 }
             ]

-    def _merge(self, documents, chunk_token_threshold, overlap):
+    def _merge(self, documents, chunk_token_threshold, overlap) -> List[str]:
         """
         Merge documents into sections based on chunk_token_threshold and overlap.
         """
-        # chunks = []
-        sections = []
-        total_tokens = 0
-
-        # Calculate the total tokens across all documents
-        for document in documents:
-            total_tokens += len(document.split(" ")) * self.word_token_rate
-
-        # Calculate the number of sections needed
-        num_sections = math.floor(total_tokens / chunk_token_threshold)
-        if num_sections < 1:
-            num_sections = 1  # Ensure there is at least one section
-        adjusted_chunk_threshold = total_tokens / num_sections
-
-        total_token_so_far = 0
-        current_chunk = []
-
-        for document in documents:
-            tokens = document.split(" ")
-            token_count = len(tokens) * self.word_token_rate
-
-            if total_token_so_far + token_count <= adjusted_chunk_threshold:
-                current_chunk.extend(tokens)
-                total_token_so_far += token_count
-            else:
-                # Ensure to handle the last section properly
-                if len(sections) == num_sections - 1:
-                    current_chunk.extend(tokens)
-                    continue
-
-                # Add overlap if specified
-                if overlap > 0 and current_chunk:
-                    overlap_tokens = current_chunk[-overlap:]
-                    current_chunk.extend(overlap_tokens)
-
-                sections.append(" ".join(current_chunk))
-                current_chunk = tokens
-                total_token_so_far = token_count
-
-        # Add the last chunk
-        if current_chunk:
-            sections.append(" ".join(current_chunk))
-
+        sections = merge_chunks(
+            docs=documents,
+            target_size=chunk_token_threshold,
+            overlap=overlap,
+            word_token_ratio=self.word_token_rate,
+        )
         return sections

     def run(self, url: str, sections: List[str]) -> List[Dict[str, Any]]:
@@ -719,7 +740,7 @@ class LLMExtractionStrategy(ExtractionStrategy):
                 overlap=int(self.chunk_token_threshold * self.overlap_rate),
             )
         extracted_content = []
-        if self.provider.startswith("groq/"):
+        if self.llm_config.provider.startswith("groq/"):
             # Sequential processing with a delay
             for ix, section in enumerate(merged_sections):
                 extract_func = partial(self.extract, url)
@@ -779,8 +800,6 @@ class LLMExtractionStrategy(ExtractionStrategy):
 #######################################################
 # New extraction strategies for JSON-based extraction #
 #######################################################
-
-
 class JsonElementExtractionStrategy(ExtractionStrategy):
     """
     Abstract base class for extracting structured JSON from HTML content.
@@ -1060,13 +1079,20 @@ class JsonElementExtractionStrategy(ExtractionStrategy):
         """Get attribute value from element"""
         pass

+    _GENERATE_SCHEMA_UNWANTED_PROPS = {
+        'provider': 'Instead, use llm_config=LLMConfig(provider="...")',
+        'api_token': 'Instead, use llm_config=LLMConfig(api_token="...")',
+    }
+
     @staticmethod
     def generate_schema(
         html: str,
         schema_type: str = "CSS",  # or XPATH
         query: str = None,
-        provider: str = "gpt-4o",
-        api_token: str = os.getenv("OPENAI_API_KEY"),
         target_json_example: str = None,
+        llm_config: 'LLMConfig' = create_llm_config(),
+        provider: str = None,
+        api_token: str = None,
         **kwargs
     ) -> dict:
         """
@@ -1075,16 +1101,20 @@ class JsonElementExtractionStrategy(ExtractionStrategy):
         Args:
             html (str): The HTML content to analyze
             query (str, optional): Natural language description of what data to extract
-            provider (str): LLM provider to use
-            api_token (str): API token for LLM provider
+            provider (str): Legacy Parameter. LLM provider to use
+            api_token (str): Legacy Parameter. API token for LLM provider
+            llm_config (LLMConfig): LLM configuration object
             prompt (str, optional): Custom prompt template to use
-            **kwargs: Additional args passed to perform_completion_with_backoff
+            **kwargs: Additional args passed to LLM processor

         Returns:
             dict: Generated schema following the JsonElementExtractionStrategy format
         """
         from .prompts import JSON_SCHEMA_BUILDER
         from .utils import perform_completion_with_backoff
+        for name, message in JsonElementExtractionStrategy._GENERATE_SCHEMA_UNWANTED_PROPS.items():
+            if locals()[name] is not None:
+                raise AttributeError(f"Setting '{name}' is deprecated. {message}")

         # Use default or custom prompt
         prompt_template = JSON_SCHEMA_BUILDER if schema_type == "CSS" else JSON_SCHEMA_BUILDER_XPATH
@@ -1092,32 +1122,65 @@ class JsonElementExtractionStrategy(ExtractionStrategy):
        # Build the prompt
        system_message = {
            "role": "system",
            "content": "You are a specialized HTML schema generator. Analyze the HTML and generate a JSON schema that follows the specified format. Only output valid JSON schema, nothing else."
            "content": f"""You specialize in generating special JSON schemas for web scraping. This schema uses CSS or XPATH selectors to present a repetitive pattern in crawled HTML, such as a product in a product list or a search result item in a list of search results. We use this JSON schema to pass to a language model along with the HTML content to extract structured data from the HTML. The language model uses the JSON schema to extract data from the HTML and retrieve values for fields in the JSON schema, following the schema.

Generating this HTML manually is not feasible, so you need to generate the JSON schema using the HTML content. The HTML copied from the crawled website is provided below, which we believe contains the repetitive pattern.

# Schema main keys:
- name: This is the name of the schema.
- baseSelector: This is the CSS or XPATH selector that identifies the base element that contains all the repetitive patterns.
- baseFields: This is a list of fields that you extract from the base element itself.
- fields: This is a list of fields that you extract from the children of the base element. {{name, selector, type}} based on the type, you may have extra keys such as "attribute" when the type is "attribute".

# Extra Context:
In this context, the following items may or may not be present:
- Example of target JSON object: This is a sample of the final JSON object that we hope to extract from the HTML using the schema you are generating.
- Extra Instructions: This is optional instructions to consider when generating the schema provided by the user.
- Query or explanation of target/goal data item: This is a description of what data we are trying to extract from the HTML. This explanation means we're not sure about the rigid schema of the structures we want, so we leave it to you to use your expertise to create the best and most comprehensive structures aimed at maximizing data extraction from this page. You must ensure that you do not pick up nuances that may exist on a particular page. The focus should be on the data we are extracting, and it must be valid, safe, and robust based on the given HTML.

# What if there is no example of target JSON object and also no extra instructions or even no explanation of target/goal data item?
In this scenario, use your best judgment to generate the schema. You need to examine the content of the page and understand the data it provides. If the page contains repetitive data, such as lists of items, products, jobs, places, books, or movies, focus on one single item that repeats. If the page is a detailed page about one product or item, create a schema to extract the entire structured data. At this stage, you must think and decide for yourself. Try to maximize the number of fields that you can extract from the HTML.

# What are the instructions and details for this schema generation?
{prompt_template}"""
        }

        user_message = {
            "role": "user",
            "content": f"""
                Instructions:
                {prompt_template}

                HTML to analyze:
                ```html
                {html}
                ```

                {"Extract the following data: " + query if query else "Please analyze the HTML structure and create the most appropriate schema for data extraction."}
                """
        }

        if query:
            user_message["content"] += f"\n\n## Query or explanation of target/goal data item:\n{query}"
        if target_json_example:
            user_message["content"] += f"\n\n## Example of target JSON object:\n```json\n{target_json_example}\n```"

        if query and not target_json_example:
            user_message["content"] += """IMPORTANT: To remind you, in this process, we are not providing a rigid example of the adjacent objects we seek. We rely on your understanding of the explanation provided in the above section. Make sure to grasp what we are looking for and, based on that, create the best schema.."""
        elif not query and target_json_example:
            user_message["content"] += """IMPORTANT: Please remember that in this process, we provided a proper example of a target JSON object. Make sure to adhere to the structure and create a schema that exactly fits this example. If you find that some elements on the page do not match completely, vote for the majority."""
        elif not query and not target_json_example:
            user_message["content"] += """IMPORTANT: Since we neither have a query nor an example, it is crucial to rely solely on the HTML content provided. Leverage your expertise to determine the schema based on the repetitive patterns observed in the content."""

        user_message["content"] += """IMPORTANT: Ensure your schema remains reliable by avoiding selectors that appear to generate dynamically and are not dependable. You want a reliable schema, as it consistently returns the same data even after many page reloads.

        Analyze the HTML and generate a JSON schema that follows the specified format. Only output valid JSON schema, nothing else.
        """

        try:
            # Call LLM with backoff handling
            response = perform_completion_with_backoff(
                provider=provider,
                provider=llm_config.provider,
                prompt_with_variables="\n\n".join([system_message["content"], user_message["content"]]),
                json_response = True,
                api_token=api_token,
                **kwargs
                api_token=llm_config.api_token,
                base_url=llm_config.base_url,
                extra_args=kwargs
            )

            # Extract and return schema
@@ -1126,7 +1189,6 @@ class JsonElementExtractionStrategy(ExtractionStrategy):
        except Exception as e:
            raise Exception(f"Failed to generate schema: {str(e)}")


class JsonCssExtractionStrategy(JsonElementExtractionStrategy):
    """
    Concrete implementation of `JsonElementExtractionStrategy` using CSS selectors.
@@ -1154,7 +1216,8 @@ class JsonCssExtractionStrategy(JsonElementExtractionStrategy):
        super().__init__(schema, **kwargs)

    def _parse_html(self, html_content: str):
        return BeautifulSoup(html_content, "html.parser")
        # return BeautifulSoup(html_content, "html.parser")
        return BeautifulSoup(html_content, "lxml")

    def _get_base_elements(self, parsed_html, selector: str):
        return parsed_html.select(selector)
@@ -1173,6 +1236,373 @@ class JsonCssExtractionStrategy(JsonElementExtractionStrategy):
    def _get_element_attribute(self, element, attribute: str):
        return element.get(attribute)

class JsonLxmlExtractionStrategy(JsonElementExtractionStrategy):
    def __init__(self, schema: Dict[str, Any], **kwargs):
        kwargs["input_format"] = "html"
        super().__init__(schema, **kwargs)
        self._selector_cache = {}
        self._xpath_cache = {}
        self._result_cache = {}

        # Control selector optimization strategy
        self.use_caching = kwargs.get("use_caching", True)
        self.optimize_common_patterns = kwargs.get("optimize_common_patterns", True)

        # Load lxml dependencies once
        from lxml import etree, html
        from lxml.cssselect import CSSSelector
        self.etree = etree
        self.html_parser = html
        self.CSSSelector = CSSSelector

    def _parse_html(self, html_content: str):
        """Parse HTML content with error recovery"""
        try:
            parser = self.etree.HTMLParser(recover=True, remove_blank_text=True)
            return self.etree.fromstring(html_content, parser)
        except Exception as e:
            if self.verbose:
                print(f"Error parsing HTML, falling back to alternative method: {e}")
            try:
                return self.html_parser.fromstring(html_content)
            except Exception as e2:
                if self.verbose:
                    print(f"Critical error parsing HTML: {e2}")
                # Create minimal document as fallback
                return self.etree.Element("html")

    def _optimize_selector(self, selector_str):
        """Optimize common selector patterns for better performance"""
        if not self.optimize_common_patterns:
            return selector_str

        # Handle td:nth-child(N) pattern which is very common in table scraping
        import re
        if re.search(r'td:nth-child\(\d+\)', selector_str):
            return selector_str  # Already handled specially in _apply_selector

        # Split complex selectors into parts for optimization
        parts = selector_str.split()
        if len(parts) <= 1:
            return selector_str

        # For very long selectors, consider using just the last specific part
        if len(parts) > 3 and any(p.startswith('.') or p.startswith('#') for p in parts):
            specific_parts = [p for p in parts if p.startswith('.') or p.startswith('#')]
            if specific_parts:
                return specific_parts[-1]  # Use most specific class/id selector

        return selector_str

    def _create_selector_function(self, selector_str):
        """Create a selector function that handles all edge cases"""
        original_selector = selector_str

        # Try to optimize the selector if appropriate
        if self.optimize_common_patterns:
            selector_str = self._optimize_selector(selector_str)

        try:
            # Attempt to compile the CSS selector
            compiled = self.CSSSelector(selector_str)
            xpath = compiled.path

            # Store XPath for later use
            self._xpath_cache[selector_str] = xpath

            # Create the wrapper function that implements the selection strategy
            def selector_func(element, context_sensitive=True):
                cache_key = None

                # Use result caching if enabled
                if self.use_caching:
                    # Create a cache key based on element and selector
                    element_id = element.get('id', '') or str(hash(element))
                    cache_key = f"{element_id}::{selector_str}"

                    if cache_key in self._result_cache:
                        return self._result_cache[cache_key]

                results = []
                try:
                    # Strategy 1: Direct CSS selector application (fastest)
                    results = compiled(element)

                    # If that fails and we need context sensitivity
                    if not results and context_sensitive:
                        # Strategy 2: Try XPath with context adjustment
                        context_xpath = self._make_context_sensitive_xpath(xpath, element)
                        if context_xpath:
                            results = element.xpath(context_xpath)

                        # Strategy 3: Handle special case - nth-child
                        if not results and 'nth-child' in original_selector:
                            results = self._handle_nth_child_selector(element, original_selector)

                        # Strategy 4: Direct descendant search for class/ID selectors
                        if not results:
                            results = self._fallback_class_id_search(element, original_selector)

                        # Strategy 5: Last resort - tag name search for the final part
                        if not results:
                            parts = original_selector.split()
                            if parts:
                                last_part = parts[-1]
                                # Extract tag name from the selector
                                tag_match = re.match(r'^(\w+)', last_part)
                                if tag_match:
                                    tag_name = tag_match.group(1)
                                    results = element.xpath(f".//{tag_name}")

                    # Cache results if caching is enabled
                    if self.use_caching and cache_key:
                        self._result_cache[cache_key] = results

                except Exception as e:
                    if self.verbose:
                        print(f"Error applying selector '{selector_str}': {e}")

                return results

            return selector_func

        except Exception as e:
            if self.verbose:
                print(f"Error compiling selector '{selector_str}': {e}")

            # Fallback function for invalid selectors
            return lambda element, context_sensitive=True: []

    def _make_context_sensitive_xpath(self, xpath, element):
        """Convert absolute XPath to context-sensitive XPath"""
        try:
            # If starts with descendant-or-self, it's already context-sensitive
            if xpath.startswith('descendant-or-self::'):
                return xpath

            # Remove leading slash if present
            if xpath.startswith('/'):
                context_xpath = f".{xpath}"
            else:
                context_xpath = f".//{xpath}"

            # Validate the XPath by trying it
            try:
                element.xpath(context_xpath)
                return context_xpath
            except:
                # If that fails, try a simpler descendant search
                return f".//{xpath.split('/')[-1]}"
        except:
            return None

    def _handle_nth_child_selector(self, element, selector_str):
        """Special handling for nth-child selectors in tables"""
        import re
        results = []

        try:
            # Extract the column number from td:nth-child(N)
            match = re.search(r'td:nth-child\((\d+)\)', selector_str)
            if match:
                col_num = match.group(1)

                # Check if there's content after the nth-child part
                remaining_selector = selector_str.split(f"td:nth-child({col_num})", 1)[-1].strip()

                if remaining_selector:
                    # If there's a specific element we're looking for after the column
                    # Extract any tag names from the remaining selector
                    tag_match = re.search(r'(\w+)', remaining_selector)
                    tag_name = tag_match.group(1) if tag_match else '*'
                    results = element.xpath(f".//td[{col_num}]//{tag_name}")
                else:
                    # Just get the column cell
                    results = element.xpath(f".//td[{col_num}]")
        except Exception as e:
            if self.verbose:
                print(f"Error handling nth-child selector: {e}")

        return results
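
# For instance, the handler above turns "td:nth-child(3) a" into the XPath
# ".//td[3]//a", and a bare "td:nth-child(3)" into ".//td[3]".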

    def _fallback_class_id_search(self, element, selector_str):
        """Fallback to search by class or ID"""
        results = []

        try:
            # Extract class selectors (.classname)
            import re
            class_matches = re.findall(r'\.([a-zA-Z0-9_-]+)', selector_str)

            # Extract ID selectors (#idname)
            id_matches = re.findall(r'#([a-zA-Z0-9_-]+)', selector_str)

            # Try each class
            for class_name in class_matches:
                class_results = element.xpath(f".//*[contains(@class, '{class_name}')]")
                results.extend(class_results)

            # Try each ID (usually more specific)
            for id_name in id_matches:
                id_results = element.xpath(f".//*[@id='{id_name}']")
                results.extend(id_results)
        except Exception as e:
            if self.verbose:
                print(f"Error in fallback class/id search: {e}")

        return results

    def _get_selector(self, selector_str):
        """Get or create a selector function with caching"""
        if selector_str not in self._selector_cache:
            self._selector_cache[selector_str] = self._create_selector_function(selector_str)
        return self._selector_cache[selector_str]

    def _get_base_elements(self, parsed_html, selector: str):
        """Get all base elements using the selector"""
        selector_func = self._get_selector(selector)
        # For base elements, we don't need context sensitivity
        return selector_func(parsed_html, context_sensitive=False)

    def _get_elements(self, element, selector: str):
        """Get child elements using the selector with context sensitivity"""
        selector_func = self._get_selector(selector)
        return selector_func(element, context_sensitive=True)

    def _get_element_text(self, element) -> str:
        """Extract normalized text from element"""
        try:
            # Get all text nodes and normalize
            text = " ".join(t.strip() for t in element.xpath(".//text()") if t.strip())
            return text
        except Exception as e:
            if self.verbose:
                print(f"Error extracting text: {e}")
            # Fallback
            try:
                return element.text_content().strip()
            except:
                return ""

    def _get_element_html(self, element) -> str:
        """Get HTML string representation of element"""
        try:
            return self.etree.tostring(element, encoding='unicode', method='html')
        except Exception as e:
            if self.verbose:
                print(f"Error serializing HTML: {e}")
            return ""

    def _get_element_attribute(self, element, attribute: str):
        """Get attribute value safely"""
        try:
            return element.get(attribute)
        except Exception as e:
            if self.verbose:
                print(f"Error getting attribute '{attribute}': {e}")
            return None

    def _clear_caches(self):
        """Clear caches to free memory"""
        if self.use_caching:
            self._result_cache.clear()
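
# A usage sketch for the optimized strategy (the schema keys mirror those
# described in the schema-builder prompt earlier in this file; the selector
# values are illustrative):
product_schema = {
    "name": "Products",
    "baseSelector": "div.product-card",
    "fields": [
        {"name": "title", "selector": "h2", "type": "text"},
        {"name": "link", "selector": "a", "type": "attribute", "attribute": "href"},
    ],
}
lxml_strategy = JsonLxmlExtractionStrategy(
    product_schema,
    use_caching=True,               # reuse per-element selector results
    optimize_common_patterns=True,  # e.g. shorten long descendant selectors
)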

class JsonLxmlExtractionStrategy_naive(JsonElementExtractionStrategy):
    def __init__(self, schema: Dict[str, Any], **kwargs):
        kwargs["input_format"] = "html"  # Force HTML input
        super().__init__(schema, **kwargs)
        self._selector_cache = {}

    def _parse_html(self, html_content: str):
        from lxml import etree
        parser = etree.HTMLParser(recover=True)
        return etree.fromstring(html_content, parser)

    def _get_selector(self, selector_str):
        """Get a selector function that works within the context of an element"""
        if selector_str not in self._selector_cache:
            from lxml.cssselect import CSSSelector
            try:
                # Store both the compiled selector and its xpath translation
                compiled = CSSSelector(selector_str)

                # Create a function that will apply this selector appropriately
                def select_func(element):
                    try:
                        # First attempt: direct CSS selector application
                        results = compiled(element)
                        if results:
                            return results

                        # Second attempt: contextual XPath selection
                        # Convert the root-based XPath to a context-based XPath
                        xpath = compiled.path

                        # If the XPath already starts with descendant-or-self, handle it specially
                        if xpath.startswith('descendant-or-self::'):
                            context_xpath = xpath
                        else:
                            # For normal XPath expressions, make them relative to current context
                            context_xpath = f"./{xpath.lstrip('/')}"

                        results = element.xpath(context_xpath)
                        if results:
                            return results

                        # Final fallback: simple descendant search for common patterns
                        if 'nth-child' in selector_str:
                            # Handle td:nth-child(N) pattern
                            import re
                            match = re.search(r'td:nth-child\((\d+)\)', selector_str)
                            if match:
                                col_num = match.group(1)
                                sub_selector = selector_str.split(')', 1)[-1].strip()
                                if sub_selector:
                                    return element.xpath(f".//td[{col_num}]//{sub_selector}")
                                else:
                                    return element.xpath(f".//td[{col_num}]")

                        # Last resort: try each part of the selector separately
                        parts = selector_str.split()
                        if len(parts) > 1 and parts[-1]:
                            return element.xpath(f".//{parts[-1]}")

                        return []
                    except Exception as e:
                        if self.verbose:
                            print(f"Error applying selector '{selector_str}': {e}")
                        return []

                self._selector_cache[selector_str] = select_func
            except Exception as e:
                if self.verbose:
                    print(f"Error compiling selector '{selector_str}': {e}")

                # Fallback function for invalid selectors
                def fallback_func(element):
                    return []

                self._selector_cache[selector_str] = fallback_func

        return self._selector_cache[selector_str]

    def _get_base_elements(self, parsed_html, selector: str):
        selector_func = self._get_selector(selector)
        return selector_func(parsed_html)

    def _get_elements(self, element, selector: str):
        selector_func = self._get_selector(selector)
        return selector_func(element)

    def _get_element_text(self, element) -> str:
        return "".join(element.xpath(".//text()")).strip()

    def _get_element_html(self, element) -> str:
        from lxml import etree
        return etree.tostring(element, encoding='unicode')

    def _get_element_attribute(self, element, attribute: str):
        return element.get(attribute)

class JsonXPathExtractionStrategy(JsonElementExtractionStrategy):
    """
@@ -510,6 +510,7 @@ class HTML2Text(html.parser.HTMLParser):

        if tag == "a" and not self.ignore_links:
            if start:
                self.inside_link = True
                if (
                    "href" in attrs
                    and attrs["href"] is not None
@@ -526,6 +527,7 @@ class HTML2Text(html.parser.HTMLParser):
                else:
                    self.astack.append(None)
            else:
                self.inside_link = False
                if self.astack:
                    a = self.astack.pop()
                    if self.maybe_automatic_link and not self.empty_link:
@@ -610,13 +612,22 @@ class HTML2Text(html.parser.HTMLParser):
                            self.o("[" + str(a_props.count) + "]")

        if tag == "dl" and start:
            self.p()
        if tag == "dt" and not start:
            self.pbr()
        if tag == "dd" and start:
            self.o(" ")
        if tag == "dd" and not start:
            self.pbr()
            self.p()  # Add paragraph break before list starts
            self.p_p = 0  # Reset paragraph state

        elif tag == "dt" and start:
            if self.p_p == 0:  # If not first term
                self.o("\n\n")  # Add spacing before new term-definition pair
            self.p_p = 0  # Reset paragraph state

        elif tag == "dt" and not start:
            self.o("\n")  # Single newline between term and definition

        elif tag == "dd" and start:
            self.o(" ")  # Indent definition

        elif tag == "dd" and not start:
            self.p_p = 0
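
# With the handlers above, a definition list such as
#   <dl><dt>Term</dt><dd>Meaning</dd></dl>
# now comes out roughly as "Term" on its own line followed by an indented
# "Meaning", with a blank line before each subsequent term/definition pair.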

        if tag in ["ol", "ul"]:
            # Google Docs create sub lists as top level lists
@@ -1026,6 +1037,7 @@ class CustomHTML2Text(HTML2Text):
        super().__init__(*args, **kwargs)
        self.inside_pre = False
        self.inside_code = False
        self.inside_link = False
        self.preserve_tags = set()  # Set of tags to preserve
        self.current_preserved_tag = None
        self.preserved_content = []
@@ -1105,11 +1117,17 @@ class CustomHTML2Text(HTML2Text):
                # Ignore code tags inside pre blocks if handle_code_in_pre is False
                return
            if start:
                self.o("`")  # Markdown inline code start
                if not self.inside_link:
                    self.o("`")  # Only output backtick if not inside a link
                self.inside_code = True
            else:
                self.o("`")  # Markdown inline code end
                if not self.inside_link:
                    self.o("`")  # Only output backtick if not inside a link
                self.inside_code = False

        # If inside a link, let the parent class handle the content
        if self.inside_link:
            super().handle_tag(tag, attrs, start)
        else:
            super().handle_tag(tag, attrs, start)
69
crawl4ai/hub.py
Normal file
@@ -0,0 +1,69 @@
# crawl4ai/hub.py
from abc import ABC, abstractmethod
from typing import Dict, Type, Union
import logging
import importlib
from pathlib import Path
import inspect

logger = logging.getLogger(__name__)


class BaseCrawler(ABC):
    def __init__(self):
        self.logger = logging.getLogger(self.__class__.__name__)

    @abstractmethod
    async def run(self, url: str = "", **kwargs) -> str:
        """
        Implement this method to return JSON string.
        Must accept URL + arbitrary kwargs for flexibility.
        """
        pass

    def __init_subclass__(cls, **kwargs):
        """Enforce interface validation on subclassing"""
        super().__init_subclass__(**kwargs)

        # Verify run method signature
        run_method = cls.run
        if not run_method.__code__.co_argcount >= 2:  # self + url
            raise TypeError(f"{cls.__name__} must implement 'run(self, url: str, **kwargs)'")

        # Verify async nature
        if not inspect.iscoroutinefunction(run_method):
            raise TypeError(f"{cls.__name__}.run must be async")

class CrawlerHub:
    _crawlers: Dict[str, Type[BaseCrawler]] = {}

    @classmethod
    def _discover_crawlers(cls):
        """Dynamically load crawlers from /crawlers in 3 lines"""
        base_path = Path(__file__).parent / "crawlers"
        for crawler_dir in base_path.iterdir():
            if crawler_dir.is_dir():
                try:
                    module = importlib.import_module(
                        f"crawl4ai.crawlers.{crawler_dir.name}.crawler"
                    )
                    for attr in dir(module):
                        cls._maybe_register_crawler(
                            getattr(module, attr), crawler_dir.name
                        )
                except Exception as e:
                    logger.warning(f"Failed {crawler_dir.name}: {str(e)}")

    @classmethod
    def _maybe_register_crawler(cls, obj, name: str):
        """Brilliant one-liner registration"""
        if isinstance(obj, type) and issubclass(obj, BaseCrawler) and obj != BaseCrawler:
            module = importlib.import_module(obj.__module__)
            obj.meta = getattr(module, "__meta__", {})
            cls._crawlers[name] = obj

    @classmethod
    def get(cls, name: str) -> Union[Type[BaseCrawler], None]:
        if not cls._crawlers:
            cls._discover_crawlers()
        return cls._crawlers.get(name)
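
# A discovery sketch (the crawler name "google" and __meta__ below are
# illustrative, not shipped with this diff). A module placed at
# crawl4ai/crawlers/google/crawler.py such as:
#
#     __meta__ = {"version": "0.1"}
#
#     class GoogleCrawler(BaseCrawler):
#         async def run(self, url: str = "", **kwargs) -> str:
#             return '{"results": []}'
#
# is auto-registered, so callers can then do:
#
#     crawler_cls = CrawlerHub.get("google")  # triggers _discover_crawlers()
#     print(crawler_cls.meta)                 # {"version": "0.1"} via __meta__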
@@ -2,17 +2,93 @@ import subprocess
import sys
import asyncio
from .async_logger import AsyncLogger, LogLevel
from pathlib import Path
import os
import shutil

# Initialize logger
logger = AsyncLogger(log_level=LogLevel.DEBUG, verbose=True)

def setup_home_directory():
    """Set up the .crawl4ai folder structure in the user's home directory."""
    base_dir = os.getenv("CRAWL4_AI_BASE_DIRECTORY")
    crawl4ai_folder = Path(base_dir) if base_dir else Path.home()
    crawl4ai_config = crawl4ai_folder / "global.yml"
    crawl4ai_folder = crawl4ai_folder / ".crawl4ai"
    cache_folder = crawl4ai_folder / "cache"
    content_folders = [
        "html_content",
        "cleaned_html",
        "markdown_content",
        "extracted_content",
        "screenshots",
    ]

    # Clean up old cache if exists
    if cache_folder.exists():
        shutil.rmtree(cache_folder)

    # Create new folder structure
    crawl4ai_folder.mkdir(exist_ok=True)
    cache_folder.mkdir(exist_ok=True)
    for folder in content_folders:
        (crawl4ai_folder / folder).mkdir(exist_ok=True)

    # If config file does not exist, create it
    if not crawl4ai_config.exists():
        with open(crawl4ai_config, "w") as f:
            f.write("")

def post_install():
    """Run all post-installation tasks"""
    """
    Run all post-installation tasks.
    Checks CRAWL4AI_MODE environment variable. If set to 'api',
    skips Playwright browser installation.
    """
    logger.info("Running post-installation setup...", tag="INIT")
    install_playwright()
    setup_home_directory()

    # Check environment variable to conditionally skip Playwright install
    run_mode = os.getenv('CRAWL4AI_MODE')
    if run_mode == 'api':
        logger.warning(
            "CRAWL4AI_MODE=api detected. Skipping Playwright browser installation.",
            tag="SETUP"
        )
    else:
        # Proceed with installation only if mode is not 'api'
        install_playwright()

    run_migration()
    # TODO: Will be added in the future
    # setup_builtin_browser()
    logger.success("Post-installation setup completed!", tag="COMPLETE")

def setup_builtin_browser():
    """Set up a builtin browser for use with Crawl4AI"""
    try:
        logger.info("Setting up builtin browser...", tag="INIT")
        asyncio.run(_setup_builtin_browser())
        logger.success("Builtin browser setup completed!", tag="COMPLETE")
    except Exception as e:
        logger.warning(f"Failed to set up builtin browser: {e}")
        logger.warning("You can manually set up a builtin browser using 'crawl4ai-doctor builtin-browser-start'")

async def _setup_builtin_browser():
    try:
        # Import BrowserProfiler here to avoid circular imports
        from .browser_profiler import BrowserProfiler
        profiler = BrowserProfiler(logger=logger)

        # Launch the builtin browser
        cdp_url = await profiler.launch_builtin_browser(headless=True)
        if cdp_url:
            logger.success(f"Builtin browser launched at {cdp_url}", tag="BROWSER")
        else:
            logger.warning("Failed to launch builtin browser", tag="BROWSER")
    except Exception as e:
        logger.warning(f"Error setting up builtin browser: {e}", tag="BROWSER")
        raise


def install_playwright():
@@ -106,4 +182,5 @@ def doctor():
    """Entry point for the doctor command"""
    import asyncio

    return asyncio.run(run_doctor())
    asyncio.run(run_doctor())
    sys.exit(0)

@@ -115,5 +115,6 @@ async () => {
    document.body.style.overflow = "auto";

    // Wait a bit for any animations to complete
    await new Promise((resolve) => setTimeout(resolve, 100));
    document.body.scrollIntoView(false);
    await new Promise((resolve) => setTimeout(resolve, 50));
};
0
crawl4ai/legacy/__init__.py
Normal file
123
crawl4ai/legacy/cli.py
Normal file
@@ -0,0 +1,123 @@
import click
import sys
import asyncio
from typing import List
from .docs_manager import DocsManager
from .async_logger import AsyncLogger

logger = AsyncLogger(verbose=True)
docs_manager = DocsManager(logger)


def print_table(headers: List[str], rows: List[List[str]], padding: int = 2):
    """Print formatted table with headers and rows"""
    widths = [max(len(str(cell)) for cell in col) for col in zip(headers, *rows)]
    border = "+" + "+".join("-" * (w + 2 * padding) for w in widths) + "+"

    def format_row(row):
        return (
            "|"
            + "|".join(
                f"{' ' * padding}{str(cell):<{w}}{' ' * padding}"
                for cell, w in zip(row, widths)
            )
            + "|"
        )

    click.echo(border)
    click.echo(format_row(headers))
    click.echo(border)
    for row in rows:
        click.echo(format_row(row))
    click.echo(border)
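
# For example, print_table(["Name", "Status"], [["docs", "ok"]]) renders:
#     +--------+----------+
#     |  Name  |  Status  |
#     +--------+----------+
#     |  docs  |  ok      |
#     +--------+----------+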


@click.group()
def cli():
    """Crawl4AI Command Line Interface"""
    pass


@cli.group()
def docs():
    """Documentation operations"""
    pass


@docs.command()
@click.argument("sections", nargs=-1)
@click.option(
    "--mode", type=click.Choice(["extended", "condensed"]), default="extended"
)
def combine(sections: tuple, mode: str):
    """Combine documentation sections"""
    try:
        asyncio.run(docs_manager.ensure_docs_exist())
        click.echo(docs_manager.generate(sections, mode))
    except Exception as e:
        logger.error(str(e), tag="ERROR")
        sys.exit(1)


@docs.command()
@click.argument("query")
@click.option("--top-k", "-k", default=5)
@click.option("--build-index", is_flag=True, help="Build index if missing")
def search(query: str, top_k: int, build_index: bool):
    """Search documentation"""
    try:
        result = docs_manager.search(query, top_k)
        if result == "No search index available. Call build_search_index() first.":
            if build_index or click.confirm("No search index found. Build it now?"):
                asyncio.run(docs_manager.llm_text.generate_index_files())
                result = docs_manager.search(query, top_k)
        click.echo(result)
    except Exception as e:
        click.echo(f"Error: {str(e)}", err=True)
        sys.exit(1)


@docs.command()
def update():
    """Update docs from GitHub"""
    try:
        asyncio.run(docs_manager.fetch_docs())
        click.echo("Documentation updated successfully")
    except Exception as e:
        click.echo(f"Error: {str(e)}", err=True)
        sys.exit(1)


@docs.command()
@click.option("--force-facts", is_flag=True, help="Force regenerate fact files")
@click.option("--clear-cache", is_flag=True, help="Clear BM25 cache")
def index(force_facts: bool, clear_cache: bool):
    """Build or rebuild search indexes"""
    try:
        asyncio.run(docs_manager.ensure_docs_exist())
        asyncio.run(
            docs_manager.llm_text.generate_index_files(
                force_generate_facts=force_facts, clear_bm25_cache=clear_cache
            )
        )
        click.echo("Search indexes built successfully")
    except Exception as e:
        click.echo(f"Error: {str(e)}", err=True)
        sys.exit(1)


# Add docs list command
@docs.command()
def list():
    """List available documentation sections"""
    try:
        sections = docs_manager.list()
        print_table(["Sections"], [[section] for section in sections])

    except Exception as e:
        click.echo(f"Error: {str(e)}", err=True)
        sys.exit(1)


if __name__ == "__main__":
    cli()
@@ -2,6 +2,7 @@ from abc import ABC, abstractmethod
from typing import Optional, Dict, Any, Tuple
from .models import MarkdownGenerationResult
from .html2text import CustomHTML2Text
# from .types import RelevantContentFilter
from .content_filter_strategy import RelevantContentFilter
import re
from urllib.parse import urljoin
@@ -29,21 +30,25 @@ class MarkdownGenerationStrategy(ABC):
        self,
        content_filter: Optional[RelevantContentFilter] = None,
        options: Optional[Dict[str, Any]] = None,
        verbose: bool = False,
        content_source: str = "cleaned_html",
    ):
        self.content_filter = content_filter
        self.options = options or {}
        self.verbose = verbose
        self.content_source = content_source

    @abstractmethod
    def generate_markdown(
        self,
        cleaned_html: str,
        input_html: str,
        base_url: str = "",
        html2text_options: Optional[Dict[str, Any]] = None,
        content_filter: Optional[RelevantContentFilter] = None,
        citations: bool = True,
        **kwargs,
    ) -> MarkdownGenerationResult:
        """Generate markdown from cleaned HTML."""
        """Generate markdown from the selected input HTML."""
        pass


@@ -60,6 +65,7 @@ class DefaultMarkdownGenerator(MarkdownGenerationStrategy):
    Args:
        content_filter (Optional[RelevantContentFilter]): Content filter for generating fit markdown.
        options (Optional[Dict[str, Any]]): Additional options for markdown generation. Defaults to None.
        content_source (str): Source of content to generate markdown from. Options: "cleaned_html", "raw_html", "fit_html". Defaults to "cleaned_html".

    Returns:
        MarkdownGenerationResult: Result containing raw markdown, fit markdown, fit HTML, and references markdown.
@@ -69,8 +75,9 @@ class DefaultMarkdownGenerator(MarkdownGenerationStrategy):
        self,
        content_filter: Optional[RelevantContentFilter] = None,
        options: Optional[Dict[str, Any]] = None,
        content_source: str = "cleaned_html",
    ):
        super().__init__(content_filter, options)
        super().__init__(content_filter, options, verbose=False, content_source=content_source)
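
# A short sketch of the new knob: generate markdown from the raw page HTML
# rather than the cleaned HTML (valid values per the docstring above are
# "cleaned_html", "raw_html", and "fit_html"):
raw_md_generator = DefaultMarkdownGenerator(content_source="raw_html")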

    def convert_links_to_citations(
        self, markdown: str, base_url: str = ""
@@ -140,7 +147,7 @@ class DefaultMarkdownGenerator(MarkdownGenerationStrategy):

    def generate_markdown(
        self,
        cleaned_html: str,
        input_html: str,
        base_url: str = "",
        html2text_options: Optional[Dict[str, Any]] = None,
        options: Optional[Dict[str, Any]] = None,
@@ -149,16 +156,16 @@ class DefaultMarkdownGenerator(MarkdownGenerationStrategy):
        **kwargs,
    ) -> MarkdownGenerationResult:
        """
        Generate markdown with citations from cleaned HTML.
        Generate markdown with citations from the provided input HTML.

        How it works:
        1. Generate raw markdown from cleaned HTML.
        1. Generate raw markdown from the input HTML.
        2. Convert links to citations.
        3. Generate fit markdown if content filter is provided.
        4. Return MarkdownGenerationResult.

        Args:
            cleaned_html (str): Cleaned HTML content.
            input_html (str): The HTML content to process (selected based on content_source).
            base_url (str): Base URL for URL joins.
            html2text_options (Optional[Dict[str, Any]]): HTML2Text options.
            options (Optional[Dict[str, Any]]): Additional options for markdown generation.
@@ -176,7 +183,7 @@ class DefaultMarkdownGenerator(MarkdownGenerationStrategy):
            "ignore_emphasis": False,
            "ignore_links": False,
            "ignore_images": False,
            "protect_links": True,
            "protect_links": False,
            "single_line_break": True,
            "mark_code": True,
            "escape_snob": False,
@@ -193,14 +200,14 @@ class DefaultMarkdownGenerator(MarkdownGenerationStrategy):
        h.update_params(**default_options)

        # Ensure we have valid input
        if not cleaned_html:
            cleaned_html = ""
        elif not isinstance(cleaned_html, str):
            cleaned_html = str(cleaned_html)
        if not input_html:
            input_html = ""
        elif not isinstance(input_html, str):
            input_html = str(input_html)

        # Generate raw markdown
        try:
            raw_markdown = h.handle(cleaned_html)
            raw_markdown = h.handle(input_html)
        except Exception as e:
            raw_markdown = f"Error converting HTML to markdown: {str(e)}"

@@ -225,7 +232,7 @@ class DefaultMarkdownGenerator(MarkdownGenerationStrategy):
        if content_filter or self.content_filter:
            try:
                content_filter = content_filter or self.content_filter
                filtered_html = content_filter.filter_content(cleaned_html)
                filtered_html = content_filter.filter_content(input_html)
                filtered_html = "\n".join(
                    "<div>{}</div>".format(s) for s in filtered_html
                )
@@ -1,5 +1,7 @@
from pydantic import BaseModel, HttpUrl
from pydantic import BaseModel, HttpUrl, PrivateAttr
from typing import List, Dict, Optional, Callable, Awaitable, Union, Any
from typing import AsyncGenerator
from typing import Generic, TypeVar
from enum import Enum
from dataclasses import dataclass
from .ssl_certificate import SSLCertificate
@@ -24,10 +26,15 @@ class CrawlerTaskResult:
    result: "CrawlResult"
    memory_usage: float
    peak_memory: float
    start_time: datetime
    end_time: datetime
    start_time: Union[datetime, float]
    end_time: Union[datetime, float]
    error_message: str = ""

    retry_count: int = 0
    wait_time: float = 0.0

    @property
    def success(self) -> bool:
        return self.result.success

class CrawlStatus(Enum):
    QUEUED = "QUEUED"
@@ -35,27 +42,39 @@ class CrawlStatus(Enum):
    COMPLETED = "COMPLETED"
    FAILED = "FAILED"


@dataclass
class CrawlStats:
    task_id: str
    url: str
    status: CrawlStatus
    start_time: Optional[datetime] = None
    end_time: Optional[datetime] = None
    start_time: Optional[Union[datetime, float]] = None
    end_time: Optional[Union[datetime, float]] = None
    memory_usage: float = 0.0
    peak_memory: float = 0.0
    error_message: str = ""
    wait_time: float = 0.0
    retry_count: int = 0
    counted_requeue: bool = False

    @property
    def duration(self) -> str:
        if not self.start_time:
            return "0:00"

        # Convert start_time to datetime if it's a float
        start = self.start_time
        if isinstance(start, float):
            start = datetime.fromtimestamp(start)

        # Get end time or use current time
        end = self.end_time or datetime.now()
        duration = end - self.start_time
        # Convert end_time to datetime if it's a float
        if isinstance(end, float):
            end = datetime.fromtimestamp(end)

        duration = end - start
        return str(timedelta(seconds=int(duration.total_seconds())))

class DisplayMode(Enum):
    DETAILED = "DETAILED"
    AGGREGATED = "AGGREGATED"
@@ -72,12 +91,31 @@ class TokenUsage:
    completion_tokens_details: Optional[dict] = None
    prompt_tokens_details: Optional[dict] = None


class UrlModel(BaseModel):
    url: HttpUrl
    forced: bool = False


@dataclass
class TraversalStats:
    """Statistics for the traversal process"""

    start_time: datetime = datetime.now()
    urls_processed: int = 0
    urls_failed: int = 0
    urls_skipped: int = 0
    total_depth_reached: int = 0
    current_depth: int = 0

class DispatchResult(BaseModel):
    task_id: str
    memory_usage: float
    peak_memory: float
    start_time: Union[datetime, float]
    end_time: Union[datetime, float]
    error_message: str = ""

class MarkdownGenerationResult(BaseModel):
    raw_markdown: str
    markdown_with_citations: str
@@ -85,16 +123,9 @@ class MarkdownGenerationResult(BaseModel):
    fit_markdown: Optional[str] = None
    fit_html: Optional[str] = None


class DispatchResult(BaseModel):
    task_id: str
    memory_usage: float
    peak_memory: float
    start_time: datetime
    end_time: datetime
    error_message: str = ""


    def __str__(self):
        return self.raw_markdown

class CrawlResult(BaseModel):
    url: str
    html: str
@@ -103,12 +134,11 @@ class CrawlResult(BaseModel):
    media: Dict[str, List[Dict]] = {}
    links: Dict[str, List[Dict]] = {}
    downloaded_files: Optional[List[str]] = None
    js_execution_result: Optional[Dict[str, Any]] = None
    screenshot: Optional[str] = None
    pdf: Optional[bytes] = None
    markdown: Optional[Union[str, MarkdownGenerationResult]] = None
    markdown_v2: Optional[MarkdownGenerationResult] = None
    fit_markdown: Optional[str] = None
    fit_html: Optional[str] = None
    mhtml: Optional[str] = None
    _markdown: Optional[MarkdownGenerationResult] = PrivateAttr(default=None)
    extracted_content: Optional[str] = None
    metadata: Optional[dict] = None
    error_message: Optional[str] = None
@@ -118,31 +148,187 @@ class CrawlResult(BaseModel):
    ssl_certificate: Optional[SSLCertificate] = None
    dispatch_result: Optional[DispatchResult] = None
    redirected_url: Optional[str] = None
    network_requests: Optional[List[Dict[str, Any]]] = None
    console_messages: Optional[List[Dict[str, Any]]] = None

    class Config:
        arbitrary_types_allowed = True

    # NOTE: The StringCompatibleMarkdown class, custom __init__ method, property getters/setters,
    # and model_dump override all exist to support a smooth transition from markdown as a string
    # to markdown as a MarkdownGenerationResult object, while maintaining backward compatibility.
    #
    # This allows code that expects markdown to be a string to continue working, while also
    # providing access to the full MarkdownGenerationResult object's properties.
    #
    # The markdown_v2 property is deprecated and raises an error directing users to use markdown.
    #
    # When backward compatibility is no longer needed in future versions, this entire mechanism
    # can be simplified to a standard field with no custom accessors or serialization logic.

    def __init__(self, **data):
        markdown_result = data.pop('markdown', None)
        super().__init__(**data)
        if markdown_result is not None:
            self._markdown = (
                MarkdownGenerationResult(**markdown_result)
                if isinstance(markdown_result, dict)
                else markdown_result
            )

    @property
    def markdown(self):
        """
        Property that returns a StringCompatibleMarkdown object that behaves like
        a string but also provides access to MarkdownGenerationResult attributes.

        This approach allows backward compatibility with code that expects 'markdown'
        to be a string, while providing access to the full MarkdownGenerationResult.
        """
        if self._markdown is None:
            return None
        return StringCompatibleMarkdown(self._markdown)

    @markdown.setter
    def markdown(self, value):
        """
        Setter for the markdown property.
        """
        self._markdown = value

    @property
    def markdown_v2(self):
        """
        Deprecated property that raises an AttributeError when accessed.

        This property exists to inform users that 'markdown_v2' has been
        deprecated and they should use 'markdown' instead.
        """
        raise AttributeError(
            "The 'markdown_v2' attribute is deprecated and has been removed. "
            """Please use 'markdown' instead, which now returns a MarkdownGenerationResult, with
            following properties:
            - raw_markdown: The raw markdown string
            - markdown_with_citations: The markdown string with citations
            - references_markdown: The markdown string with references
            - fit_markdown: The markdown string with fit text
            """
        )

    @property
    def fit_markdown(self):
        """
        Deprecated property that raises an AttributeError when accessed.
        """
        raise AttributeError(
            "The 'fit_markdown' attribute is deprecated and has been removed. "
            "Please use 'markdown.fit_markdown' instead."
        )

    @property
    def fit_html(self):
        """
        Deprecated property that raises an AttributeError when accessed.
        """
        raise AttributeError(
            "The 'fit_html' attribute is deprecated and has been removed. "
            "Please use 'markdown.fit_html' instead."
        )

    def model_dump(self, *args, **kwargs):
        """
        Override model_dump to include the _markdown private attribute in serialization.

        This override is necessary because:
        1. PrivateAttr fields are excluded from serialization by default
        2. We need to maintain backward compatibility by including the 'markdown' field
           in the serialized output
        3. We're transitioning from 'markdown_v2' to enhancing 'markdown' to hold
           the same type of data

        Future developers: This method ensures that the markdown content is properly
        serialized despite being stored in a private attribute. If the serialization
        requirements change, this is where you would update the logic.
        """
        result = super().model_dump(*args, **kwargs)
        if self._markdown is not None:
            result["markdown"] = self._markdown.model_dump()
        return result

class StringCompatibleMarkdown(str):
    """A string subclass that also provides access to MarkdownGenerationResult attributes"""
    def __new__(cls, markdown_result):
        return super().__new__(cls, markdown_result.raw_markdown)

    def __init__(self, markdown_result):
        self._markdown_result = markdown_result

    def __getattr__(self, name):
        return getattr(self._markdown_result, name)
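
# Backward compatibility in practice (a sketch; assumes references_markdown is
# a required field of MarkdownGenerationResult, as the deprecation message
# above implies):
md = MarkdownGenerationResult(
    raw_markdown="# Title",
    markdown_with_citations="# Title",
    references_markdown="",
)
compat = StringCompatibleMarkdown(md)
assert compat == "# Title"                           # plain-string behavior
assert compat.markdown_with_citations == "# Title"   # attribute delegation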

CrawlResultT = TypeVar('CrawlResultT', bound=CrawlResult)

class CrawlResultContainer(Generic[CrawlResultT]):
    def __init__(self, results: Union[CrawlResultT, List[CrawlResultT]]):
        # Normalize to a list
        if isinstance(results, list):
            self._results = results
        else:
            self._results = [results]

    def __iter__(self):
        return iter(self._results)

    def __getitem__(self, index):
        return self._results[index]

    def __len__(self):
        return len(self._results)

    def __getattr__(self, attr):
        # Delegate attribute access to the first element.
        if self._results:
            return getattr(self._results[0], attr)
        raise AttributeError(f"{self.__class__.__name__} object has no attribute '{attr}'")

    def __repr__(self):
        return f"{self.__class__.__name__}({self._results!r})"

RunManyReturn = Union[
    CrawlResultContainer[CrawlResultT],
    AsyncGenerator[CrawlResultT, None]
]
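
# Delegation in practice (a sketch; `single_result` stands in for a CrawlResult
# produced by a crawl):
container = CrawlResultContainer(single_result)
print(container.url)    # __getattr__ forwards to container[0].url
for res in container:   # list-like iteration also works
    print(res.success)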


# END of backward compatibility code for markdown/markdown_v2.
# When removing this code in the future, make sure to:
# 1. Replace the private attribute and property with a standard field
# 2. Update any serialization logic that might depend on the current behavior

class AsyncCrawlResponse(BaseModel):
    html: str
    response_headers: Dict[str, str]
    js_execution_result: Optional[Dict[str, Any]] = None
    status_code: int
    screenshot: Optional[str] = None
    pdf_data: Optional[bytes] = None
    mhtml_data: Optional[str] = None
    get_delayed_content: Optional[Callable[[Optional[float]], Awaitable[str]]] = None
    downloaded_files: Optional[List[str]] = None
    ssl_certificate: Optional[SSLCertificate] = None
    redirected_url: Optional[str] = None
    network_requests: Optional[List[Dict[str, Any]]] = None
    console_messages: Optional[List[Dict[str, Any]]] = None

    class Config:
        arbitrary_types_allowed = True


###############################
# Scraping Models
###############################
class MediaItem(BaseModel):
    src: Optional[str] = ""
    data: Optional[str] = ""
    alt: Optional[str] = ""
    desc: Optional[str] = ""
    score: Optional[int] = 0
@@ -167,6 +353,7 @@ class Media(BaseModel):
    audios: List[
        MediaItem
    ] = []  # Using MediaItem model for now, can be extended with Audio model if needed
    tables: List[Dict] = []  # Table data extracted from HTML tables


class Links(BaseModel):
165
crawl4ai/processors/pdf/__init__.py
Normal file
@@ -0,0 +1,165 @@
from pathlib import Path
import asyncio
from dataclasses import asdict
from crawl4ai.async_logger import AsyncLogger
from crawl4ai.async_crawler_strategy import AsyncCrawlerStrategy
from crawl4ai.models import AsyncCrawlResponse, ScrapingResult
from crawl4ai.content_scraping_strategy import ContentScrapingStrategy
from .processor import NaivePDFProcessorStrategy  # Assuming your current PDF code is in pdf_processor.py

class PDFCrawlerStrategy(AsyncCrawlerStrategy):
    def __init__(self, logger: AsyncLogger = None):
        self.logger = logger

    async def crawl(self, url: str, **kwargs) -> AsyncCrawlResponse:
        # Just pass through with empty HTML - scraper will handle actual processing
        return AsyncCrawlResponse(
            html="",  # Scraper will handle the real work
            response_headers={"Content-Type": "application/pdf"},
            status_code=200
        )

    async def close(self):
        pass

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self.close()

class PDFContentScrapingStrategy(ContentScrapingStrategy):
    """
    A content scraping strategy for PDF files.

    Attributes:
        save_images_locally (bool): Whether to save images locally.
        extract_images (bool): Whether to extract images from PDF.
        image_save_dir (str): Directory to save extracted images.
        logger (AsyncLogger): Logger instance for recording events and errors.

    Methods:
        scrap(url: str, html: str, **params) -> ScrapingResult:
            Scrap content from a PDF file.
        ascrap(url: str, html: str, **kwargs) -> ScrapingResult:
            Asynchronous version of scrap.

    Usage:
        strategy = PDFContentScrapingStrategy(
            save_images_locally=False,
            extract_images=False,
            image_save_dir=None,
            logger=logger
        )

    """
    def __init__(self,
                 save_images_locally: bool = False,
                 extract_images: bool = False,
                 image_save_dir: str = None,
                 batch_size: int = 4,
                 logger: AsyncLogger = None):
        self.logger = logger
        self.pdf_processor = NaivePDFProcessorStrategy(
            save_images_locally=save_images_locally,
            extract_images=extract_images,
            image_save_dir=image_save_dir,
            batch_size=batch_size
        )

    def scrap(self, url: str, html: str, **params) -> ScrapingResult:
        """
        Scrap content from a PDF file.

        Args:
            url (str): The URL of the PDF file.
            html (str): The HTML content of the page.
            **params: Additional parameters.

        Returns:
            ScrapingResult: The scraped content.
        """
        # Download if URL or use local path
        pdf_path = self._get_pdf_path(url)
        try:
            # Process PDF
            # result = self.pdf_processor.process(Path(pdf_path))
            result = self.pdf_processor.process_batch(Path(pdf_path))

            # Combine page HTML
            cleaned_html = f"""
            <html>
                <head><meta name="pdf-pages" content="{len(result.pages)}"></head>
                <body>
                    {''.join(f'<div class="pdf-page" data-page="{i+1}">{page.html}</div>'
                             for i, page in enumerate(result.pages))}
                </body>
            </html>
            """

            # Accumulate media and links with page numbers
            media = {"images": []}
            links = {"urls": []}

            for page in result.pages:
                # Add page number to each image
                for img in page.images:
                    img["page"] = page.page_number
                    media["images"].append(img)

                # Add page number to each link
                for link in page.links:
                    links["urls"].append({
                        "url": link,
                        "page": page.page_number
                    })

            return ScrapingResult(
                cleaned_html=cleaned_html,
                success=True,
                media=media,
                links=links,
                metadata=asdict(result.metadata)
            )
        finally:
            # Cleanup temp file if downloaded
            if url.startswith(("http://", "https://")):
                Path(pdf_path).unlink(missing_ok=True)

    async def ascrap(self, url: str, html: str, **kwargs) -> ScrapingResult:
        # For simple cases, you can use the sync version
        return await asyncio.to_thread(self.scrap, url, html, **kwargs)

    def _get_pdf_path(self, url: str) -> str:
        if url.startswith(("http://", "https://")):
            import tempfile
            import requests

            # Create temp file with .pdf extension
            temp_file = tempfile.NamedTemporaryFile(suffix='.pdf', delete=False)

            try:
                # Download PDF with streaming
                response = requests.get(url, stream=True)
                response.raise_for_status()

                # Write to temp file
                with open(temp_file.name, 'wb') as f:
                    for chunk in response.iter_content(chunk_size=8192):
                        f.write(chunk)

                return temp_file.name

            except Exception as e:
                # Clean up temp file if download fails
                Path(temp_file.name).unlink(missing_ok=True)
                raise RuntimeError(f"Failed to download PDF from {url}: {str(e)}")

        elif url.startswith("file://"):
            return url[7:]  # Strip file:// prefix

        return url  # Assume local path


__all__ = ["PDFCrawlerStrategy", "PDFContentScrapingStrategy"]
487
crawl4ai/processors/pdf/processor.py
Normal file
@@ -0,0 +1,487 @@
|
||||
import logging
|
||||
import re
|
||||
from abc import ABC, abstractmethod
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from time import time
|
||||
from dataclasses import dataclass, asdict, field
|
||||
from typing import Dict, List, Optional, Any, Union
|
||||
import base64
|
||||
import tempfile
|
||||
from .utils import *
|
||||
from .utils import (
|
||||
apply_png_predictor,
|
||||
clean_pdf_text,
|
||||
clean_pdf_text_to_html,
|
||||
)
|
||||
|
||||
# Remove direct PyPDF2 imports from the top
|
||||
# import PyPDF2
|
||||
# from PyPDF2 import PdfReader
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@dataclass
|
||||
class PDFMetadata:
|
||||
title: Optional[str] = None
|
||||
author: Optional[str] = None
|
||||
producer: Optional[str] = None
|
||||
created: Optional[datetime] = None
|
||||
modified: Optional[datetime] = None
|
||||
pages: int = 0
|
||||
encrypted: bool = False
|
||||
file_size: Optional[int] = None
|
||||
|
||||
@dataclass
|
||||
class PDFPage:
|
||||
page_number: int
|
||||
raw_text: str = ""
|
||||
markdown: str = ""
|
||||
html: str = ""
|
||||
images: List[Dict] = field(default_factory=list)
|
||||
links: List[str] = field(default_factory=list)
|
||||
layout: List[Dict] = field(default_factory=list)
|
||||
|
||||
@dataclass
|
||||
class PDFProcessResult:
|
||||
metadata: PDFMetadata
|
||||
pages: List[PDFPage]
|
||||
processing_time: float = 0.0
|
||||
version: str = "1.0"
|
||||
|
||||
class PDFProcessorStrategy(ABC):
|
||||
@abstractmethod
|
||||
def process(self, pdf_path: Path) -> PDFProcessResult:
|
||||
pass
|
||||
|
||||
class NaivePDFProcessorStrategy(PDFProcessorStrategy):
    def __init__(self, image_dpi: int = 144, image_quality: int = 85, extract_images: bool = True,
                 save_images_locally: bool = False, image_save_dir: Optional[Path] = None, batch_size: int = 4):
        # Import check at initialization time
        try:
            import PyPDF2
        except ImportError:
            raise ImportError("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")

        self.image_dpi = image_dpi
        self.image_quality = image_quality
        self.current_page_number = 0
        self.extract_images = extract_images
        self.save_images_locally = save_images_locally
        self.image_save_dir = image_save_dir
        self.batch_size = batch_size
        self._temp_dir = None

    def process(self, pdf_path: Path) -> PDFProcessResult:
        # Import inside method to allow dependency to be optional
        try:
            from PyPDF2 import PdfReader
        except ImportError:
            raise ImportError("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")

        start_time = time()
        result = PDFProcessResult(
            metadata=PDFMetadata(),
            pages=[],
            version="1.1"
        )

        try:
            with pdf_path.open('rb') as file:
                reader = PdfReader(file)
                result.metadata = self._extract_metadata(pdf_path, reader)

                # Handle image directory
                image_dir = None
                if self.extract_images and self.save_images_locally:
                    if self.image_save_dir:
                        image_dir = Path(self.image_save_dir)
                        image_dir.mkdir(exist_ok=True, parents=True)
                    else:
                        self._temp_dir = tempfile.mkdtemp(prefix='pdf_images_')
                        image_dir = Path(self._temp_dir)

                for page_num, page in enumerate(reader.pages):
                    self.current_page_number = page_num + 1
                    pdf_page = self._process_page(page, image_dir)
                    result.pages.append(pdf_page)

        except Exception as e:
            logger.error(f"Failed to process PDF: {str(e)}")
            raise
        finally:
            # Cleanup temp directory if it was created
            if self._temp_dir and not self.image_save_dir:
                import shutil
                try:
                    shutil.rmtree(self._temp_dir)
                except Exception as e:
                    logger.error(f"Failed to cleanup temp directory: {str(e)}")

        result.processing_time = time() - start_time
        return result
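A hedged usage sketch of the sequential path (the file name and option values are illustrative, not defaults):

strategy = NaivePDFProcessorStrategy(
    extract_images=True,
    save_images_locally=True,
    image_save_dir=Path("pdf_images"),
)
result = strategy.process(Path("paper.pdf"))  # illustrative path
print(f"{result.metadata.pages} pages in {result.processing_time:.2f}s")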
    def process_batch(self, pdf_path: Path) -> PDFProcessResult:
        """Like process() but processes PDF pages in parallel batches"""
        # Import inside method to allow dependency to be optional
        try:
            from PyPDF2 import PdfReader
            import PyPDF2  # For type checking
        except ImportError:
            raise ImportError("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")

        import concurrent.futures
        import threading

        # Initialize PyPDF2 thread support
        if not hasattr(threading.current_thread(), "_children"):
            threading.current_thread()._children = set()

        start_time = time()
        result = PDFProcessResult(
            metadata=PDFMetadata(),
            pages=[],
            version="1.1"
        )

        try:
            # Get metadata and page count from main thread
            with pdf_path.open('rb') as file:
                reader = PdfReader(file)
                result.metadata = self._extract_metadata(pdf_path, reader)
                total_pages = len(reader.pages)

            # Handle image directory setup
            image_dir = None
            if self.extract_images and self.save_images_locally:
                if self.image_save_dir:
                    image_dir = Path(self.image_save_dir)
                    image_dir.mkdir(exist_ok=True, parents=True)
                else:
                    self._temp_dir = tempfile.mkdtemp(prefix='pdf_images_')
                    image_dir = Path(self._temp_dir)

            def process_page_safely(page_num: int):
                # Each thread opens its own file handle
                with pdf_path.open('rb') as file:
                    thread_reader = PdfReader(file)
                    page = thread_reader.pages[page_num]
                    self.current_page_number = page_num + 1
                    return self._process_page(page, image_dir)

            # Process pages in parallel batches
            with concurrent.futures.ThreadPoolExecutor(max_workers=self.batch_size) as executor:
                futures = []
                for page_num in range(total_pages):
                    future = executor.submit(process_page_safely, page_num)
                    futures.append((page_num + 1, future))

                # Collect results in order
                result.pages = [None] * total_pages
                for page_num, future in futures:
                    try:
                        pdf_page = future.result()
                        result.pages[page_num - 1] = pdf_page
                    except Exception as e:
                        logger.error(f"Failed to process page {page_num}: {str(e)}")
                        raise

        except Exception as e:
            logger.error(f"Failed to process PDF: {str(e)}")
            raise
        finally:
            # Cleanup temp directory if it was created
            if self._temp_dir and not self.image_save_dir:
                import shutil
                try:
                    shutil.rmtree(self._temp_dir)
                except Exception as e:
                    logger.error(f"Failed to cleanup temp directory: {str(e)}")

        result.processing_time = time() - start_time
        return result
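One caveat worth flagging: process_batch shares self.current_page_number across worker threads, so under contention _process_page can read a neighbor's page number. Driving the parallel path looks like this (worker count and path are illustrative):

strategy = NaivePDFProcessorStrategy(batch_size=8)  # 8 worker threads
result = strategy.process_batch(Path("large_report.pdf"))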
    def _process_page(self, page, image_dir: Optional[Path]) -> PDFPage:
        pdf_page = PDFPage(
            page_number=self.current_page_number,
        )

        # Text and font extraction
        def visitor_text(text, cm, tm, font_dict, font_size):
            pdf_page.raw_text += text
            pdf_page.layout.append({
                "type": "text",
                "text": text,
                "x": tm[4],
                "y": tm[5],
            })

        page.extract_text(visitor_text=visitor_text)

        # Image extraction
        if self.extract_images:
            pdf_page.images = self._extract_images(page, image_dir)

        # Link extraction
        pdf_page.links = self._extract_links(page)

        # Add markdown content
        pdf_page.markdown = clean_pdf_text(self.current_page_number, pdf_page.raw_text)
        pdf_page.html = clean_pdf_text_to_html(self.current_page_number, pdf_page.raw_text)

        return pdf_page
    def _extract_images(self, page, image_dir: Optional[Path]) -> List[Dict]:
        # Import PyPDF2 for type checking only when needed
        try:
            import PyPDF2
        except ImportError:
            raise ImportError("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")

        if not self.extract_images:
            return []

        images = []
        try:
            resources = page.get("/Resources")
            if resources:  # Check if resources exist
                resources = resources.get_object()  # Resolve IndirectObject
                if '/XObject' in resources:
                    xobjects = resources['/XObject'].get_object()
                    img_count = 0
                    for obj_name in xobjects:
                        xobj = xobjects[obj_name]
                        if hasattr(xobj, 'get_object') and callable(xobj.get_object):
                            xobj = xobj.get_object()
                        if xobj.get('/Subtype') == '/Image':
                            try:
                                img_count += 1
                                img_filename = f"page_{self.current_page_number}_img_{img_count}"
                                data = xobj.get_data()
                                filters = xobj.get('/Filter', [])
                                if not isinstance(filters, list):
                                    filters = [filters]

                                # Resolve IndirectObjects in properties
                                width = xobj.get('/Width', 0)
                                height = xobj.get('/Height', 0)
                                color_space = xobj.get('/ColorSpace', '/DeviceRGB')
                                if isinstance(color_space, PyPDF2.generic.IndirectObject):
                                    color_space = color_space.get_object()

                                # Handle different image encodings
                                success = False
                                image_format = 'bin'
                                image_data = None

                                if '/FlateDecode' in filters:
                                    try:
                                        decode_parms = xobj.get('/DecodeParms', {})
                                        if isinstance(decode_parms, PyPDF2.generic.IndirectObject):
                                            decode_parms = decode_parms.get_object()

                                        predictor = decode_parms.get('/Predictor', 1)
                                        bits = xobj.get('/BitsPerComponent', 8)
                                        colors = 3 if color_space == '/DeviceRGB' else 1

                                        if predictor >= 10:
                                            data = apply_png_predictor(data, width, bits, colors)

                                        # Create PIL Image
                                        from PIL import Image
                                        mode = 'RGB' if color_space == '/DeviceRGB' else 'L'
                                        img = Image.frombytes(mode, (width, height), data)

                                        if self.save_images_locally:
                                            final_path = (image_dir / img_filename).with_suffix('.png')
                                            img.save(final_path)
                                            image_data = str(final_path)
                                        else:
                                            import io
                                            img_byte_arr = io.BytesIO()
                                            img.save(img_byte_arr, format='PNG')
                                            image_data = base64.b64encode(img_byte_arr.getvalue()).decode('utf-8')

                                        success = True
                                        image_format = 'png'
                                    except Exception as e:
                                        logger.error(f"FlateDecode error: {str(e)}")

                                elif '/DCTDecode' in filters:
                                    # JPEG image
                                    try:
                                        if self.save_images_locally:
                                            final_path = (image_dir / img_filename).with_suffix('.jpg')
                                            with open(final_path, 'wb') as f:
                                                f.write(data)
                                            image_data = str(final_path)
                                        else:
                                            image_data = base64.b64encode(data).decode('utf-8')
                                        success = True
                                        image_format = 'jpeg'
                                    except Exception as e:
                                        logger.error(f"JPEG save error: {str(e)}")

                                elif '/CCITTFaxDecode' in filters:
                                    try:
                                        if data[:4] != b'II*\x00':
                                            # Add TIFF header if missing
                                            tiff_header = b'II*\x00\x08\x00\x00\x00\x0e\x00\x00\x01\x03\x00\x01\x00\x00\x00' + \
                                                          width.to_bytes(4, 'little') + \
                                                          b'\x01\x03\x00\x01\x00\x00\x00' + \
                                                          height.to_bytes(4, 'little') + \
                                                          b'\x01\x12\x00\x03\x00\x00\x00\x01\x00\x01\x00\x00\x01\x17\x00\x04\x00\x00\x00\x01\x00\x00\x00J\x01\x1B\x00\x05\x00\x00\x00\x01\x00\x00\x00R\x01\x28\x00\x03\x00\x00\x00\x01\x00\x02\x00\x00'
                                            data = tiff_header + data

                                        if self.save_images_locally:
                                            final_path = (image_dir / img_filename).with_suffix('.tiff')
                                            with open(final_path, 'wb') as f:
                                                f.write(data)
                                            image_data = str(final_path)
                                        else:
                                            image_data = base64.b64encode(data).decode('utf-8')
                                        success = True
                                        image_format = 'tiff'
                                    except Exception as e:
                                        logger.error(f"CCITT save error: {str(e)}")

                                elif '/JPXDecode' in filters:
                                    # JPEG 2000
                                    try:
                                        if self.save_images_locally:
                                            final_path = (image_dir / img_filename).with_suffix('.jp2')
                                            with open(final_path, 'wb') as f:
                                                f.write(data)
                                            image_data = str(final_path)
                                        else:
                                            image_data = base64.b64encode(data).decode('utf-8')
                                        success = True
                                        image_format = 'jpeg2000'
                                    except Exception as e:
                                        logger.error(f"JPEG2000 save error: {str(e)}")

                                if success and image_data:
                                    image_info = {
                                        "format": image_format,
                                        "width": width,
                                        "height": height,
                                        "color_space": str(color_space),
                                        "bits_per_component": xobj.get('/BitsPerComponent', 1)
                                    }

                                    if self.save_images_locally:
                                        image_info["path"] = image_data
                                    else:
                                        image_info["data"] = image_data

                                    images.append(image_info)
                                else:
                                    # Fallback: Save raw data
                                    if self.save_images_locally:
                                        final_path = (image_dir / img_filename).with_suffix('.bin')
                                        with open(final_path, 'wb') as f:
                                            f.write(data)
                                        logger.warning(f"Saved raw image data to {final_path}")
                                    else:
                                        image_data = base64.b64encode(data).decode('utf-8')
                                        images.append({
                                            "format": "bin",
                                            "width": width,
                                            "height": height,
                                            "color_space": str(color_space),
                                            "bits_per_component": xobj.get('/BitsPerComponent', 1),
                                            "data": image_data
                                        })

                            except Exception as e:
                                logger.error(f"Error processing image: {str(e)}")
        except Exception as e:
            logger.error(f"Image extraction error: {str(e)}")

        return images
    def _extract_links(self, page) -> List[str]:
        links = []
        if '/Annots' in page:
            try:
                for annot in page['/Annots']:
                    a = annot.get_object()
                    if '/A' in a and '/URI' in a['/A']:
                        links.append(a['/A']['/URI'])
            except Exception as e:
                logger.error(f"Link error: {str(e)}")
        return links
    def _extract_metadata(self, pdf_path: Path, reader=None) -> PDFMetadata:
        # Import inside method to allow dependency to be optional
        if reader is None:
            try:
                from PyPDF2 import PdfReader
                reader = PdfReader(pdf_path)
            except ImportError:
                raise ImportError("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")

        meta = reader.metadata or {}
        created = self._parse_pdf_date(meta.get('/CreationDate', ''))
        modified = self._parse_pdf_date(meta.get('/ModDate', ''))

        return PDFMetadata(
            title=meta.get('/Title'),
            author=meta.get('/Author'),
            producer=meta.get('/Producer'),
            created=created,
            modified=modified,
            pages=len(reader.pages),
            encrypted=reader.is_encrypted,
            file_size=pdf_path.stat().st_size
        )

    def _parse_pdf_date(self, date_str: str) -> Optional[datetime]:
        try:
            match = re.match(r'D:(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})', date_str)
            if not match:
                return None

            return datetime(
                year=int(match[1]),
                month=int(match[2]),
                day=int(match[3]),
                hour=int(match[4]),
                minute=int(match[5]),
                second=int(match[6])
            )
        except Exception:
            return None
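The regex above targets the standard PDF date prefix D:YYYYMMDDHHMMSS; for illustration:

# _parse_pdf_date("D:20240315120000")  ->  datetime(2024, 3, 15, 12, 0)
# _parse_pdf_date("garbage")           ->  None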
# Usage example
if __name__ == "__main__":
    import json
    from pathlib import Path

    try:
        # Import PyPDF2 only when running the file directly
        import PyPDF2
        from PyPDF2 import PdfReader
    except ImportError:
        print("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")
        exit(1)

    current_dir = Path(__file__).resolve().parent
    pdf_path = f'{current_dir}/test.pdf'

    strategy = NaivePDFProcessorStrategy()
    result = strategy.process(Path(pdf_path))

    # Convert to JSON
    json_output = asdict(result)
    print(json.dumps(json_output, indent=2, default=str))

    # result.pages holds PDFPage dataclasses, so use attribute access
    with open(f'{current_dir}/test.html', 'w') as f:
        for page in result.pages:
            f.write(f'<h1>Page {page.page_number}</h1>')
            f.write(page.html)
    with open(f'{current_dir}/test.md', 'w') as f:
        for page in result.pages:
            f.write(f'# Page {page.page_number}\n\n')
            f.write(clean_pdf_text(page.page_number, page.raw_text))
            f.write('\n\n')
350
crawl4ai/processors/pdf/utils.py
Normal file
@@ -0,0 +1,350 @@
import re

def apply_png_predictor(data, width, bits, color_channels):
    """Decode PNG predictor (PDF 1.5+ filter)"""
    bytes_per_pixel = (bits * color_channels) // 8
    if (bits * color_channels) % 8 != 0:
        bytes_per_pixel += 1

    stride = width * bytes_per_pixel
    scanline_length = stride + 1  # +1 for filter byte

    if len(data) % scanline_length != 0:
        raise ValueError("Invalid scanline structure")

    num_lines = len(data) // scanline_length
    output = bytearray()
    prev_line = b'\x00' * stride

    for i in range(num_lines):
        line = data[i*scanline_length:(i+1)*scanline_length]
        filter_type = line[0]
        filtered = line[1:]

        if filter_type == 0:  # None
            decoded = filtered
        elif filter_type == 1:  # Sub
            decoded = bytearray(filtered)
            for j in range(bytes_per_pixel, len(decoded)):
                decoded[j] = (decoded[j] + decoded[j - bytes_per_pixel]) % 256
        elif filter_type == 2:  # Up
            decoded = bytearray([(filtered[j] + prev_line[j]) % 256
                                 for j in range(len(filtered))])
        elif filter_type == 3:  # Average
            decoded = bytearray(filtered)
            for j in range(len(decoded)):
                left = decoded[j - bytes_per_pixel] if j >= bytes_per_pixel else 0
                up = prev_line[j]
                avg = (left + up) // 2
                decoded[j] = (decoded[j] + avg) % 256
        elif filter_type == 4:  # Paeth
            decoded = bytearray(filtered)
            for j in range(len(decoded)):
                left = decoded[j - bytes_per_pixel] if j >= bytes_per_pixel else 0
                up = prev_line[j]
                up_left = prev_line[j - bytes_per_pixel] if j >= bytes_per_pixel else 0
                paeth = paeth_predictor(left, up, up_left)
                decoded[j] = (decoded[j] + paeth) % 256
        else:
            raise ValueError(f"Unsupported filter type: {filter_type}")

        output.extend(decoded)
        prev_line = decoded

    return bytes(output)

def paeth_predictor(a, b, c):
    p = a + b - c
    pa = abs(p - a)
    pb = abs(p - b)
    pc = abs(p - c)
    if pa <= pb and pa <= pc:
        return a
    elif pb <= pc:
        return b
    else:
        return c
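A tiny sanity check of the Sub-filter path, assuming one 4-pixel-wide, 8-bit grayscale scanline (1 byte per pixel):

raw = bytes([1, 10, 5, 5, 5])  # filter byte 1 (Sub), then per-pixel deltas
decoded = apply_png_predictor(raw, width=4, bits=8, color_channels=1)
assert decoded == bytes([10, 15, 20, 25])  # each byte adds the pixel to its left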
import re
import html

def clean_pdf_text_to_html(page_number, text):
    # Decode Unicode escapes and handle surrogate pairs
    try:
        decoded = text.encode('latin-1').decode('unicode-escape')
        decoded = decoded.encode('utf-16', 'surrogatepass').decode('utf-16')
    except Exception:
        decoded = text  # Fallback if decoding fails

    article_title_detected = False
    # decoded = re.sub(r'\.\n', '.\n\n', decoded)
    # decoded = re.sub(r'\.\n', '<|break|>', decoded)
    lines = decoded.split('\n')
    output = []
    current_paragraph = []
    in_header = False
    email_pattern = re.compile(r'\{.*?\}')
    affiliation_pattern = re.compile(r'^†')
    quote_pattern = re.compile(r'^["“]')
    author_pattern = re.compile(
        r'^\s*[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?'
        r'(?:,\s*[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?)*'
        r'(?:,\s*(?:and|&)\s+[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?)?\s*$'
    )

    def flush_paragraph():
        if current_paragraph:
            para = ' '.join(current_paragraph)
            para = re.sub(r'\s+', ' ', para).strip()
            if para:
                # escaped_para = html.escape(para)
                escaped_para = para
                # escaped_para = re.sub(r'\.\n', '.\n\n', escaped_para)
                # Split escaped_para by <|break|> to avoid HTML escaping
                escaped_para = escaped_para.split('.\n\n')
                # Wrap each part in <p> tag
                escaped_para = [f'<p>{part}</p>' for part in escaped_para]
                output.append(f'<div class="paragraph">{"".join(escaped_para)}</div><hr/>')
            current_paragraph.clear()

    for i, line in enumerate(lines):
        line = line.strip()

        # Handle empty lines
        if not line:
            flush_paragraph()
            continue

        # Detect article title (first line with reasonable length)
        if not article_title_detected and i == 0 and 3 <= len(line.split()) <= 8 and len(lines) > 1:
            flush_paragraph()
            escaped_line = html.escape(line)
            output.append(f'<h2>{escaped_line}</h2>')
            article_title_detected = True
            continue

        # Detect numbered headers like "2.1 Background"
        numbered_header = re.match(r'^(\d+(?:\.\d+)*)\s+(.+)$', line)
        if i > 0 and not lines[i-1].strip() and numbered_header:
            flush_paragraph()
            level = numbered_header.group(1).count('.') + 1
            header_text = numbered_header.group(2)
            md_level = min(level + 1, 6)
            escaped_header = html.escape(header_text)
            output.append(f'<h{md_level}>{escaped_header}</h{md_level}>')
            in_header = True
            continue

        # Detect authors
        if page_number == 1 and author_pattern.match(line):
            authors = re.sub(r'[†â€]', '', line)
            authors = re.split(r', | and ', authors)
            formatted_authors = []
            for author in authors:
                if author.strip():
                    parts = [p for p in author.strip().split() if p]
                    formatted = ' '.join(parts)
                    escaped_author = html.escape(formatted)
                    formatted_authors.append(f'<strong>{escaped_author}</strong>')

            if len(formatted_authors) > 1:
                joined = ', '.join(formatted_authors[:-1]) + ' and ' + formatted_authors[-1]
            else:
                joined = formatted_authors[0]

            output.append(f'<p>{joined}</p>')
            continue

        # Detect affiliation
        if affiliation_pattern.match(line):
            escaped_line = html.escape(line)
            output.append(f'<p><em>{escaped_line}</em></p>')
            continue

        # Detect emails
        if email_pattern.match(line):
            escaped_line = html.escape(line)
            output.append(f'<p><code>{escaped_line}</code></p>')
            continue

        # Detect section headers
        if re.match(r'^(Abstract|\d+\s+[A-Z]|References|Appendix|Figure|Table)', line):
            flush_paragraph()
            escaped_line = html.escape(line)
            output.append(f'<h2 class="section-header"><em>{escaped_line}</em></h2>')
            in_header = True
            continue

        # Handle quotes
        if quote_pattern.match(line):
            flush_paragraph()
            escaped_line = html.escape(line)
            output.append(f'<blockquote><p>{escaped_line}</p></blockquote>')
            continue

        # Handle hyphenated words
        if line.endswith('-'):
            current_paragraph.append(line[:-1].strip())
        else:
            current_paragraph.append(line)

        # Handle paragraph breaks after headers
        if in_header and not line.endswith(('.', '!', '?')):
            flush_paragraph()
            in_header = False

    flush_paragraph()

    # Post-process HTML
    html_output = '\n'.join(output)

    # Fix common citation patterns
    html_output = re.sub(r'\(([A-Z][a-z]+ et al\. \d{4})\)', r'<cite>\1</cite>', html_output)

    # Fix escaped characters
    html_output = html_output.replace('\\ud835', '').replace('\\u2020', '†')

    # Remove leftover hyphens and fix spacing
    html_output = re.sub(r'\s+-\s+', '', html_output)
    html_output = re.sub(r'\s+([.,!?)])', r'\1', html_output)

    return html_output
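For orientation, how the header rules above render (illustrative inputs):

# blank line + "2.1 Background"  ->  <h3>Background</h3>
# "References" on a page > 1     ->  <h2 class="section-header"><em>References</em></h2>
# (on page 1, a single capitalized word can be caught by the author rule first)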
def clean_pdf_text(page_number, text):
    # Decode Unicode escapes and handle surrogate pairs
    try:
        decoded = text.encode('latin-1').decode('unicode-escape')
        decoded = decoded.encode('utf-16', 'surrogatepass').decode('utf-16')
    except Exception:
        decoded = text  # Fallback if decoding fails

    article_title_detected = False
    decoded = re.sub(r'\.\n', '.\n\n', decoded)
    lines = decoded.split('\n')
    output = []
    current_paragraph = []
    in_header = False
    email_pattern = re.compile(r'\{.*?\}')
    affiliation_pattern = re.compile(r'^†')
    quote_pattern = re.compile(r'^["“]')
    author_pattern = re.compile(
        r'^\s*[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?'
        r'(?:,\s*[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?)*'
        r'(?:,\s*(?:and|&)\s+[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?)?\s*$'
    )

    def flush_paragraph():
        if current_paragraph:
            para = ' '.join(current_paragraph)
            para = re.sub(r'\s+', ' ', para).strip()
            if para:
                output.append(para)
            current_paragraph.clear()

    for i, line in enumerate(lines):
        line = line.strip()

        # Handle special patterns
        if not line:
            flush_paragraph()
            continue

        # Detect headline (first line, reasonable length, surrounded by empty lines)
        if not article_title_detected and i == 0 and 3 <= len(line.split()) <= 8 and len(lines) > 1:
            flush_paragraph()
            output.append(f'## {line}')
            continue

        # Detect paragraph breaks for ALL paragraphs
        if not line and current_paragraph:
            flush_paragraph()
            output.append('')  # Add empty line between paragraphs
            continue

        # Detect numbered headers like "2.1 Background"
        numbered_header = re.match(r'^(\d+(?:\.\d+)*)\s+(.+)$', line)
        if i > 0 and not lines[i-1].strip() and numbered_header:
            flush_paragraph()
            level = numbered_header.group(1).count('.') + 1  # Convert 2.1 → level 2
            header_text = numbered_header.group(2)
            # Cap heading depth at ###### (h6)
            md_level = min(level + 1, 6)  # 1 → ##, 2 → ###, 3 → #### etc
            output.append(f'{"#" * md_level} {header_text}')
            in_header = True
            continue

        # Detect authors
        if page_number == 1 and author_pattern.match(line):
            # Clean and format author names
            authors = re.sub(r'[†â€]', '', line)  # Remove affiliation markers
            authors = re.split(r', | and ', authors)
            formatted_authors = []
            for author in authors:
                if author.strip():
                    # Handle "First Last" formatting
                    parts = [p for p in author.strip().split() if p]
                    formatted = ' '.join(parts)
                    formatted_authors.append(f'**{formatted}**')

            # Join with commas and "and"
            if len(formatted_authors) > 1:
                joined = ', '.join(formatted_authors[:-1]) + ' and ' + formatted_authors[-1]
            else:
                joined = formatted_authors[0]

            output.append(joined)
            continue

        # Detect affiliation
        if affiliation_pattern.match(line):
            output.append(f'*{line}*')
            continue

        # Detect emails
        if email_pattern.match(line):
            output.append(f'`{line}`')
            continue

        # Detect section headers
        if re.match(r'^(Abstract|\d+\s+[A-Z]|References|Appendix|Figure|Table)', line):
            flush_paragraph()
            output.append(f'_[{line}]_')
            in_header = True
            continue

        # Handle quotes
        if quote_pattern.match(line):
            flush_paragraph()
            output.append(f'> {line}')
            continue

        # Handle hyphenated words
        if line.endswith('-'):
            current_paragraph.append(line[:-1].strip())
        else:
            current_paragraph.append(line)

        # Handle paragraph breaks after headers
        if in_header and not line.endswith(('.', '!', '?')):
            flush_paragraph()
            in_header = False

    flush_paragraph()

    # Post-processing
    markdown = '\n\n'.join(output)

    # Fix common citation patterns
    markdown = re.sub(r'\(([A-Z][a-z]+ et al\. \d{4})\)', r'[\1]', markdown)

    # Fix escaped characters
    markdown = markdown.replace('\\ud835', '').replace('\\u2020', '†')

    # Remove leftover hyphens and fix spacing
    markdown = re.sub(r'\s+-\s+', '', markdown)  # Join hyphenated words
    markdown = re.sub(r'\s+([.,!?)])', r'\1', markdown)  # Fix punctuation spacing

    return markdown
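And the markdown counterpart of the same rule (a preceding blank line is required):

# blank line + "2.1 Background"  ->  "### Background"
# blank line + "3 Methods"       ->  "## Methods"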
@@ -198,25 +198,70 @@ Avoid Common Mistakes:
- Do NOT add any comments using "//" or "#" in the JSON output. It causes parsing errors.
- Make sure the JSON is properly formatted with curly braces, square brackets, and commas in the right places.
- Do not miss closing </blocks> tag at the end of the JSON output.
- Do not generate the Python coee show me how to do the task, this is your task to extract the information and return it in JSON format.
- Do not generate the Python code show me how to do the task, this is your task to extract the information and return it in JSON format.

Result
Output the final list of JSON objects, wrapped in <blocks>...</blocks> XML tags. Make sure to close the tag properly."""

PROMPT_EXTRACT_INFERRED_SCHEMA = """Here is the content from the URL:
<url>{URL}</url>

<url_content>
{HTML}
</url_content>

Please carefully read the URL content and the user's request. Analyze the page structure and infer the most appropriate JSON schema based on the content and request.

Extraction Strategy:
1. First, determine if the page contains repetitive items (like multiple products, articles, etc.) or a single content item (like a single article or page).
2. For repetitive items: Identify the common pattern and extract each instance as a separate JSON object in an array.
3. For single content: Extract the key information into a comprehensive JSON object that captures the essential details.

Extraction instructions:
Return the extracted information as a list of JSON objects. For repetitive content, each object in the list should correspond to a distinct item. For single content, you may return just one detailed JSON object. Wrap the entire JSON list in <blocks>...</blocks> XML tags.

Schema Design Guidelines:
- Create meaningful property names that clearly describe the data they contain
- Use nested objects for hierarchical information
- Use arrays for lists of related items
- Include all information requested by the user
- Maintain consistency in property names and data structures
- Only include properties that are actually present in the content
- For dates, prefer ISO format (YYYY-MM-DD)
- For prices or numeric values, extract them without currency symbols when possible

Quality Reflection:
Before outputting your final answer, double check that:
1. The inferred schema makes logical sense for the type of content
2. All requested information is included
3. The JSON is valid and could be parsed without errors
4. Property names are consistent and descriptive
5. The structure is optimal for the type of data being represented

Avoid Common Mistakes:
- Do NOT add any comments using "//" or "#" in the JSON output. It causes parsing errors.
- Make sure the JSON is properly formatted with curly braces, square brackets, and commas in the right places.
- Do not miss closing </blocks> tag at the end of the JSON output.
- Do not generate Python code showing how to do the task; this is your task to extract the information and return it in JSON format.
- Ensure consistency in property names across all objects
- Don't include empty properties or null values unless they're meaningful
- For repetitive content, ensure all objects follow the same schema

Important: If user specific instruction is provided, then stress significantly on what user is requesting and describing about the schema of end result (if any). If user is requesting to extract specific information, then focus on that and ignore the rest of the content.
<user_request>
{REQUEST}
</user_request>

Result:
Output the final list of JSON objects, wrapped in <blocks>...</blocks> XML tags. Make sure to close the tag properly.

DO NOT ADD ANY PRE OR POST COMMENTS. JUST RETURN THE JSON OBJECTS INSIDE <blocks>...</blocks> TAGS.

CRITICAL: The content inside the <blocks> tags MUST be a direct array of JSON objects (starting with '[' and ending with ']'), not a dictionary/object containing an array. For example, use <blocks>[{...}, {...}]</blocks> instead of <blocks>{"items": [{...}, {...}]}</blocks>. This is essential for proper parsing.
"""
PROMPT_FILTER_CONTENT = """Your task is to filter and convert HTML content into clean, focused markdown that's optimized for use with LLMs and information retrieval systems.

INPUT HTML:
<|HTML_CONTENT_START|>
{HTML}
<|HTML_CONTENT_END|>


SPECIFIC INSTRUCTION:
<|USER_INSTRUCTION_START|>
{REQUEST}
<|USER_INSTRUCTION_END|>

TASK DETAILS:
1. Content Selection
- DO: Keep essential information, main content, key details
@@ -240,15 +285,7 @@ TASK DETAILS:
- DON'T: Fragment related content
- DON'T: Duplicate information

Example Input:
<div class="main-content"><h1>Setup Guide</h1><p>Follow these steps...</p></div>
<div class="sidebar">Related articles...</div>

Example Output:
# Setup Guide
Follow these steps...

IMPORTANT: If specific instruction is provided above, prioritize those requirements over these general guidelines.
IMPORTANT: If user specific instruction is provided, ignore above guideline and prioritize those requirements over these general guidelines.

OUTPUT FORMAT:
Wrap your response in <content> tags. Use proper markdown throughout.
@@ -256,7 +293,18 @@ Wrap your response in <content> tags. Use proper markdown throughout.
<content>
[Your markdown content here]
</content>

Begin filtering now."""
Begin filtering now.

--------------------------------------------

<|HTML_CONTENT_START|>
{HTML}
<|HTML_CONTENT_END|>

<|USER_INSTRUCTION_START|>
{REQUEST}
<|USER_INSTRUCTION_END|>
"""

JSON_SCHEMA_BUILDER = """
# HTML Schema Generation Instructions
158
crawl4ai/proxy_strategy.py
Normal file
@@ -0,0 +1,158 @@
from typing import List, Dict, Optional
from abc import ABC, abstractmethod
from itertools import cycle
import os


########### ATTENTION PEOPLE OF EARTH ###########
# I have moved this config to async_configs.py and kept it here in case someone is
# still importing it; however, be a dear and use `from crawl4ai import ProxyConfig` instead :)
class ProxyConfig:
    def __init__(
        self,
        server: str,
        username: Optional[str] = None,
        password: Optional[str] = None,
        ip: Optional[str] = None,
    ):
        """Configuration class for a single proxy.

        Args:
            server: Proxy server URL (e.g., "http://127.0.0.1:8080")
            username: Optional username for proxy authentication
            password: Optional password for proxy authentication
            ip: Optional IP address for verification purposes
        """
        self.server = server
        self.username = username
        self.password = password

        # Extract IP from server if not explicitly provided
        self.ip = ip or self._extract_ip_from_server()

    def _extract_ip_from_server(self) -> Optional[str]:
        """Extract IP address from server URL."""
        try:
            # Simple extraction assuming http://ip:port format
            if "://" in self.server:
                parts = self.server.split("://")[1].split(":")
                return parts[0]
            else:
                parts = self.server.split(":")
                return parts[0]
        except Exception:
            return None

    @staticmethod
    def from_string(proxy_str: str) -> "ProxyConfig":
        """Create a ProxyConfig from a string in the format 'ip:port:username:password'."""
        parts = proxy_str.split(":")
        if len(parts) == 4:  # ip:port:username:password
            ip, port, username, password = parts
            return ProxyConfig(
                server=f"http://{ip}:{port}",
                username=username,
                password=password,
                ip=ip
            )
        elif len(parts) == 2:  # ip:port only
            ip, port = parts
            return ProxyConfig(
                server=f"http://{ip}:{port}",
                ip=ip
            )
        else:
            raise ValueError(f"Invalid proxy string format: {proxy_str}")

    @staticmethod
    def from_dict(proxy_dict: Dict) -> "ProxyConfig":
        """Create a ProxyConfig from a dictionary."""
        return ProxyConfig(
            server=proxy_dict.get("server"),
            username=proxy_dict.get("username"),
            password=proxy_dict.get("password"),
            ip=proxy_dict.get("ip")
        )

    @staticmethod
    def from_env(env_var: str = "PROXIES") -> List["ProxyConfig"]:
        """Load proxies from environment variable.

        Args:
            env_var: Name of environment variable containing comma-separated proxy strings

        Returns:
            List of ProxyConfig objects
        """
        proxies = []
        try:
            proxy_list = os.getenv(env_var, "").split(",")
            for proxy in proxy_list:
                if not proxy:
                    continue
                proxies.append(ProxyConfig.from_string(proxy))
        except Exception as e:
            print(f"Error loading proxies from environment: {e}")
        return proxies

    def to_dict(self) -> Dict:
        """Convert to dictionary representation."""
        return {
            "server": self.server,
            "username": self.username,
            "password": self.password,
            "ip": self.ip
        }

    def clone(self, **kwargs) -> "ProxyConfig":
        """Create a copy of this configuration with updated values.

        Args:
            **kwargs: Key-value pairs of configuration options to update

        Returns:
            ProxyConfig: A new instance with the specified updates
        """
        config_dict = self.to_dict()
        config_dict.update(kwargs)
        return ProxyConfig.from_dict(config_dict)
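A quick sketch of the construction paths; the addresses and credentials below are placeholders:

p1 = ProxyConfig.from_string("192.168.1.10:8080:user:secret")
p2 = ProxyConfig.from_string("192.168.1.11:8080")  # no auth
print(p1.server, p1.ip)  # http://192.168.1.10:8080 192.168.1.10

# PROXIES="ip:port:user:pass,ip2:port2" -> [ProxyConfig, ProxyConfig]
pool = ProxyConfig.from_env("PROXIES")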
class ProxyRotationStrategy(ABC):
    """Base abstract class for proxy rotation strategies"""

    @abstractmethod
    async def get_next_proxy(self) -> Optional[ProxyConfig]:
        """Get next proxy configuration from the strategy"""
        pass

    @abstractmethod
    def add_proxies(self, proxies: List[ProxyConfig]):
        """Add proxy configurations to the strategy"""
        pass

class RoundRobinProxyStrategy:
    """Simple round-robin proxy rotation strategy using ProxyConfig objects"""

    def __init__(self, proxies: List[ProxyConfig] = None):
        """
        Initialize with optional list of proxy configurations

        Args:
            proxies: List of ProxyConfig objects
        """
        self._proxies = []
        self._proxy_cycle = None
        if proxies:
            self.add_proxies(proxies)

    def add_proxies(self, proxies: List[ProxyConfig]):
        """Add new proxies to the rotation pool"""
        self._proxies.extend(proxies)
        self._proxy_cycle = cycle(self._proxies)

    async def get_next_proxy(self) -> Optional[ProxyConfig]:
        """Get next proxy in round-robin fashion"""
        if not self._proxy_cycle:
            return None
        return next(self._proxy_cycle)
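And a minimal rotation demo with placeholder endpoints:

import asyncio

strategy = RoundRobinProxyStrategy([
    ProxyConfig.from_string("192.168.1.10:8080"),
    ProxyConfig.from_string("192.168.1.11:8080"),
])

async def demo():
    for _ in range(4):
        proxy = await strategy.get_next_proxy()
        print(proxy.server)  # alternates between the two servers

asyncio.run(demo())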
@@ -9,83 +9,44 @@ from urllib.parse import urlparse
import OpenSSL.crypto
from pathlib import Path


class SSLCertificate:
# === Inherit from dict ===
class SSLCertificate(dict):
    """
    A class representing an SSL certificate with methods to export in various formats.
    A class representing an SSL certificate, behaving like a dictionary
    for direct JSON serialization. It stores the certificate information internally
    and provides methods for export and property access.

    Attributes:
        cert_info (Dict[str, Any]): The certificate information.

    Methods:
        from_url(url: str, timeout: int = 10) -> Optional['SSLCertificate']: Create SSLCertificate instance from a URL.
        from_file(file_path: str) -> Optional['SSLCertificate']: Create SSLCertificate instance from a file.
        from_binary(binary_data: bytes) -> Optional['SSLCertificate']: Create SSLCertificate instance from binary data.
        export_as_pem() -> str: Export the certificate as PEM format.
        export_as_der() -> bytes: Export the certificate as DER format.
        export_as_json() -> Dict[str, Any]: Export the certificate as JSON format.
        export_as_text() -> str: Export the certificate as text format.
    Inherits from dict, so instances are directly JSON serializable.
    """

    # Use __slots__ for potential memory optimization if desired, though less common when inheriting dict
    # __slots__ = ("_cert_info",)  # If using slots, be careful with dict inheritance interaction

    def __init__(self, cert_info: Dict[str, Any]):
        self._cert_info = self._decode_cert_data(cert_info)

    @staticmethod
    def from_url(url: str, timeout: int = 10) -> Optional["SSLCertificate"]:
        """
        Create SSLCertificate instance from a URL.
        Initializes the SSLCertificate object.

        Args:
            url (str): URL of the website.
            timeout (int): Timeout for the connection (default: 10).

        Returns:
            Optional[SSLCertificate]: SSLCertificate instance if successful, None otherwise.
            cert_info (Dict[str, Any]): The raw certificate dictionary.
        """
        try:
            hostname = urlparse(url).netloc
            if ":" in hostname:
                hostname = hostname.split(":")[0]
        # 1. Decode the data (handle bytes -> str)
        decoded_info = self._decode_cert_data(cert_info)

            context = ssl.create_default_context()
            with socket.create_connection((hostname, 443), timeout=timeout) as sock:
                with context.wrap_socket(sock, server_hostname=hostname) as ssock:
                    cert_binary = ssock.getpeercert(binary_form=True)
                    x509 = OpenSSL.crypto.load_certificate(
                        OpenSSL.crypto.FILETYPE_ASN1, cert_binary
                    )
        # 2. Store the decoded info internally (optional but good practice)
        # self._cert_info = decoded_info  # You can keep this if methods rely on it

                    cert_info = {
                        "subject": dict(x509.get_subject().get_components()),
                        "issuer": dict(x509.get_issuer().get_components()),
                        "version": x509.get_version(),
                        "serial_number": hex(x509.get_serial_number()),
                        "not_before": x509.get_notBefore(),
                        "not_after": x509.get_notAfter(),
                        "fingerprint": x509.digest("sha256").hex(),
                        "signature_algorithm": x509.get_signature_algorithm(),
                        "raw_cert": base64.b64encode(cert_binary),
                    }

                    # Add extensions
                    extensions = []
                    for i in range(x509.get_extension_count()):
                        ext = x509.get_extension(i)
                        extensions.append(
                            {"name": ext.get_short_name(), "value": str(ext)}
                        )
                    cert_info["extensions"] = extensions

                    return SSLCertificate(cert_info)

        except Exception:
            return None
        # 3. Initialize the dictionary part of the object with the decoded data
        super().__init__(decoded_info)

    @staticmethod
    def _decode_cert_data(data: Any) -> Any:
        """Helper method to decode bytes in certificate data."""
        if isinstance(data, bytes):
            return data.decode("utf-8")
            try:
                # Try UTF-8 first, fallback to latin-1 for arbitrary bytes
                return data.decode("utf-8")
            except UnicodeDecodeError:
                return data.decode("latin-1")  # Or handle as needed, maybe hex representation
        elif isinstance(data, dict):
            return {
                (
@@ -97,36 +58,119 @@ class SSLCertificate:
            return [SSLCertificate._decode_cert_data(item) for item in data]
        return data

    @staticmethod
    def from_url(url: str, timeout: int = 10) -> Optional["SSLCertificate"]:
        """
        Create SSLCertificate instance from a URL. Fetches cert info and initializes.
        (Fetching logic remains the same)
        """
        cert_info_raw = None  # Variable to hold the fetched dict
        try:
            hostname = urlparse(url).netloc
            if ":" in hostname:
                hostname = hostname.split(":")[0]

            context = ssl.create_default_context()
            # Set check_hostname to False and verify_mode to CERT_NONE temporarily
            # for potentially problematic certificates during fetch, but parse the result regardless.
            # context.check_hostname = False
            # context.verify_mode = ssl.CERT_NONE

            with socket.create_connection((hostname, 443), timeout=timeout) as sock:
                with context.wrap_socket(sock, server_hostname=hostname) as ssock:
                    cert_binary = ssock.getpeercert(binary_form=True)
                    if not cert_binary:
                        print(f"Warning: No certificate returned for {hostname}")
                        return None

                    x509 = OpenSSL.crypto.load_certificate(
                        OpenSSL.crypto.FILETYPE_ASN1, cert_binary
                    )

                    # Create the dictionary directly
                    cert_info_raw = {
                        "subject": dict(x509.get_subject().get_components()),
                        "issuer": dict(x509.get_issuer().get_components()),
                        "version": x509.get_version(),
                        "serial_number": hex(x509.get_serial_number()),
                        "not_before": x509.get_notBefore(),  # Keep as bytes initially, _decode handles it
                        "not_after": x509.get_notAfter(),  # Keep as bytes initially
                        "fingerprint": x509.digest("sha256").hex(),  # hex() is already string
                        "signature_algorithm": x509.get_signature_algorithm(),  # Keep as bytes
                        "raw_cert": base64.b64encode(cert_binary),  # Base64 is bytes, _decode handles it
                    }

                    # Add extensions
                    extensions = []
                    for i in range(x509.get_extension_count()):
                        ext = x509.get_extension(i)
                        # get_short_name() returns bytes, str(ext) handles value conversion
                        extensions.append(
                            {"name": ext.get_short_name(), "value": str(ext)}
                        )
                    cert_info_raw["extensions"] = extensions

        except ssl.SSLCertVerificationError as e:
            print(f"SSL Verification Error for {url}: {e}")
            # Decide if you want to proceed or return None based on your needs
            # You might try fetching without verification here if needed, but be cautious.
            return None
        except socket.gaierror:
            print(f"Could not resolve hostname: {hostname}")
            return None
        except socket.timeout:
            print(f"Connection timed out for {url}")
            return None
        except Exception as e:
            print(f"Error fetching/processing certificate for {url}: {e}")
            # Log the full error details if needed: logging.exception("Cert fetch error")
            return None

        # If successful, create the SSLCertificate instance from the dictionary
        if cert_info_raw:
            return SSLCertificate(cert_info_raw)
        else:
            return None


    # --- Properties now access the dictionary items directly via self[] ---
    @property
    def issuer(self) -> Dict[str, str]:
        return self.get("issuer", {})  # Use self.get for safety

    @property
    def subject(self) -> Dict[str, str]:
        return self.get("subject", {})

    @property
    def valid_from(self) -> str:
        return self.get("not_before", "")

    @property
    def valid_until(self) -> str:
        return self.get("not_after", "")

    @property
    def fingerprint(self) -> str:
        return self.get("fingerprint", "")

    # --- Export methods can use `self` directly as it is the dict ---
    def to_json(self, filepath: Optional[str] = None) -> Optional[str]:
        """
        Export certificate as JSON.

        Args:
            filepath (Optional[str]): Path to save the JSON file (default: None).

        Returns:
            Optional[str]: JSON string if successful, None otherwise.
        """
        json_str = json.dumps(self._cert_info, indent=2, ensure_ascii=False)
        """Export certificate as JSON."""
        # `self` is already the dictionary we want to serialize
        json_str = json.dumps(self, indent=2, ensure_ascii=False)
        if filepath:
            Path(filepath).write_text(json_str, encoding="utf-8")
            return None
        return json_str

    def to_pem(self, filepath: Optional[str] = None) -> Optional[str]:
        """
        Export certificate as PEM.

        Args:
            filepath (Optional[str]): Path to save the PEM file (default: None).

        Returns:
            Optional[str]: PEM string if successful, None otherwise.
        """
        """Export certificate as PEM."""
        try:
            # Decode the raw_cert (which should be string due to _decode)
            raw_cert_bytes = base64.b64decode(self.get("raw_cert", ""))
            x509 = OpenSSL.crypto.load_certificate(
                OpenSSL.crypto.FILETYPE_ASN1,
                base64.b64decode(self._cert_info["raw_cert"]),
                OpenSSL.crypto.FILETYPE_ASN1, raw_cert_bytes
            )
            pem_data = OpenSSL.crypto.dump_certificate(
                OpenSSL.crypto.FILETYPE_PEM, x509
@@ -136,49 +180,25 @@ class SSLCertificate:
            Path(filepath).write_text(pem_data, encoding="utf-8")
            return None
            return pem_data
        except Exception:
            return None
        except Exception as e:
            print(f"Error converting to PEM: {e}")
            return None

    def to_der(self, filepath: Optional[str] = None) -> Optional[bytes]:
        """
        Export certificate as DER.

        Args:
            filepath (Optional[str]): Path to save the DER file (default: None).

        Returns:
            Optional[bytes]: DER bytes if successful, None otherwise.
        """
        """Export certificate as DER."""
        try:
            der_data = base64.b64decode(self._cert_info["raw_cert"])
            # Decode the raw_cert (which should be string due to _decode)
            der_data = base64.b64decode(self.get("raw_cert", ""))
            if filepath:
                Path(filepath).write_bytes(der_data)
                return None
            return der_data
        except Exception:
            return None
        except Exception as e:
            print(f"Error converting to DER: {e}")
            return None

    @property
    def issuer(self) -> Dict[str, str]:
        """Get certificate issuer information."""
        return self._cert_info.get("issuer", {})

    @property
    def subject(self) -> Dict[str, str]:
        """Get certificate subject information."""
        return self._cert_info.get("subject", {})

    @property
    def valid_from(self) -> str:
        """Get certificate validity start date."""
        return self._cert_info.get("not_before", "")

    @property
    def valid_until(self) -> str:
        """Get certificate validity end date."""
        return self._cert_info.get("not_after", "")

    @property
    def fingerprint(self) -> str:
        """Get certificate fingerprint."""
        return self._cert_info.get("fingerprint", "")
    # Optional: Add __repr__ for better debugging
    def __repr__(self) -> str:
        subject_cn = self.subject.get('CN', 'N/A')
        issuer_cn = self.issuer.get('CN', 'N/A')
        return f"<SSLCertificate Subject='{subject_cn}' Issuer='{issuer_cn}'>"
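Since the refactored class is itself a dict, serialization needs no helper; a hedged sketch (example.com is illustrative):

import json

cert = SSLCertificate.from_url("https://example.com")
if cert:
    print(cert.issuer.get("CN"))             # property access still works
    print(json.dumps(cert, indent=2)[:200])  # direct JSON serialization
    cert.to_pem("example_cert.pem")          # export helpers unchanged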
187
crawl4ai/types.py
Normal file
@@ -0,0 +1,187 @@
from typing import TYPE_CHECKING, Union
|
||||
|
||||
# Logger types
|
||||
AsyncLoggerBase = Union['AsyncLoggerBaseType']
|
||||
AsyncLogger = Union['AsyncLoggerType']
|
||||
|
||||
# Crawler core types
|
||||
AsyncWebCrawler = Union['AsyncWebCrawlerType']
|
||||
CacheMode = Union['CacheModeType']
|
||||
CrawlResult = Union['CrawlResultType']
|
||||
CrawlerHub = Union['CrawlerHubType']
|
||||
BrowserProfiler = Union['BrowserProfilerType']
|
||||
|
||||
# Configuration types
|
||||
BrowserConfig = Union['BrowserConfigType']
|
||||
CrawlerRunConfig = Union['CrawlerRunConfigType']
|
||||
HTTPCrawlerConfig = Union['HTTPCrawlerConfigType']
|
||||
LLMConfig = Union['LLMConfigType']
|
||||
|
||||
# Content scraping types
|
||||
ContentScrapingStrategy = Union['ContentScrapingStrategyType']
|
||||
WebScrapingStrategy = Union['WebScrapingStrategyType']
|
||||
LXMLWebScrapingStrategy = Union['LXMLWebScrapingStrategyType']
|
||||
|
||||
# Proxy types
|
||||
ProxyRotationStrategy = Union['ProxyRotationStrategyType']
|
||||
RoundRobinProxyStrategy = Union['RoundRobinProxyStrategyType']
|
||||
|
||||
# Extraction types
|
||||
ExtractionStrategy = Union['ExtractionStrategyType']
|
||||
LLMExtractionStrategy = Union['LLMExtractionStrategyType']
|
||||
CosineStrategy = Union['CosineStrategyType']
|
||||
JsonCssExtractionStrategy = Union['JsonCssExtractionStrategyType']
|
||||
JsonXPathExtractionStrategy = Union['JsonXPathExtractionStrategyType']
|
||||
|
||||
# Chunking types
|
||||
ChunkingStrategy = Union['ChunkingStrategyType']
|
||||
RegexChunking = Union['RegexChunkingType']
|
||||
|
||||
# Markdown generation types
|
||||
DefaultMarkdownGenerator = Union['DefaultMarkdownGeneratorType']
|
||||
MarkdownGenerationResult = Union['MarkdownGenerationResultType']
|
||||
|
||||
# Content filter types
|
||||
RelevantContentFilter = Union['RelevantContentFilterType']
|
||||
PruningContentFilter = Union['PruningContentFilterType']
|
||||
BM25ContentFilter = Union['BM25ContentFilterType']
|
||||
LLMContentFilter = Union['LLMContentFilterType']
|
||||
|
||||
# Dispatcher types
|
||||
BaseDispatcher = Union['BaseDispatcherType']
|
||||
MemoryAdaptiveDispatcher = Union['MemoryAdaptiveDispatcherType']
|
||||
SemaphoreDispatcher = Union['SemaphoreDispatcherType']
|
||||
RateLimiter = Union['RateLimiterType']
|
||||
CrawlerMonitor = Union['CrawlerMonitorType']
|
||||
DisplayMode = Union['DisplayModeType']
|
||||
RunManyReturn = Union['RunManyReturnType']
|
||||
|
||||
# Docker client
|
||||
Crawl4aiDockerClient = Union['Crawl4aiDockerClientType']
|
||||
|
||||
# Deep crawling types
|
||||
DeepCrawlStrategy = Union['DeepCrawlStrategyType']
|
||||
BFSDeepCrawlStrategy = Union['BFSDeepCrawlStrategyType']
|
||||
FilterChain = Union['FilterChainType']
|
||||
ContentTypeFilter = Union['ContentTypeFilterType']
|
||||
DomainFilter = Union['DomainFilterType']
URLFilter = Union['URLFilterType']
FilterStats = Union['FilterStatsType']
SEOFilter = Union['SEOFilterType']
KeywordRelevanceScorer = Union['KeywordRelevanceScorerType']
URLScorer = Union['URLScorerType']
CompositeScorer = Union['CompositeScorerType']
DomainAuthorityScorer = Union['DomainAuthorityScorerType']
FreshnessScorer = Union['FreshnessScorerType']
PathDepthScorer = Union['PathDepthScorerType']
BestFirstCrawlingStrategy = Union['BestFirstCrawlingStrategyType']
DFSDeepCrawlStrategy = Union['DFSDeepCrawlStrategyType']
DeepCrawlDecorator = Union['DeepCrawlDecoratorType']

# Only import types during type checking to avoid circular imports
if TYPE_CHECKING:
    # Logger imports
    from .async_logger import (
        AsyncLoggerBase as AsyncLoggerBaseType,
        AsyncLogger as AsyncLoggerType,
    )

    # Crawler core imports
    from .async_webcrawler import (
        AsyncWebCrawler as AsyncWebCrawlerType,
        CacheMode as CacheModeType,
    )
    from .models import CrawlResult as CrawlResultType
    from .hub import CrawlerHub as CrawlerHubType
    from .browser_profiler import BrowserProfiler as BrowserProfilerType

    # Configuration imports
    from .async_configs import (
        BrowserConfig as BrowserConfigType,
        CrawlerRunConfig as CrawlerRunConfigType,
        HTTPCrawlerConfig as HTTPCrawlerConfigType,
        LLMConfig as LLMConfigType,
    )

    # Content scraping imports
    from .content_scraping_strategy import (
        ContentScrapingStrategy as ContentScrapingStrategyType,
        WebScrapingStrategy as WebScrapingStrategyType,
        LXMLWebScrapingStrategy as LXMLWebScrapingStrategyType,
    )

    # Proxy imports
    from .proxy_strategy import (
        ProxyRotationStrategy as ProxyRotationStrategyType,
        RoundRobinProxyStrategy as RoundRobinProxyStrategyType,
    )

    # Extraction imports
    from .extraction_strategy import (
        ExtractionStrategy as ExtractionStrategyType,
        LLMExtractionStrategy as LLMExtractionStrategyType,
        CosineStrategy as CosineStrategyType,
        JsonCssExtractionStrategy as JsonCssExtractionStrategyType,
        JsonXPathExtractionStrategy as JsonXPathExtractionStrategyType,
    )

    # Chunking imports
    from .chunking_strategy import (
        ChunkingStrategy as ChunkingStrategyType,
        RegexChunking as RegexChunkingType,
    )

    # Markdown generation imports
    from .markdown_generation_strategy import (
        DefaultMarkdownGenerator as DefaultMarkdownGeneratorType,
    )
    from .models import MarkdownGenerationResult as MarkdownGenerationResultType

    # Content filter imports
    from .content_filter_strategy import (
        RelevantContentFilter as RelevantContentFilterType,
        PruningContentFilter as PruningContentFilterType,
        BM25ContentFilter as BM25ContentFilterType,
        LLMContentFilter as LLMContentFilterType,
    )

    # Dispatcher imports
    from .async_dispatcher import (
        BaseDispatcher as BaseDispatcherType,
        MemoryAdaptiveDispatcher as MemoryAdaptiveDispatcherType,
        SemaphoreDispatcher as SemaphoreDispatcherType,
        RateLimiter as RateLimiterType,
        CrawlerMonitor as CrawlerMonitorType,
        DisplayMode as DisplayModeType,
        RunManyReturn as RunManyReturnType,
    )

    # Docker client
    from .docker_client import Crawl4aiDockerClient as Crawl4aiDockerClientType

    # Deep crawling imports
    from .deep_crawling import (
        DeepCrawlStrategy as DeepCrawlStrategyType,
        BFSDeepCrawlStrategy as BFSDeepCrawlStrategyType,
        FilterChain as FilterChainType,
        ContentTypeFilter as ContentTypeFilterType,
        DomainFilter as DomainFilterType,
        URLFilter as URLFilterType,
        FilterStats as FilterStatsType,
        SEOFilter as SEOFilterType,
        KeywordRelevanceScorer as KeywordRelevanceScorerType,
        URLScorer as URLScorerType,
        CompositeScorer as CompositeScorerType,
        DomainAuthorityScorer as DomainAuthorityScorerType,
        FreshnessScorer as FreshnessScorerType,
        PathDepthScorer as PathDepthScorerType,
        BestFirstCrawlingStrategy as BestFirstCrawlingStrategyType,
        DFSDeepCrawlStrategy as DFSDeepCrawlStrategyType,
        DeepCrawlDecorator as DeepCrawlDecoratorType,
    )


def create_llm_config(*args, **kwargs) -> 'LLMConfigType':
    from .async_configs import LLMConfig
    return LLMConfig(*args, **kwargs)
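
# Illustrative usage (not part of the diff): the factory defers the real
# import to call time, so importing this module never triggers the circular
# imports the aliases above exist to avoid. The provider string below is an
# assumption for demonstration only, not a tested value.
if __name__ == "__main__":
    demo_cfg = create_llm_config(provider="openai/gpt-4o-mini")
    print(type(demo_cfg).__name__)  # -> LLMConfig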
@@ -3,12 +3,11 @@ from typing import Optional, Literal, List, Dict, Tuple
import re

from abc import ABC, abstractmethod
import random
from fake_useragent import UserAgent
import requests
from lxml import html
import json
from typing import Optional, List, Union, Dict
from typing import Union

class UAGen(ABC):
    @abstractmethod
@@ -1,20 +1,24 @@
import time
from urllib.parse import urlparse
from concurrent.futures import ThreadPoolExecutor, as_completed
from bs4 import BeautifulSoup, Comment, element, Tag, NavigableString
import json
import html
import lxml
import re
import os
import platform
from .prompts import PROMPT_EXTRACT_BLOCKS
from .config import *
from array import array
from .html2text import html2text, CustomHTML2Text
# from .config import *
from .config import MIN_WORD_THRESHOLD, IMAGE_DESCRIPTION_MIN_WORD_THRESHOLD, IMAGE_SCORE_THRESHOLD, DEFAULT_PROVIDER, PROVIDER_MODELS
import httpx
from socket import gaierror
from pathlib import Path
-from typing import Dict, Any
+from typing import Dict, Any, List, Optional, Callable
from urllib.parse import urljoin
import requests
from requests.exceptions import InvalidSchema
from typing import Dict, Any
import xxhash
from colorama import Fore, Style, init
import textwrap
@@ -22,12 +26,191 @@ import cProfile
import pstats
from functools import wraps
import asyncio

from lxml import etree, html as lhtml
import sqlite3
import hashlib
from urllib.parse import urljoin, urlparse

from urllib.robotparser import RobotFileParser
import aiohttp
from urllib.parse import urlparse, urlunparse
from functools import lru_cache

from packaging import version
from . import __version__
from typing import Sequence

from itertools import chain
from collections import deque
from typing import Generator, Iterable

def chunk_documents(
    documents: Iterable[str],
    chunk_token_threshold: int,
    overlap: int,
    word_token_rate: float = 0.75,
    tokenizer: Optional[Callable[[str], List[str]]] = None,
) -> Generator[str, None, None]:
    """
    Efficiently chunks documents into token-limited sections with overlap between chunks.

    Args:
        documents: Iterable of document strings
        chunk_token_threshold: Maximum tokens per chunk
        overlap: Number of tokens to overlap between chunks
        word_token_rate: Token estimate per word when not using a tokenizer
        tokenizer: Function that splits text into tokens (if available)

    Yields:
        Text chunks as strings
    """
    token_queue = deque()
    contribution_queue = deque()
    current_token_count = 0.0

    for doc in documents:
        # Tokenize document
        if tokenizer:
            tokens = tokenizer(doc)
            contributions = [1.0] * len(tokens)
        else:
            tokens = doc.split()
            contributions = [word_token_rate] * len(tokens)

        # Add to processing queues
        token_queue.extend(tokens)
        contribution_queue.extend(contributions)
        current_token_count += sum(contributions)

        # Process full chunks
        while current_token_count >= chunk_token_threshold:
            # Find chunk split point
            chunk_tokens = []
            chunk_contrib = []
            chunk_total = 0.0

            # Build chunk up to threshold
            while contribution_queue:
                next_contrib = contribution_queue[0]
                if chunk_total + next_contrib > chunk_token_threshold:
                    break

                chunk_total += next_contrib
                chunk_contrib.append(contribution_queue.popleft())
                chunk_tokens.append(token_queue.popleft())

            # Handle edge case where first token exceeds threshold
            if not chunk_contrib:  # Single token exceeds threshold
                chunk_contrib.append(contribution_queue.popleft())
                chunk_tokens.append(token_queue.popleft())

            # Calculate overlap
            overlap_total = 0.0
            overlap_idx = 0
            for contrib in reversed(chunk_contrib):
                if overlap_total + contrib > overlap:
                    break
                overlap_total += contrib
                overlap_idx += 1

            # Prepend overlap to queues
            if overlap_idx > 0:
                overlap_tokens = chunk_tokens[-overlap_idx:]
                overlap_contrib = chunk_contrib[-overlap_idx:]

                token_queue.extendleft(reversed(overlap_tokens))
                contribution_queue.extendleft(reversed(overlap_contrib))
                current_token_count += overlap_total

            # Update current token count and yield chunk
            current_token_count -= sum(chunk_contrib)
            yield " ".join(chunk_tokens[:len(chunk_tokens)-overlap_idx] if overlap_idx else chunk_tokens)

    # Yield remaining tokens
    if token_queue:
        yield " ".join(token_queue)

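# Illustrative check (not part of the diff): chunk one 40-word document with
# the default word_token_rate of 0.75 (40 words ~ 30 estimated tokens), a
# 10-token threshold and a 2-token overlap. The exact chunk sizes are a
# property of the heuristic above, so treat the printed numbers as a sketch.
if __name__ == "__main__":
    _docs = ["alpha beta gamma delta " * 10]
    for _i, _chunk in enumerate(chunk_documents(_docs, chunk_token_threshold=10, overlap=2)):
        print(_i, len(_chunk.split()))
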
def merge_chunks(
    docs: Sequence[str],
    target_size: int,
    overlap: int = 0,
    word_token_ratio: float = 1.0,
    splitter: Callable = None
) -> List[str]:
    """Merges documents into chunks of specified token size.

    Args:
        docs: Input documents
        target_size: Desired token count per chunk
        overlap: Number of tokens to overlap between chunks
        word_token_ratio: Multiplier for word->token conversion
        splitter: Tokenizing callable; defaults to str.split
    """
    # Pre-tokenize all docs and store token counts
    splitter = splitter or str.split
    token_counts = array('I')
    all_tokens: List[List[str]] = []
    total_tokens = 0

    for doc in docs:
        tokens = splitter(doc)  # use the provided splitter (was doc.split(), which ignored the argument)
        count = int(len(tokens) * word_token_ratio)
        if count:  # Skip empty docs
            token_counts.append(count)
            all_tokens.append(tokens)
            total_tokens += count

    if not total_tokens:
        return []

    # Pre-allocate chunks
    num_chunks = max(1, (total_tokens + target_size - 1) // target_size)
    chunks: List[List[str]] = [[] for _ in range(num_chunks)]

    curr_chunk = 0
    curr_size = 0

    # Distribute tokens one at a time across the pre-allocated chunks
    for token in chain.from_iterable(all_tokens):
        if curr_size >= target_size and curr_chunk < num_chunks - 1:
            if overlap > 0:
                overlap_tokens = chunks[curr_chunk][-overlap:]
                curr_chunk += 1
                chunks[curr_chunk].extend(overlap_tokens)
                curr_size = len(overlap_tokens)
            else:
                curr_chunk += 1
                curr_size = 0

        chunks[curr_chunk].append(token)
        curr_size += 1

    # Return only non-empty chunks
    return [' '.join(chunk) for chunk in chunks if chunk]

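# Illustrative check (not part of the diff): 25 one-word documents merged
# into chunks of roughly 10 tokens with a 2-token overlap. With the logic
# above this should produce 3 chunks whose boundaries repeat two words.
if __name__ == "__main__":
    _merged = merge_chunks(["w%d" % i for i in range(25)], target_size=10, overlap=2)
    for _c in _merged:
        print(len(_c.split()), _c)
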
class VersionManager:
    def __init__(self):
        self.home_dir = Path.home() / ".crawl4ai"
        self.version_file = self.home_dir / "version.txt"

    def get_installed_version(self):
        """Get the version recorded in home directory"""
        if not self.version_file.exists():
            return None
        try:
            return version.parse(self.version_file.read_text().strip())
        except Exception as _ex:
            return None

    def update_version(self):
        """Update the version file to current library version"""
        self.version_file.write_text(__version__.__version__)

    def needs_update(self):
        """Check if database needs update based on version"""
        installed = self.get_installed_version()
        current = version.parse(__version__.__version__)
        return installed is None or installed < current

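# Illustrative usage (not part of the diff): on a fresh machine the version
# file does not exist yet, so needs_update() reports True; after
# update_version() it reports False until the library version changes.
if __name__ == "__main__":
    _vm = VersionManager()
    print(_vm.needs_update())
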
class RobotsParser:
    # Default 7 days cache TTL
@@ -107,7 +290,7 @@ class RobotsParser:
            domain = parsed.netloc
            if not domain:
                return True
-        except:
+        except Exception as _ex:
            return True

        # Fast path - check cache first
@@ -127,7 +310,7 @@ class RobotsParser:
                self._cache_rules(domain, rules)
            else:
                return True
-        except:
+        except Exception as _ex:
            # On any error (timeout, connection failed, etc), allow access
            return True

@@ -160,6 +343,77 @@ class InvalidCSSSelectorError(Exception):
    pass

SPLITS = bytearray([
    # Control chars (0-31) + space (32)
    1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
    # Special chars (33-47): ! " # $ % & ' ( ) * + , - . /
    1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
    # Numbers (48-57): Treat as non-splits
    0,0,0,0,0,0,0,0,0,0,
    # More special chars (58-64): : ; < = > ? @
    1,1,1,1,1,1,1,
    # Uppercase (65-90): Keep
    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
    # More special chars (91-96): [ \ ] ^ _ `
    1,1,1,1,1,1,
    # Lowercase (97-122): Keep
    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
    # Special chars (123-126): { | } ~
    1,1,1,1,
    # DEL (127) + extended ASCII (entry counts adjusted so the table covers all 256 codes)
    *([1] * 129)
])

# Additional split chars for HTML/code
HTML_CODE_CHARS = {
    # HTML specific
    '•', '►', '▼', '©', '®', '™', '→', '⇒', '≈', '≤', '≥',
    # Programming symbols
    '+=', '-=', '*=', '/=', '=>', '<=>', '!=', '==', '===',
    '++', '--', '<<', '>>', '&&', '||', '??', '?:', '?.',
    # Common Unicode
    '…', '“', '”', '‘', '’', '«', '»', '—', '–',
    # Additional splits
    '+', '=', '~', '@', '#', '$', '%', '^', '&', '*',
    '(', ')', '{', '}', '[', ']', '|', '\\', '/', '`',
    '<', '>', ',', '.', '?', '!', ':', ';', '-', '_'
}

def advanced_split(text: str) -> list[str]:
    result = []
    word = array('u')

    i = 0
    text_len = len(text)

    while i < text_len:
        char = text[i]
        o = ord(char)

        # Fast path for ASCII
        if o < 256 and SPLITS[o]:
            if word:
                result.append(word.tounicode())
                word = array('u')
        # Check for multi-char symbols
        elif i < text_len - 1:
            two_chars = char + text[i + 1]
            if two_chars in HTML_CODE_CHARS:
                if word:
                    result.append(word.tounicode())
                    word = array('u')
                i += 1  # Skip next char since we used it
            else:
                word.append(char)
        else:
            word.append(char)
        i += 1

    if word:
        result.append(word.tounicode())

    return result

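# Illustrative check (not part of the diff): ASCII punctuation splits words
# while digits stay attached, so a mixed identifier string comes apart at
# '-', '_' and whitespace.
if __name__ == "__main__":
    print(advanced_split("foo-bar_baz v42"))  # -> ['foo', 'bar', 'baz', 'v42']
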
def create_box_message(
    message: str,
    type: str = "info",
@@ -1124,7 +1378,7 @@ def get_content_of_website_optimized(
                src = img.get("src", "")
                if base64_pattern.match(src):
                    img["src"] = base64_pattern.sub("", src)
-            except:
+            except Exception as _ex:
                pass

    cleaned_html = str(body).replace("\n\n", "\n").replace("  ", " ")
@@ -1162,7 +1416,7 @@ def extract_metadata_using_lxml(html, doc=None):

    if doc is None:
        try:
-            doc = lhtml.document_fromstring(html)
+            doc = lxml.html.document_fromstring(html)
        except Exception:
            return {}

@@ -1297,7 +1551,7 @@ def extract_xml_tags(string):
    return list(set(tags))


-def extract_xml_data(tags, string):
+def extract_xml_data_legacy(tags, string):
    """
    Extract data for specified XML tags from a string.

@@ -1326,6 +1580,38 @@ def extract_xml_data(tags, string):

    return data

def extract_xml_data(tags, string):
    """
    Extract data for specified XML tags from a string, returning the longest content for each tag.

    How it works:
    1. Finds all occurrences of each tag in the string using regex.
    2. For each tag, selects the occurrence with the longest content.
    3. Returns a dictionary of tag-content pairs.

    Args:
        tags (List[str]): The list of XML tags to extract.
        string (str): The input string containing XML data.

    Returns:
        Dict[str, str]: A dictionary with tag names as keys and longest extracted content as values.
    """

    data = {}

    for tag in tags:
        pattern = f"<{tag}>(.*?)</{tag}>"
        matches = re.findall(pattern, string, re.DOTALL)

        if matches:
            # Find the longest content for this tag
            longest_content = max(matches, key=len).strip()
            data[tag] = longest_content
        else:
            data[tag] = ""

    return data

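# Illustrative check (not part of the diff): with two <answer> blocks the
# longer one wins, and a tag with no match maps to an empty string.
if __name__ == "__main__":
    _s = "<answer>short</answer><answer>much longer answer</answer>"
    print(extract_xml_data(["answer", "title"], _s))
    # -> {'answer': 'much longer answer', 'title': ''}
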
def perform_completion_with_backoff(
    provider,
@@ -1394,6 +1680,19 @@ def perform_completion_with_backoff(
                    "content": ["Rate limit error. Please try again later."],
                }
            ]
        except Exception as e:
            raise e  # Raise any other exceptions immediately
            # print("Error during completion request:", str(e))
            # error_message = e.message
            # return [
            #     {
            #         "index": 0,
            #         "tags": ["error"],
            #         "content": [
            #             f"Error during LLM completion request. {error_message}"
            #         ],
            #     }
            # ]


def extract_blocks(url, html, provider=DEFAULT_PROVIDER, api_token=None, base_url=None):
@@ -1478,10 +1777,10 @@ def extract_blocks_batch(batch_data, provider="groq/llama3-70b-8192", api_token=

    messages = []

-    for url, html in batch_data:
+    for url, _html in batch_data:
        variable_values = {
            "URL": url,
-            "HTML": html,
+            "HTML": _html,
        }

        prompt_with_variables = PROMPT_EXTRACT_BLOCKS
@@ -1663,7 +1962,7 @@ def fast_format_html(html_string):
    indent = 0
    indent_str = "  "  # Two spaces for indentation
    formatted = []
-    in_content = False
+    # in_content = False

    # Split by < and > to separate tags and content
    parts = html_string.replace(">", ">\n").replace("<", "\n<").split("\n")
@@ -1704,11 +2003,91 @@ def normalize_url(href, base_url):
    if not parsed_base.scheme or not parsed_base.netloc:
        raise ValueError(f"Invalid base URL format: {base_url}")

    # Ensure base_url ends with a trailing slash if it's a directory path
    if not base_url.endswith('/'):
        base_url = base_url + '/'

    # Use urljoin to handle all cases
    normalized = urljoin(base_url, href.strip())
    return normalized


def normalize_url_for_deep_crawl(href, base_url):
    """Normalize URLs to ensure consistent format"""
    from urllib.parse import urljoin, urlparse, urlunparse, parse_qs, urlencode

    # Handle None or empty values
    if not href:
        return None

    # Use urljoin to handle relative URLs
    full_url = urljoin(base_url, href.strip())

    # Parse the URL for normalization
    parsed = urlparse(full_url)

    # Convert hostname to lowercase
    netloc = parsed.netloc.lower()

    # Remove fragment entirely
    fragment = ''

    # Normalize query parameters if needed
    query = parsed.query
    if query:
        # Parse query parameters
        params = parse_qs(query)

        # Remove tracking parameters (example - customize as needed)
        tracking_params = ['utm_source', 'utm_medium', 'utm_campaign', 'ref', 'fbclid']
        for param in tracking_params:
            if param in params:
                del params[param]

        # Rebuild query string, sorted for consistency
        query = urlencode(params, doseq=True) if params else ''

    # Build normalized URL
    normalized = urlunparse((
        parsed.scheme,
        netloc,
        parsed.path.rstrip('/'),  # Normalize trailing slash
        parsed.params,
        query,
        fragment
    ))

    return normalized

@lru_cache(maxsize=10000)
def efficient_normalize_url_for_deep_crawl(href, base_url):
    """Efficient URL normalization with proper parsing"""
    from urllib.parse import urljoin

    if not href:
        return None

    # Resolve relative URLs
    full_url = urljoin(base_url, href.strip())

    # Use proper URL parsing
    parsed = urlparse(full_url)

    # Only perform the most critical normalizations
    # 1. Lowercase hostname
    # 2. Remove fragment
    normalized = urlunparse((
        parsed.scheme,
        parsed.netloc.lower(),
        parsed.path.rstrip('/'),
        parsed.params,
        parsed.query,
        ''  # Remove fragment
    ))

    return normalized

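# Illustrative check (not part of the diff): tracking parameters are dropped
# and the fragment removed, so these variants collapse to one canonical URL.
if __name__ == "__main__":
    print(normalize_url_for_deep_crawl("/a/?utm_source=x#top", "https://Example.com"))
    # -> https://example.com/a
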
def normalize_url_tmp(href, base_url):
    """Normalize URLs to ensure consistent format"""
    # Extract protocol and domain from base URL
@@ -2207,3 +2586,196 @@ def get_error_context(exc_info, context_lines: int = 5):
        "function": func_name,
        "code_context": code_context,
    }

def truncate(value, threshold):
    if len(value) > threshold:
        return value[:threshold] + '...'  # Add ellipsis to indicate truncation
    return value

def optimize_html(html_str, threshold=200):
    root = lxml.html.fromstring(html_str)

    for _element in root.iter():
        # Process attributes
        for attr in list(_element.attrib):
            _element.attrib[attr] = truncate(_element.attrib[attr], threshold)

        # Process text content
        if _element.text and len(_element.text) > threshold:
            _element.text = truncate(_element.text, threshold)

        # Process tail text
        if _element.tail and len(_element.tail) > threshold:
            _element.tail = truncate(_element.tail, threshold)

    return lxml.html.tostring(root, encoding='unicode', pretty_print=False)

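# Illustrative check (not part of the diff): a 300-character attribute is cut
# to the threshold plus an ellipsis, while short text passes through intact.
if __name__ == "__main__":
    _html_in = '<div title="%s">hi</div>' % ("x" * 300)
    print(optimize_html(_html_in, threshold=50))
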
class HeadPeekr:
    @staticmethod
    async def fetch_head_section(url, timeout=0.3):
        headers = {
            "User-Agent": "Mozilla/5.0 (compatible; CrawlBot/1.0)",
            "Accept": "text/html",
            "Connection": "close"  # Force close after response
        }
        try:
            async with httpx.AsyncClient(timeout=timeout) as client:
                response = await client.get(url, headers=headers, follow_redirects=True)

                # Handle redirects explicitly by using the final URL
                if response.url != url:
                    url = str(response.url)
                    response = await client.get(url, headers=headers)

                content = b""
                async for chunk in response.aiter_bytes():
                    content += chunk
                    if b"</head>" in content:
                        break  # Stop after detecting </head>
                return content.split(b"</head>")[0] + b"</head>"
        except (httpx.HTTPError, gaierror):
            return None

    @staticmethod
    async def peek_html(url, timeout=0.3):
        head_section = await HeadPeekr.fetch_head_section(url, timeout=timeout)
        if head_section:
            return head_section.decode("utf-8", errors="ignore")
        return None

    @staticmethod
    def extract_meta_tags(head_content: str):
        meta_tags = {}

        # Find all meta tags
        meta_pattern = r'<meta[^>]+>'
        for meta_tag in re.finditer(meta_pattern, head_content):
            tag = meta_tag.group(0)

            # Extract name/property and content
            name_match = re.search(r'name=["\'](.*?)["\']', tag)
            property_match = re.search(r'property=["\'](.*?)["\']', tag)
            content_match = re.search(r'content=["\'](.*?)["\']', tag)

            if content_match and (name_match or property_match):
                key = name_match.group(1) if name_match else property_match.group(1)
                meta_tags[key] = content_match.group(1)

        return meta_tags

    def get_title(head_content: str):
        title_match = re.search(r'<title>(.*?)</title>', head_content, re.IGNORECASE | re.DOTALL)
        return title_match.group(1) if title_match else None

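# Illustrative usage (not part of the diff): peek at a page's <head> only,
# then read its title and meta description. example.com is a placeholder and
# the 2-second timeout is an assumption for slower networks.
if __name__ == "__main__":
    async def _demo():
        head = await HeadPeekr.peek_html("https://example.com", timeout=2.0)
        if head:
            print(HeadPeekr.get_title(head))
            print(HeadPeekr.extract_meta_tags(head).get("description"))
    asyncio.run(_demo())
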
def preprocess_html_for_schema(html_content, text_threshold=100, attr_value_threshold=200, max_size=100000):
    """
    Preprocess HTML to reduce size while preserving structure for schema generation.

    Args:
        html_content (str): Raw HTML content
        text_threshold (int): Maximum length for text nodes before truncation
        attr_value_threshold (int): Maximum length for attribute values before truncation
        max_size (int): Target maximum size for output HTML

    Returns:
        str: Preprocessed HTML content
    """
    try:
        # Parse HTML with error recovery
        parser = etree.HTMLParser(remove_comments=True, remove_blank_text=True)
        tree = lhtml.fromstring(html_content, parser=parser)

        # 1. Remove HEAD section (keep only BODY)
        head_elements = tree.xpath('//head')
        for head in head_elements:
            if head.getparent() is not None:
                head.getparent().remove(head)

        # 2. Define tags to remove completely
        tags_to_remove = [
            'script', 'style', 'noscript', 'iframe', 'canvas', 'svg',
            'video', 'audio', 'source', 'track', 'map', 'area'
        ]

        # Remove unwanted elements
        for tag in tags_to_remove:
            elements = tree.xpath(f'//{tag}')
            for element in elements:
                if element.getparent() is not None:
                    element.getparent().remove(element)

        # 3. Process remaining elements to clean attributes and truncate text
        for element in tree.iter():
            # Skip if we're at the root level
            if element.getparent() is None:
                continue

            # Clean non-essential attributes but preserve structural ones
            # attribs_to_keep = {'id', 'class', 'name', 'href', 'src', 'type', 'value', 'data-'}

            # This is more aggressive than the previous version
            attribs_to_keep = {'id', 'class', 'name', 'type', 'value'}

            # attributes_hates_truncate = ['id', 'class', "data-"]

            # This means: if an attribute value is too long, truncate it anyway;
            # go and find a better CSS selector to build a schema
            attributes_hates_truncate = []

            # Process each attribute
            for attrib in list(element.attrib.keys()):
                # Keep if it's essential or starts with data-
                if not (attrib in attribs_to_keep or attrib.startswith('data-')):
                    element.attrib.pop(attrib)
                # Truncate long attribute values except for selectors
                elif attrib not in attributes_hates_truncate and len(element.attrib[attrib]) > attr_value_threshold:
                    element.attrib[attrib] = element.attrib[attrib][:attr_value_threshold] + '...'

            # Truncate text content if it's too long
            if element.text and len(element.text.strip()) > text_threshold:
                element.text = element.text.strip()[:text_threshold] + '...'

            # Also truncate tail text if present
            if element.tail and len(element.tail.strip()) > text_threshold:
                element.tail = element.tail.strip()[:text_threshold] + '...'

        # 4. Find repeated patterns and keep only a few examples
        # This is a simplistic approach - more sophisticated pattern detection could be implemented
        pattern_elements = {}
        for element in tree.xpath('//*[contains(@class, "")]'):
            parent = element.getparent()
            if parent is None:
                continue

            # Create a signature based on tag and classes
            classes = element.get('class', '')
            if not classes:
                continue
            signature = f"{element.tag}.{classes}"

            if signature in pattern_elements:
                pattern_elements[signature].append(element)
            else:
                pattern_elements[signature] = [element]

        # Keep only 3 examples of each repeating pattern
        for signature, elements in pattern_elements.items():
            if len(elements) > 3:
                # Keep the first 2 and last elements
                for element in elements[2:-1]:
                    if element.getparent() is not None:
                        element.getparent().remove(element)

        # 5. Convert back to string
        result = etree.tostring(tree, encoding='unicode', method='html')

        # If still over the size limit, apply more aggressive truncation
        if len(result) > max_size:
            return result[:max_size] + "..."

        return result

    except Exception as e:
        # Fallback for parsing errors
        return html_content[:max_size] if len(html_content) > max_size else html_content

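# Illustrative check (not part of the diff): the script is stripped and only
# a few examples of the repeated "card" pattern survive, which is all a
# schema generator needs to see.
if __name__ == "__main__":
    _raw = ("<html><head><title>t</title></head><body>"
            + "".join(f'<div class="card">item {i}</div>' for i in range(10))
            + "<script>var x = 1;</script></body></html>")
    print(preprocess_html_for_schema(_raw, text_threshold=20))
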
deploy/docker/.dockerignore (new file, 31 lines)
@@ -0,0 +1,31 @@
# .dockerignore
*

# Allow specific files and directories when using local installation
!crawl4ai/
!docs/
!deploy/docker/
!setup.py
!pyproject.toml
!README.md
!LICENSE
!MANIFEST.in
!setup.cfg
!mkdocs.yml

.git/
__pycache__/
*.pyc
*.pyo
*.pyd
.DS_Store
.env
.venv
venv/
tests/
coverage.xml
*.log
*.swp
*.egg-info/
dist/
build/

deploy/docker/.llm.env.example (new file, 8 lines)
@@ -0,0 +1,8 @@
# LLM Provider Keys
OPENAI_API_KEY=your_openai_key_here
DEEPSEEK_API_KEY=your_deepseek_key_here
ANTHROPIC_API_KEY=your_anthropic_key_here
GROQ_API_KEY=your_groq_key_here
TOGETHER_API_KEY=your_together_key_here
MISTRAL_API_KEY=your_mistral_key_here
GEMINI_API_TOKEN=your_gemini_key_here

deploy/docker/README.md (new file, 821 lines)
@@ -0,0 +1,821 @@
# Crawl4AI Docker Guide 🐳

## Table of Contents
- [Prerequisites](#prerequisites)
- [Installation](#installation)
  - [Option 1: Using Pre-built Docker Hub Images (Recommended)](#option-1-using-pre-built-docker-hub-images-recommended)
  - [Option 2: Using Docker Compose](#option-2-using-docker-compose)
  - [Option 3: Manual Local Build & Run](#option-3-manual-local-build--run)
- [Dockerfile Parameters](#dockerfile-parameters)
- [Using the API](#using-the-api)
  - [Playground Interface](#playground-interface)
  - [Python SDK](#python-sdk)
  - [Understanding Request Schema](#understanding-request-schema)
  - [REST API Examples](#rest-api-examples)
- [Additional API Endpoints](#additional-api-endpoints)
  - [HTML Extraction Endpoint](#html-extraction-endpoint)
  - [Screenshot Endpoint](#screenshot-endpoint)
  - [PDF Export Endpoint](#pdf-export-endpoint)
  - [JavaScript Execution Endpoint](#javascript-execution-endpoint)
  - [Library Context Endpoint](#library-context-endpoint)
- [MCP (Model Context Protocol) Support](#mcp-model-context-protocol-support)
  - [What is MCP?](#what-is-mcp)
  - [Connecting via MCP](#connecting-via-mcp)
  - [Using with Claude Code](#using-with-claude-code)
  - [Available MCP Tools](#available-mcp-tools)
  - [Testing MCP Connections](#testing-mcp-connections)
  - [MCP Schemas](#mcp-schemas)
- [Metrics & Monitoring](#metrics--monitoring)
- [Deployment Scenarios](#deployment-scenarios)
- [Complete Examples](#complete-examples)
- [Server Configuration](#server-configuration)
  - [Understanding config.yml](#understanding-configyml)
  - [JWT Authentication](#jwt-authentication)
  - [Configuration Tips and Best Practices](#configuration-tips-and-best-practices)
  - [Customizing Your Configuration](#customizing-your-configuration)
  - [Configuration Recommendations](#configuration-recommendations)
- [Getting Help](#getting-help)
- [Summary](#summary)

## Prerequisites

Before we dive in, make sure you have:
- Docker installed and running (version 20.10.0 or higher), including `docker compose` (usually bundled with Docker Desktop).
- `git` for cloning the repository.
- At least 4GB of RAM available for the container (more recommended for heavy use).
- Python 3.10+ (if using the Python SDK).
- Node.js 16+ (if using the Node.js examples).

> 💡 **Pro tip**: Run `docker info` to check your Docker installation and available resources.

## Installation

We offer several ways to get the Crawl4AI server running. The quickest way is to use our pre-built Docker Hub images.

### Option 1: Using Pre-built Docker Hub Images (Recommended)

Pull and run images directly from Docker Hub without building locally.

#### 1. Pull the Image

Our latest release candidate is `0.6.0rc1-r1`. Images are built with multi-arch manifests, so Docker automatically pulls the correct version for your system.

```bash
# Pull the release candidate (recommended for latest features)
docker pull unclecode/crawl4ai:0.6.0rc1-r1

# Or pull the latest stable version
docker pull unclecode/crawl4ai:latest
```

#### 2. Setup Environment (API Keys)

If you plan to use LLMs, create a `.llm.env` file in your working directory:

```bash
# Create a .llm.env file with your API keys
cat > .llm.env << EOL
# OpenAI
OPENAI_API_KEY=sk-your-key

# Anthropic
ANTHROPIC_API_KEY=your-anthropic-key

# Other providers as needed
# DEEPSEEK_API_KEY=your-deepseek-key
# GROQ_API_KEY=your-groq-key
# TOGETHER_API_KEY=your-together-key
# MISTRAL_API_KEY=your-mistral-key
# GEMINI_API_TOKEN=your-gemini-token
EOL
```
> 🔑 **Note**: Keep your API keys secure! Never commit `.llm.env` to version control.

#### 3. Run the Container

* **Basic run:**
  ```bash
  docker run -d \
    -p 11235:11235 \
    --name crawl4ai \
    --shm-size=1g \
    unclecode/crawl4ai:0.6.0rc1-r1
  ```

* **With LLM support:**
  ```bash
  # Make sure .llm.env is in the current directory
  docker run -d \
    -p 11235:11235 \
    --name crawl4ai \
    --env-file .llm.env \
    --shm-size=1g \
    unclecode/crawl4ai:0.6.0rc1-r1
  ```

> The server will be available at `http://localhost:11235`. Visit `/playground` to access the interactive testing interface.

#### 4. Stopping the Container

```bash
docker stop crawl4ai && docker rm crawl4ai
```

#### Docker Hub Versioning Explained

* **Image Name:** `unclecode/crawl4ai`
* **Tag Format:** `LIBRARY_VERSION[-SUFFIX]` (e.g., `0.6.0rc1-r1`)
  * `LIBRARY_VERSION`: The semantic version of the core `crawl4ai` Python library
  * `SUFFIX`: Optional tag for release candidates (`rc1`) and revisions (`r1`)
* **`latest` Tag:** Points to the most recent stable version
* **Multi-Architecture Support:** All images support both `linux/amd64` and `linux/arm64` architectures through a single tag

### Option 2: Using Docker Compose

Docker Compose simplifies building and running the service, especially for local development and testing.

#### 1. Clone Repository

```bash
git clone https://github.com/unclecode/crawl4ai.git
cd crawl4ai
```

#### 2. Environment Setup (API Keys)

If you plan to use LLMs, copy the example environment file and add your API keys. This file should be in the **project root directory**.

```bash
# Make sure you are in the 'crawl4ai' root directory
cp deploy/docker/.llm.env.example .llm.env

# Now edit .llm.env and add your API keys
```

#### 3. Build and Run with Compose

The `docker-compose.yml` file in the project root provides a simplified approach that automatically handles architecture detection using buildx.

* **Run Pre-built Image from Docker Hub:**
  ```bash
  # Pulls and runs the release candidate from Docker Hub
  # Automatically selects the correct architecture
  IMAGE=unclecode/crawl4ai:0.6.0rc1-r1 docker compose up -d
  ```

* **Build and Run Locally:**
  ```bash
  # Builds the image locally using Dockerfile and runs it
  # Automatically uses the correct architecture for your machine
  docker compose up --build -d
  ```

* **Customize the Build:**
  ```bash
  # Build with all features (includes torch and transformers)
  INSTALL_TYPE=all docker compose up --build -d

  # Build with GPU support (for AMD64 platforms)
  ENABLE_GPU=true docker compose up --build -d
  ```

> The server will be available at `http://localhost:11235`.

#### 4. Stopping the Service

```bash
# Stop the service
docker compose down
```

### Option 3: Manual Local Build & Run

Use this approach if you prefer not to use Docker Compose and want direct control over the build and run process.

#### 1. Clone Repository & Setup Environment

Follow steps 1 and 2 from the Docker Compose section above (clone repo, `cd crawl4ai`, create `.llm.env` in the root).

#### 2. Build the Image (Multi-Arch)

Use `docker buildx` to build the image. Crawl4AI now uses buildx to handle multi-architecture builds automatically.

```bash
# Make sure you are in the 'crawl4ai' root directory
# Build for the current architecture and load it into Docker
docker buildx build -t crawl4ai-local:latest --load .

# Or build for multiple architectures (useful for publishing)
docker buildx build --platform linux/amd64,linux/arm64 -t crawl4ai-local:latest --load .

# Build with additional options
docker buildx build \
  --build-arg INSTALL_TYPE=all \
  --build-arg ENABLE_GPU=false \
  -t crawl4ai-local:latest --load .
```

#### 3. Run the Container

* **Basic run (no LLM support):**
  ```bash
  docker run -d \
    -p 11235:11235 \
    --name crawl4ai-standalone \
    --shm-size=1g \
    crawl4ai-local:latest
  ```

* **With LLM support:**
  ```bash
  # Make sure .llm.env is in the current directory (project root)
  docker run -d \
    -p 11235:11235 \
    --name crawl4ai-standalone \
    --env-file .llm.env \
    --shm-size=1g \
    crawl4ai-local:latest
  ```

> The server will be available at `http://localhost:11235`.

#### 4. Stopping the Manual Container

```bash
docker stop crawl4ai-standalone && docker rm crawl4ai-standalone
```

---

## MCP (Model Context Protocol) Support

Crawl4AI server includes support for the Model Context Protocol (MCP), allowing you to connect the server's capabilities directly to MCP-compatible clients like Claude Code.

### What is MCP?

MCP is an open protocol that standardizes how applications provide context to LLMs. It allows AI models to access external tools, data sources, and services through a standardized interface.

### Connecting via MCP

The Crawl4AI server exposes two MCP endpoints:

- **Server-Sent Events (SSE)**: `http://localhost:11235/mcp/sse`
- **WebSocket**: `ws://localhost:11235/mcp/ws`

### Using with Claude Code

You can add Crawl4AI as an MCP tool provider in Claude Code with a simple command:

```bash
# Add the Crawl4AI server as an MCP provider
claude mcp add --transport sse c4ai-sse http://localhost:11235/mcp/sse

# List all MCP providers to verify it was added
claude mcp list
```

Once connected, Claude Code can directly use Crawl4AI's capabilities like screenshot capture, PDF generation, and HTML processing without having to make separate API calls.

### Available MCP Tools

When connected via MCP, the following tools are available:

- `md` - Generate markdown from web content
- `html` - Extract preprocessed HTML
- `screenshot` - Capture webpage screenshots
- `pdf` - Generate PDF documents
- `execute_js` - Run JavaScript on web pages
- `crawl` - Perform multi-URL crawling
- `ask` - Query the Crawl4AI library context

### Testing MCP Connections

You can test the MCP WebSocket connection using the test file included in the repository:

```bash
# From the repository root
python tests/mcp/test_mcp_socket.py
```

### MCP Schemas

Access the MCP tool schemas at `http://localhost:11235/mcp/schema` for detailed information on each tool's parameters and capabilities.
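For a quick look without an MCP client, you can fetch the schema endpoint directly. A minimal sketch (the endpoint is documented above; the exact response shape may vary between versions):

```bash
curl http://localhost:11235/mcp/schema | python -m json.tool
```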
---

## Additional API Endpoints

In addition to the core `/crawl` and `/crawl/stream` endpoints, the server provides several specialized endpoints:

### HTML Extraction Endpoint

```
POST /html
```

Crawls the URL and returns preprocessed HTML optimized for schema extraction.

```json
{
  "url": "https://example.com"
}
```
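A minimal `curl` sketch for this endpoint (request body exactly as documented above):

```bash
curl -X POST http://localhost:11235/html \
  -H "Content-Type: application/json" \
  -d '{"url": "https://example.com"}'
```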
### Screenshot Endpoint

```
POST /screenshot
```

Captures a full-page PNG screenshot of the specified URL.

```json
{
  "url": "https://example.com",
  "screenshot_wait_for": 2,
  "output_path": "/path/to/save/screenshot.png"
}
```

- `screenshot_wait_for`: Optional delay in seconds before capture (default: 2)
- `output_path`: Optional path to save the screenshot (recommended)
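A minimal `curl` sketch, here omitting `output_path` (in which case the screenshot data is presumably returned inline in the response, which is why saving server-side via `output_path` is recommended above):

```bash
curl -X POST http://localhost:11235/screenshot \
  -H "Content-Type: application/json" \
  -d '{"url": "https://example.com", "screenshot_wait_for": 2}'
```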
### PDF Export Endpoint

```
POST /pdf
```

Generates a PDF document of the specified URL.

```json
{
  "url": "https://example.com",
  "output_path": "/path/to/save/document.pdf"
}
```

- `output_path`: Optional path to save the PDF (recommended)

### JavaScript Execution Endpoint

```
POST /execute_js
```

Executes JavaScript snippets on the specified URL and returns the full crawl result.

```json
{
  "url": "https://example.com",
  "scripts": [
    "return document.title",
    "return Array.from(document.querySelectorAll('a')).map(a => a.href)"
  ]
}
```

- `scripts`: List of JavaScript snippets to execute sequentially
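A minimal `curl` sketch of the request above (the endpoint and fields are as documented here; since the response is the full crawl result, expect a large JSON payload):

```bash
curl -X POST http://localhost:11235/execute_js \
  -H "Content-Type: application/json" \
  -d '{"url": "https://example.com", "scripts": ["return document.title"]}'
```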
---
|
||||
|
||||
## Dockerfile Parameters
|
||||
|
||||
You can customize the image build process using build arguments (`--build-arg`). These are typically used via `docker buildx build` or within the `docker-compose.yml` file.
|
||||
|
||||
```bash
|
||||
# Example: Build with 'all' features using buildx
|
||||
docker buildx build \
|
||||
--platform linux/amd64,linux/arm64 \
|
||||
--build-arg INSTALL_TYPE=all \
|
||||
-t yourname/crawl4ai-all:latest \
|
||||
--load \
|
||||
. # Build from root context
|
||||
```
|
||||
|
||||
### Build Arguments Explained
|
||||
|
||||
| Argument | Description | Default | Options |
|
||||
| :----------- | :--------------------------------------- | :-------- | :--------------------------------- |
|
||||
| INSTALL_TYPE | Feature set | `default` | `default`, `all`, `torch`, `transformer` |
|
||||
| ENABLE_GPU | GPU support (CUDA for AMD64) | `false` | `true`, `false` |
|
||||
| APP_HOME | Install path inside container (advanced) | `/app` | any valid path |
|
||||
| USE_LOCAL | Install library from local source | `true` | `true`, `false` |
|
||||
| GITHUB_REPO | Git repo to clone if USE_LOCAL=false | *(see Dockerfile)* | any git URL |
|
||||
| GITHUB_BRANCH| Git branch to clone if USE_LOCAL=false | `main` | any branch name |
|
||||
|
||||
*(Note: PYTHON_VERSION is fixed by the `FROM` instruction in the Dockerfile)*
|
||||
|
||||
### Build Best Practices
|
||||
|
||||
1. **Choose the Right Install Type**
|
||||
* `default`: Basic installation, smallest image size. Suitable for most standard web scraping and markdown generation.
|
||||
* `all`: Full features including `torch` and `transformers` for advanced extraction strategies (e.g., CosineStrategy, certain LLM filters). Significantly larger image. Ensure you need these extras.
|
||||
2. **Platform Considerations**
|
||||
* Use `buildx` for building multi-architecture images, especially for pushing to registries.
|
||||
* Use `docker compose` profiles (`local-amd64`, `local-arm64`) for easy platform-specific local builds.
|
||||
3. **Performance Optimization**
|
||||
* The image automatically includes platform-specific optimizations (OpenMP for AMD64, OpenBLAS for ARM64).
|
||||
|
||||
---
|
||||
|
||||
## Using the API
|
||||
|
||||
Communicate with the running Docker server via its REST API (defaulting to `http://localhost:11235`). You can use the Python SDK or make direct HTTP requests.
|
||||
|
||||
### Playground Interface
|
||||
|
||||
A built-in web playground is available at `http://localhost:11235/playground` for testing and generating API requests. The playground allows you to:
|
||||
|
||||
1. Configure `CrawlerRunConfig` and `BrowserConfig` using the main library's Python syntax
|
||||
2. Test crawling operations directly from the interface
|
||||
3. Generate corresponding JSON for REST API requests based on your configuration
|
||||
|
||||
This is the easiest way to translate Python configuration to JSON requests when building integrations.
|
||||
|
||||
### Python SDK
|
||||
|
||||
Install the SDK: `pip install crawl4ai`
|
||||
|
||||
```python
|
||||
import asyncio
|
||||
from crawl4ai.docker_client import Crawl4aiDockerClient
|
||||
from crawl4ai import BrowserConfig, CrawlerRunConfig, CacheMode # Assuming you have crawl4ai installed
|
||||
|
||||
async def main():
|
||||
# Point to the correct server port
|
||||
async with Crawl4aiDockerClient(base_url="http://localhost:11235", verbose=True) as client:
|
||||
# If JWT is enabled on the server, authenticate first:
|
||||
# await client.authenticate("user@example.com") # See Server Configuration section
|
||||
|
||||
# Example Non-streaming crawl
|
||||
print("--- Running Non-Streaming Crawl ---")
|
||||
results = await client.crawl(
|
||||
["https://httpbin.org/html"],
|
||||
browser_config=BrowserConfig(headless=True), # Use library classes for config aid
|
||||
crawler_config=CrawlerRunConfig(cache_mode=CacheMode.BYPASS)
|
||||
)
|
||||
if results: # client.crawl returns None on failure
|
||||
print(f"Non-streaming results success: {results.success}")
|
||||
if results.success:
|
||||
for result in results: # Iterate through the CrawlResultContainer
|
||||
print(f"URL: {result.url}, Success: {result.success}")
|
||||
else:
|
||||
print("Non-streaming crawl failed.")
|
||||
|
||||
|
||||
# Example Streaming crawl
|
||||
print("\n--- Running Streaming Crawl ---")
|
||||
stream_config = CrawlerRunConfig(stream=True, cache_mode=CacheMode.BYPASS)
|
||||
try:
|
||||
async for result in await client.crawl( # client.crawl returns an async generator for streaming
|
||||
["https://httpbin.org/html", "https://httpbin.org/links/5/0"],
|
||||
browser_config=BrowserConfig(headless=True),
|
||||
crawler_config=stream_config
|
||||
):
|
||||
print(f"Streamed result: URL: {result.url}, Success: {result.success}")
|
||||
except Exception as e:
|
||||
print(f"Streaming crawl failed: {e}")
|
||||
|
||||
|
||||
# Example Get schema
|
||||
print("\n--- Getting Schema ---")
|
||||
schema = await client.get_schema()
|
||||
print(f"Schema received: {bool(schema)}") # Print whether schema was received
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
```
|
||||
|
||||
*(SDK parameters like timeout, verify_ssl etc. remain the same)*
|
||||
|
||||
### Second Approach: Direct API Calls
|
||||
|
||||
Crucially, when sending configurations directly via JSON, they **must** follow the `{"type": "ClassName", "params": {...}}` structure for any non-primitive value (like config objects or strategies). Dictionaries must be wrapped as `{"type": "dict", "value": {...}}`.
|
||||
|
||||
*(Keep the detailed explanation of Configuration Structure, Basic Pattern, Simple vs Complex, Strategy Pattern, Complex Nested Example, Quick Grammar Overview, Important Rules, Pro Tip)*
|
||||
|
||||
#### More Examples *(Ensure Schema example uses type/value wrapper)*
|
||||
|
||||
**Advanced Crawler Configuration**
|
||||
*(Keep example, ensure cache_mode uses valid enum value like "bypass")*
|
||||
|
||||
**Extraction Strategy**
|
||||
```json
|
||||
{
|
||||
"crawler_config": {
|
||||
"type": "CrawlerRunConfig",
|
||||
"params": {
|
||||
"extraction_strategy": {
|
||||
"type": "JsonCssExtractionStrategy",
|
||||
"params": {
|
||||
"schema": {
|
||||
"type": "dict",
|
||||
"value": {
|
||||
"baseSelector": "article.post",
|
||||
"fields": [
|
||||
{"name": "title", "selector": "h1", "type": "text"},
|
||||
{"name": "content", "selector": ".content", "type": "html"}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**LLM Extraction Strategy** *(Keep example, ensure schema uses type/value wrapper)*
|
||||
*(Keep Deep Crawler Example)*
|
||||
|
||||
### REST API Examples
|
||||
|
||||
Update URLs to use port `11235`.
|
||||
|
||||
#### Simple Crawl
|
||||
|
||||
```python
|
||||
import requests
|
||||
|
||||
# Configuration objects converted to the required JSON structure
|
||||
browser_config_payload = {
|
||||
"type": "BrowserConfig",
|
||||
"params": {"headless": True}
|
||||
}
|
||||
crawler_config_payload = {
|
||||
"type": "CrawlerRunConfig",
|
||||
"params": {"stream": False, "cache_mode": "bypass"} # Use string value of enum
|
||||
}
|
||||
|
||||
crawl_payload = {
|
||||
"urls": ["https://httpbin.org/html"],
|
||||
"browser_config": browser_config_payload,
|
||||
"crawler_config": crawler_config_payload
|
||||
}
|
||||
response = requests.post(
|
||||
"http://localhost:11235/crawl", # Updated port
|
||||
# headers={"Authorization": f"Bearer {token}"}, # If JWT is enabled
|
||||
json=crawl_payload
|
||||
)
|
||||
print(f"Status Code: {response.status_code}")
|
||||
if response.ok:
|
||||
print(response.json())
|
||||
else:
|
||||
print(f"Error: {response.text}")
|
||||
|
||||
```
|
||||
|
||||
#### Streaming Results
|
||||
|
||||
```python
|
||||
import json
|
||||
import httpx # Use httpx for async streaming example
|
||||
|
||||
async def test_stream_crawl(token: str = None): # Made token optional
|
||||
"""Test the /crawl/stream endpoint with multiple URLs."""
|
||||
url = "http://localhost:11235/crawl/stream" # Updated port
|
||||
payload = {
|
||||
"urls": [
|
||||
"https://httpbin.org/html",
|
||||
"https://httpbin.org/links/5/0",
|
||||
],
|
||||
"browser_config": {
|
||||
"type": "BrowserConfig",
|
||||
"params": {"headless": True, "viewport": {"type": "dict", "value": {"width": 1200, "height": 800}}} # Viewport needs type:dict
|
||||
},
|
||||
"crawler_config": {
|
||||
"type": "CrawlerRunConfig",
|
||||
"params": {"stream": True, "cache_mode": "bypass"}
|
||||
}
|
||||
}
|
||||
|
||||
headers = {}
|
||||
# if token:
|
||||
# headers = {"Authorization": f"Bearer {token}"} # If JWT is enabled
|
||||
|
||||
try:
|
||||
async with httpx.AsyncClient() as client:
|
||||
async with client.stream("POST", url, json=payload, headers=headers, timeout=120.0) as response:
|
||||
print(f"Status: {response.status_code} (Expected: 200)")
|
||||
response.raise_for_status() # Raise exception for bad status codes
|
||||
|
||||
# Read streaming response line-by-line (NDJSON)
|
||||
async for line in response.aiter_lines():
|
||||
if line:
|
||||
try:
|
||||
data = json.loads(line)
|
||||
# Check for completion marker
|
||||
if data.get("status") == "completed":
|
||||
print("Stream completed.")
|
||||
break
|
||||
print(f"Streamed Result: {json.dumps(data, indent=2)}")
|
||||
except json.JSONDecodeError:
|
||||
print(f"Warning: Could not decode JSON line: {line}")
|
||||
|
||||
except httpx.HTTPStatusError as e:
|
||||
print(f"HTTP error occurred: {e.response.status_code} - {e.response.text}")
|
||||
except Exception as e:
|
||||
print(f"Error in streaming crawl test: {str(e)}")
|
||||
|
||||
# To run this example:
|
||||
# import asyncio
|
||||
# asyncio.run(test_stream_crawl())
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Metrics & Monitoring
|
||||
|
||||
Keep an eye on your crawler with these endpoints:
|
||||
|
||||
- `/health` - Quick health check
|
||||
- `/metrics` - Detailed Prometheus metrics
|
||||
- `/schema` - Full API schema
|
||||
|
||||
Example health check:
|
||||
```bash
|
||||
curl http://localhost:11235/health
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
*(Deployment Scenarios and Complete Examples sections remain the same, maybe update links if examples moved)*
|
||||
|
||||
---
|
||||
|
||||
## Server Configuration
|
||||
|
||||
The server's behavior can be customized through the `config.yml` file.
|
||||
|
||||
### Understanding config.yml
|
||||
|
||||
The configuration file is loaded from `/app/config.yml` inside the container. By default, the file from `deploy/docker/config.yml` in the repository is copied there during the build.
|
||||
|
||||
Here's a detailed breakdown of the configuration options (using defaults from `deploy/docker/config.yml`):
|
||||
|
||||
```yaml
|
||||
# Application Configuration
|
||||
app:
|
||||
title: "Crawl4AI API"
|
||||
version: "1.0.0" # Consider setting this to match library version, e.g., "0.5.1"
|
||||
host: "0.0.0.0"
|
||||
port: 8020 # NOTE: This port is used ONLY when running server.py directly. Gunicorn overrides this (see supervisord.conf).
|
||||
reload: False # Default set to False - suitable for production
|
||||
timeout_keep_alive: 300
|
||||
|
||||
# Default LLM Configuration
|
||||
llm:
|
||||
provider: "openai/gpt-4o-mini"
|
||||
api_key_env: "OPENAI_API_KEY"
|
||||
# api_key: sk-... # If you pass the API key directly then api_key_env will be ignored
|
||||
|
||||
# Redis Configuration (Used by internal Redis server managed by supervisord)
|
||||
redis:
|
||||
host: "localhost"
|
||||
port: 6379
|
||||
db: 0
|
||||
password: ""
|
||||
# ... other redis options ...
|
||||
|
||||
# Rate Limiting Configuration
|
||||
rate_limiting:
|
||||
enabled: True
|
||||
default_limit: "1000/minute"
|
||||
trusted_proxies: []
|
||||
storage_uri: "memory://" # Use "redis://localhost:6379" if you need persistent/shared limits
|
||||
|
||||
# Security Configuration
|
||||
security:
|
||||
enabled: false # Master toggle for security features
|
||||
jwt_enabled: false # Enable JWT authentication (requires security.enabled=true)
|
||||
https_redirect: false # Force HTTPS (requires security.enabled=true)
|
||||
trusted_hosts: ["*"] # Allowed hosts (use specific domains in production)
|
||||
headers: # Security headers (applied if security.enabled=true)
|
||||
x_content_type_options: "nosniff"
|
||||
x_frame_options: "DENY"
|
||||
content_security_policy: "default-src 'self'"
|
||||
strict_transport_security: "max-age=63072000; includeSubDomains"
|
||||
|
||||
# Crawler Configuration
|
||||
crawler:
|
||||
memory_threshold_percent: 95.0
|
||||
rate_limiter:
|
||||
base_delay: [1.0, 2.0] # Min/max delay between requests in seconds for dispatcher
|
||||
timeouts:
|
||||
stream_init: 30.0 # Timeout for stream initialization
|
||||
batch_process: 300.0 # Timeout for non-streaming /crawl processing
|
||||
|
||||
# Logging Configuration
|
||||
logging:
|
||||
level: "INFO"
|
||||
format: "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
|
||||
|
||||
# Observability Configuration
|
||||
observability:
|
||||
prometheus:
|
||||
enabled: True
|
||||
endpoint: "/metrics"
|
||||
health_check:
|
||||
endpoint: "/health"
|
||||
```
|
||||
|
||||
*(JWT Authentication section remains the same, just note the default port is now 11235 for requests)*
|
||||
|
||||
*(Configuration Tips and Best Practices remain the same)*
|
||||
|
||||
### Customizing Your Configuration
|
||||
|
||||
You can override the default `config.yml`.
|
||||
|
||||
#### Method 1: Modify Before Build
|
||||
|
||||
1. Edit the `deploy/docker/config.yml` file in your local repository clone.
|
||||
2. Build the image using `docker buildx` or `docker compose --profile local-... up --build`. The modified file will be copied into the image.
|
||||
|
||||
#### Method 2: Runtime Mount (Recommended for Custom Deploys)
|
||||
|
||||
1. Create your custom configuration file, e.g., `my-custom-config.yml` locally. Ensure it contains all necessary sections.
|
||||
2. Mount it when running the container:
|
||||
|
||||
* **Using `docker run`:**
|
||||
```bash
|
||||
# Assumes my-custom-config.yml is in the current directory
|
||||
docker run -d -p 11235:11235 \
|
||||
--name crawl4ai-custom-config \
|
||||
--env-file .llm.env \
|
||||
--shm-size=1g \
|
||||
-v $(pwd)/my-custom-config.yml:/app/config.yml \
|
||||
unclecode/crawl4ai:latest # Or your specific tag
|
||||
```
|
||||
|
||||
* **Using `docker-compose.yml`:** Add a `volumes` section to the service definition:
|
||||
```yaml
|
||||
services:
|
||||
crawl4ai-hub-amd64: # Or your chosen service
|
||||
image: unclecode/crawl4ai:latest
|
||||
profiles: ["hub-amd64"]
|
||||
<<: *base-config
|
||||
volumes:
|
||||
# Mount local custom config over the default one in the container
|
||||
- ./my-custom-config.yml:/app/config.yml
|
||||
# Keep the shared memory volume from base-config
|
||||
- /dev/shm:/dev/shm
|
||||
```
|
||||
*(Note: Ensure `my-custom-config.yml` is in the same directory as `docker-compose.yml`)*
|
||||
|
||||
> 💡 When mounting, your custom file *completely replaces* the default one. Ensure it's a valid and complete configuration.
|
||||
|
||||
### Configuration Recommendations
|
||||
|
||||
1. **Security First** 🔒
|
||||
- Always enable security in production
|
||||
- Use specific trusted_hosts instead of wildcards
|
||||
- Set up proper rate limiting to protect your server
|
||||
- Consider your environment before enabling HTTPS redirect
|
||||
|
||||
2. **Resource Management** 💻
|
||||
- Adjust memory_threshold_percent based on available RAM
|
||||
- Set timeouts according to your content size and network conditions
|
||||
- Use Redis for rate limiting in multi-container setups
|
||||
|
||||
3. **Monitoring** 📊
|
||||
- Enable Prometheus if you need metrics
|
||||
- Set DEBUG logging in development, INFO in production
|
||||
- Regular health check monitoring is crucial
|
||||
|
||||
4. **Performance Tuning** ⚡
|
||||
- Start with conservative rate limiter delays
|
||||
- Increase batch_process timeout for large content
|
||||
- Adjust stream_init timeout based on initial response times

## Getting Help

We're here to help you succeed with Crawl4AI! Here's how to get support:

- 📖 Check our [full documentation](https://docs.crawl4ai.com)
- 🐛 Found a bug? [Open an issue](https://github.com/unclecode/crawl4ai/issues)
- 💬 Join our [Discord community](https://discord.gg/crawl4ai)
- ⭐ Star us on GitHub to show support!

## Summary

In this guide, we've covered everything you need to get started with Crawl4AI's Docker deployment:

- Building and running the Docker container
- Configuring the environment
- Using the interactive playground for testing
- Making API requests with proper typing
- Using the Python SDK
- Leveraging specialized endpoints for screenshots, PDFs, and JavaScript execution
- Connecting via the Model Context Protocol (MCP)
- Monitoring your deployment

The new playground interface at `http://localhost:11235/playground` makes it much easier to test configurations and generate the corresponding JSON for API requests.

For AI application developers, the MCP integration allows tools like Claude Code to directly access Crawl4AI's capabilities without complex API handling.
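
For example, once the server is up you can inspect what the MCP bridge exposes. A minimal sketch, assuming the default `/mcp` base path and local port 11235:

```python
# Hedged sketch: list the MCP tools advertised by the bridge's schema
# endpoint (see mcp_bridge.py below; /mcp is the default base path).
import httpx

schema = httpx.get("http://localhost:11235/mcp/schema").json()
for tool in schema["tools"]:
    print(tool["name"], "-", (tool.get("description") or "").split("\n")[0])
```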

Remember, the examples in the `examples` folder are your friends - they show real-world usage patterns that you can adapt for your needs.

Keep exploring, and don't hesitate to reach out if you need help! We're building something amazing together. 🚀

Happy crawling! 🕷️

#### `deploy/docker/api.py` (new file, 523 lines)

@@ -0,0 +1,523 @@

import os
import json
import asyncio
from typing import List, Tuple
from functools import partial

import logging
from typing import Optional, AsyncGenerator
from urllib.parse import unquote
from fastapi import HTTPException, Request, status
from fastapi.background import BackgroundTasks
from fastapi.responses import JSONResponse
from redis import asyncio as aioredis

from crawl4ai import (
    AsyncWebCrawler,
    CrawlerRunConfig,
    LLMExtractionStrategy,
    CacheMode,
    BrowserConfig,
    MemoryAdaptiveDispatcher,
    RateLimiter,
    LLMConfig
)
from crawl4ai.utils import perform_completion_with_backoff
from crawl4ai.content_filter_strategy import (
    PruningContentFilter,
    BM25ContentFilter,
    LLMContentFilter
)
from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator
from crawl4ai.content_scraping_strategy import LXMLWebScrapingStrategy

from utils import (
    TaskStatus,
    FilterType,
    get_base_url,
    is_task_id,
    should_cleanup_task,
    decode_redis_hash
)

import psutil, time

logger = logging.getLogger(__name__)

# --- Helper to get memory ---
def _get_memory_mb():
    try:
        return psutil.Process().memory_info().rss / (1024 * 1024)
    except Exception as e:
        logger.warning(f"Could not get memory info: {e}")
        return None


async def handle_llm_qa(
    url: str,
    query: str,
    config: dict
) -> str:
    """Process QA using LLM with crawled content as context."""
    try:
        if not url.startswith(('http://', 'https://')):
            url = 'https://' + url
        # Extract base URL by finding last '?q=' occurrence
        last_q_index = url.rfind('?q=')
        if last_q_index != -1:
            url = url[:last_q_index]

        # Get markdown content
        async with AsyncWebCrawler() as crawler:
            result = await crawler.arun(url)
            if not result.success:
                raise HTTPException(
                    status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                    detail=result.error_message
                )
            content = result.markdown.fit_markdown or result.markdown.raw_markdown

        # Create prompt and get LLM response
        prompt = f"""Use the following content as context to answer the question.
Content:
{content}

Question: {query}

Answer:"""

        response = perform_completion_with_backoff(
            provider=config["llm"]["provider"],
            prompt_with_variables=prompt,
            api_token=os.environ.get(config["llm"].get("api_key_env", ""))
        )

        return response.choices[0].message.content
    except Exception as e:
        logger.error(f"QA processing error: {str(e)}", exc_info=True)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=str(e)
        )


async def process_llm_extraction(
    redis: aioredis.Redis,
    config: dict,
    task_id: str,
    url: str,
    instruction: str,
    schema: Optional[str] = None,
    cache: str = "0"
) -> None:
    """Process LLM extraction in background."""
    try:
        # If config['llm'] has api_key then ignore api_key_env
        api_key = ""
        if "api_key" in config["llm"]:
            api_key = config["llm"]["api_key"]
        else:
            api_key = os.environ.get(config["llm"].get("api_key_env", None), "")
        llm_strategy = LLMExtractionStrategy(
            llm_config=LLMConfig(
                provider=config["llm"]["provider"],
                api_token=api_key
            ),
            instruction=instruction,
            schema=json.loads(schema) if schema else None,
        )

        cache_mode = CacheMode.ENABLED if cache == "1" else CacheMode.WRITE_ONLY

        async with AsyncWebCrawler() as crawler:
            result = await crawler.arun(
                url=url,
                config=CrawlerRunConfig(
                    extraction_strategy=llm_strategy,
                    scraping_strategy=LXMLWebScrapingStrategy(),
                    cache_mode=cache_mode
                )
            )

        if not result.success:
            await redis.hset(f"task:{task_id}", mapping={
                "status": TaskStatus.FAILED,
                "error": result.error_message
            })
            return

        try:
            content = json.loads(result.extracted_content)
        except json.JSONDecodeError:
            content = result.extracted_content
        await redis.hset(f"task:{task_id}", mapping={
            "status": TaskStatus.COMPLETED,
            "result": json.dumps(content)
        })

    except Exception as e:
        logger.error(f"LLM extraction error: {str(e)}", exc_info=True)
        await redis.hset(f"task:{task_id}", mapping={
            "status": TaskStatus.FAILED,
            "error": str(e)
        })


async def handle_markdown_request(
    url: str,
    filter_type: FilterType,
    query: Optional[str] = None,
    cache: str = "0",
    config: Optional[dict] = None
) -> str:
    """Handle markdown generation requests."""
    try:
        decoded_url = unquote(url)
        if not decoded_url.startswith(('http://', 'https://')):
            decoded_url = 'https://' + decoded_url

        if filter_type == FilterType.RAW:
            md_generator = DefaultMarkdownGenerator()
        else:
            content_filter = {
                FilterType.FIT: PruningContentFilter(),
                FilterType.BM25: BM25ContentFilter(user_query=query or ""),
                FilterType.LLM: LLMContentFilter(
                    llm_config=LLMConfig(
                        provider=config["llm"]["provider"],
                        api_token=os.environ.get(config["llm"].get("api_key_env", None), ""),
                    ),
                    instruction=query or "Extract main content"
                )
            }[filter_type]
            md_generator = DefaultMarkdownGenerator(content_filter=content_filter)

        cache_mode = CacheMode.ENABLED if cache == "1" else CacheMode.WRITE_ONLY

        async with AsyncWebCrawler() as crawler:
            result = await crawler.arun(
                url=decoded_url,
                config=CrawlerRunConfig(
                    markdown_generator=md_generator,
                    scraping_strategy=LXMLWebScrapingStrategy(),
                    cache_mode=cache_mode
                )
            )

            if not result.success:
                raise HTTPException(
                    status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                    detail=result.error_message
                )

            return (result.markdown.raw_markdown
                    if filter_type == FilterType.RAW
                    else result.markdown.fit_markdown)

    except Exception as e:
        logger.error(f"Markdown error: {str(e)}", exc_info=True)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=str(e)
        )


async def handle_llm_request(
    redis: aioredis.Redis,
    background_tasks: BackgroundTasks,
    request: Request,
    input_path: str,
    query: Optional[str] = None,
    schema: Optional[str] = None,
    cache: str = "0",
    config: Optional[dict] = None
) -> JSONResponse:
    """Handle LLM extraction requests."""
    base_url = get_base_url(request)

    try:
        if is_task_id(input_path):
            return await handle_task_status(
                redis, input_path, base_url
            )

        if not query:
            return JSONResponse({
                "message": "Please provide an instruction",
                "_links": {
                    "example": {
                        "href": f"{base_url}/llm/{input_path}?q=Extract+main+content",
                        "title": "Try this example"
                    }
                }
            })

        return await create_new_task(
            redis,
            background_tasks,
            input_path,
            query,
            schema,
            cache,
            base_url,
            config
        )

    except Exception as e:
        logger.error(f"LLM endpoint error: {str(e)}", exc_info=True)
        return JSONResponse({
            "error": str(e),
            "_links": {
                "retry": {"href": str(request.url)}
            }
        }, status_code=status.HTTP_500_INTERNAL_SERVER_ERROR)


async def handle_task_status(
    redis: aioredis.Redis,
    task_id: str,
    base_url: str
) -> JSONResponse:
    """Handle task status check requests."""
    task = await redis.hgetall(f"task:{task_id}")
    if not task:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Task not found"
        )

    task = decode_redis_hash(task)
    response = create_task_response(task, task_id, base_url)

    if task["status"] in [TaskStatus.COMPLETED, TaskStatus.FAILED]:
        if should_cleanup_task(task["created_at"]):
            await redis.delete(f"task:{task_id}")

    return JSONResponse(response)


async def create_new_task(
    redis: aioredis.Redis,
    background_tasks: BackgroundTasks,
    input_path: str,
    query: str,
    schema: Optional[str],
    cache: str,
    base_url: str,
    config: dict
) -> JSONResponse:
    """Create and initialize a new task."""
    decoded_url = unquote(input_path)
    if not decoded_url.startswith(('http://', 'https://')):
        decoded_url = 'https://' + decoded_url

    from datetime import datetime
    task_id = f"llm_{int(datetime.now().timestamp())}_{id(background_tasks)}"

    await redis.hset(f"task:{task_id}", mapping={
        "status": TaskStatus.PROCESSING,
        "created_at": datetime.now().isoformat(),
        "url": decoded_url
    })

    background_tasks.add_task(
        process_llm_extraction,
        redis,
        config,
        task_id,
        decoded_url,
        query,
        schema,
        cache
    )

    return JSONResponse({
        "task_id": task_id,
        "status": TaskStatus.PROCESSING,
        "url": decoded_url,
        "_links": {
            "self": {"href": f"{base_url}/llm/{task_id}"},
            "status": {"href": f"{base_url}/llm/{task_id}"}
        }
    })


def create_task_response(task: dict, task_id: str, base_url: str) -> dict:
    """Create response for task status check."""
    response = {
        "task_id": task_id,
        "status": task["status"],
        "created_at": task["created_at"],
        "url": task["url"],
        "_links": {
            "self": {"href": f"{base_url}/llm/{task_id}"},
            "refresh": {"href": f"{base_url}/llm/{task_id}"}
        }
    }

    if task["status"] == TaskStatus.COMPLETED:
        response["result"] = json.loads(task["result"])
    elif task["status"] == TaskStatus.FAILED:
        response["error"] = task["error"]

    return response


async def stream_results(crawler: AsyncWebCrawler, results_gen: AsyncGenerator) -> AsyncGenerator[bytes, None]:
    """Stream results with heartbeats and completion markers."""
    import json
    from utils import datetime_handler

    try:
        async for result in results_gen:
            try:
                server_memory_mb = _get_memory_mb()
                result_dict = result.model_dump()
                result_dict['server_memory_mb'] = server_memory_mb
                logger.info(f"Streaming result for {result_dict.get('url', 'unknown')}")
                data = json.dumps(result_dict, default=datetime_handler) + "\n"
                yield data.encode('utf-8')
            except Exception as e:
                logger.error(f"Serialization error: {e}")
                error_response = {"error": str(e), "url": getattr(result, 'url', 'unknown')}
                yield (json.dumps(error_response) + "\n").encode('utf-8')

        yield json.dumps({"status": "completed"}).encode('utf-8')

    except asyncio.CancelledError:
        logger.warning("Client disconnected during streaming")
    finally:
        # The crawler belongs to the shared pool, so it is intentionally not
        # closed here; the pool janitor reclaims idle instances.
        pass


async def handle_crawl_request(
    urls: List[str],
    browser_config: dict,
    crawler_config: dict,
    config: dict
) -> dict:
    """Handle non-streaming crawl requests."""
    start_mem_mb = _get_memory_mb()  # <--- Get memory before
    start_time = time.time()
    mem_delta_mb = None
    peak_mem_mb = start_mem_mb

    try:
        urls = [('https://' + url) if not url.startswith(('http://', 'https://')) else url for url in urls]
        browser_config = BrowserConfig.load(browser_config)
        crawler_config = CrawlerRunConfig.load(crawler_config)

        dispatcher = MemoryAdaptiveDispatcher(
            memory_threshold_percent=config["crawler"]["memory_threshold_percent"],
            rate_limiter=RateLimiter(
                base_delay=tuple(config["crawler"]["rate_limiter"]["base_delay"])
            ) if config["crawler"]["rate_limiter"]["enabled"] else None
        )

        from crawler_pool import get_crawler
        crawler = await get_crawler(browser_config)

        base_config = config["crawler"]["base_config"]
        # Iterate over key-value pairs in base_config, then use hasattr/setattr to apply them
        for key, value in base_config.items():
            if hasattr(crawler_config, key):
                setattr(crawler_config, key, value)

        results = []
        func = getattr(crawler, "arun" if len(urls) == 1 else "arun_many")
        partial_func = partial(func,
                               urls[0] if len(urls) == 1 else urls,
                               config=crawler_config,
                               dispatcher=dispatcher)
        results = await partial_func()

        end_mem_mb = _get_memory_mb()  # <--- Get memory after
        end_time = time.time()

        if start_mem_mb is not None and end_mem_mb is not None:
            mem_delta_mb = end_mem_mb - start_mem_mb  # <--- Calculate delta
            peak_mem_mb = max(peak_mem_mb if peak_mem_mb else 0, end_mem_mb)  # <--- Get peak memory
        logger.info(f"Memory usage: Start: {start_mem_mb} MB, End: {end_mem_mb} MB, Delta: {mem_delta_mb} MB, Peak: {peak_mem_mb} MB")

        return {
            "success": True,
            "results": [result.model_dump() for result in results],
            "server_processing_time_s": end_time - start_time,
            "server_memory_delta_mb": mem_delta_mb,
            "server_peak_memory_mb": peak_mem_mb
        }

    except Exception as e:
        logger.error(f"Crawl error: {str(e)}", exc_info=True)
        # Note: the crawler comes from the shared pool, so it is deliberately
        # not closed here even on error; the crawler_pool janitor reclaims
        # idle instances.

        # Measure memory even on error if possible
        end_mem_mb_error = _get_memory_mb()
        if start_mem_mb is not None and end_mem_mb_error is not None:
            mem_delta_mb = end_mem_mb_error - start_mem_mb

        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=json.dumps({  # Send structured error
                "error": str(e),
                "server_memory_delta_mb": mem_delta_mb,
                "server_peak_memory_mb": max(peak_mem_mb if peak_mem_mb else 0, end_mem_mb_error or 0)
            })
        )

async def handle_stream_crawl_request(
    urls: List[str],
    browser_config: dict,
    crawler_config: dict,
    config: dict
) -> Tuple[AsyncWebCrawler, AsyncGenerator]:
    """Handle streaming crawl requests."""
    try:
        browser_config = BrowserConfig.load(browser_config)
        browser_config.verbose = False  # keep verbose off for production stress testing
        crawler_config = CrawlerRunConfig.load(crawler_config)
        crawler_config.scraping_strategy = LXMLWebScrapingStrategy()
        crawler_config.stream = True

        dispatcher = MemoryAdaptiveDispatcher(
            memory_threshold_percent=config["crawler"]["memory_threshold_percent"],
            rate_limiter=RateLimiter(
                base_delay=tuple(config["crawler"]["rate_limiter"]["base_delay"])
            )
        )

        from crawler_pool import get_crawler
        crawler = await get_crawler(browser_config)

        results_gen = await crawler.arun_many(
            urls=urls,
            config=crawler_config,
            dispatcher=dispatcher
        )

        return crawler, results_gen

    except Exception as e:
        logger.error(f"Stream crawl error: {str(e)}", exc_info=True)
        # The pooled crawler is intentionally not closed here; the pool
        # janitor reclaims idle instances.
        # Raising HTTPException here will prevent a streaming response.
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=str(e)
        )
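
For orientation, and not part of the diff itself: the NDJSON contract implemented by `stream_results` above (one JSON object per line, then a final `{"status": "completed"}` marker) can be consumed client-side with a sketch like this, assuming the server runs locally on port 11235:

```python
# Hedged sketch: consume /crawl/stream line by line until the final
# {"status": "completed"} marker emitted by stream_results().
import json
import httpx

payload = {"urls": ["https://example.com"], "browser_config": {}, "crawler_config": {}}
with httpx.stream("POST", "http://localhost:11235/crawl/stream",
                  json=payload, timeout=None) as r:
    for line in r.iter_lines():
        if not line:
            continue
        obj = json.loads(line)
        if obj.get("status") == "completed":
            break
        print(obj.get("url"), "->", obj.get("success"))
```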

#### `deploy/docker/auth.py` (new file, 55 lines)

@@ -0,0 +1,55 @@

import os
from datetime import datetime, timedelta, timezone
from typing import Dict, Optional
from jwt import JWT, jwk_from_dict
from jwt.utils import get_int_from_datetime
from fastapi import Depends, HTTPException
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
from pydantic import EmailStr
from pydantic.main import BaseModel
import base64

instance = JWT()
security = HTTPBearer(auto_error=False)
SECRET_KEY = os.environ.get("SECRET_KEY", "mysecret")
ACCESS_TOKEN_EXPIRE_MINUTES = 60


def get_jwk_from_secret(secret: str):
    """Convert a secret string into a JWK object."""
    secret_bytes = secret.encode('utf-8')
    b64_secret = base64.urlsafe_b64encode(secret_bytes).rstrip(b'=').decode('utf-8')
    return jwk_from_dict({"kty": "oct", "k": b64_secret})


def create_access_token(data: dict, expires_delta: Optional[timedelta] = None) -> str:
    """Create a JWT access token with an expiration."""
    to_encode = data.copy()
    expire = datetime.now(timezone.utc) + (expires_delta or timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES))
    to_encode.update({"exp": get_int_from_datetime(expire)})
    signing_key = get_jwk_from_secret(SECRET_KEY)
    return instance.encode(to_encode, signing_key, alg='HS256')


def verify_token(credentials: HTTPAuthorizationCredentials = Depends(security)) -> Optional[Dict]:
    """Verify the JWT token from the Authorization header."""
    if credentials is None:
        return None
    token = credentials.credentials
    verifying_key = get_jwk_from_secret(SECRET_KEY)
    try:
        payload = instance.decode(token, verifying_key, do_time_check=True, algorithms='HS256')
        return payload
    except Exception:
        raise HTTPException(status_code=401, detail="Invalid or expired token")


def get_token_dependency(config: Dict):
    """Return the token dependency if JWT is enabled, else a function that returns None."""
    if config.get("security", {}).get("jwt_enabled", False):
        return verify_token
    else:
        return lambda: None


class TokenRequest(BaseModel):
    email: EmailStr
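
For orientation, not part of the diff: when `security.jwt_enabled` is true, a client first obtains a token from `/token` and then sends it as a Bearer header. A hedged sketch, assuming the email domain passes `verify_email_domain` and the server runs locally:

```python
# Hedged sketch: fetch a JWT from /token, then call a protected endpoint.
import httpx

base = "http://localhost:11235"
tok = httpx.post(f"{base}/token", json={"email": "user@example.com"}).json()
headers = {"Authorization": f"Bearer {tok['access_token']}"}
md = httpx.post(f"{base}/md", json={"url": "https://example.com"},
                headers=headers)
print(md.json()["success"])
```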

#### `deploy/docker/c4ai-code-context.md` (new file, 11631 lines)

File diff suppressed because it is too large.

#### `deploy/docker/c4ai-doc-context.md` (new file, 8899 lines)

File diff suppressed because it is too large.

#### `deploy/docker/config.yml` (new file, 91 lines)

@@ -0,0 +1,91 @@

# Application Configuration
app:
  title: "Crawl4AI API"
  version: "1.0.0"
  host: "0.0.0.0"
  port: 11235
  reload: False
  workers: 1
  timeout_keep_alive: 300

# Default LLM Configuration
llm:
  provider: "openai/gpt-4o-mini"
  api_key_env: "OPENAI_API_KEY"
  # api_key: sk-...  # If you pass the API key directly, api_key_env will be ignored

# Redis Configuration
redis:
  host: "localhost"
  port: 6379
  db: 0
  password: ""
  ssl: False
  ssl_cert_reqs: None
  ssl_ca_certs: None
  ssl_certfile: None
  ssl_keyfile: None

# Rate Limiting Configuration
rate_limiting:
  enabled: True
  default_limit: "1000/minute"
  trusted_proxies: []
  storage_uri: "memory://"  # Use "redis://localhost:6379" for production

# Security Configuration
security:
  enabled: false
  jwt_enabled: false
  https_redirect: false
  trusted_hosts: ["*"]
  headers:
    x_content_type_options: "nosniff"
    x_frame_options: "DENY"
    content_security_policy: "default-src 'self'"
    strict_transport_security: "max-age=63072000; includeSubDomains"

# Crawler Configuration
crawler:
  base_config:
    simulate_user: true
  memory_threshold_percent: 95.0
  rate_limiter:
    enabled: true
    base_delay: [1.0, 2.0]
  timeouts:
    stream_init: 30.0     # Timeout for stream initialization
    batch_process: 300.0  # Timeout for batch processing
  pool:
    max_pages: 40         # ← GLOBAL_SEM permits
    idle_ttl_sec: 1800    # ← 30 min janitor cutoff
  browser:
    kwargs:
      headless: true
      text_mode: true
    extra_args:
      # - "--single-process"
      - "--no-sandbox"
      - "--disable-dev-shm-usage"
      - "--disable-gpu"
      - "--disable-software-rasterizer"
      - "--disable-web-security"
      - "--allow-insecure-localhost"
      - "--ignore-certificate-errors"

# Logging Configuration
logging:
  level: "INFO"
  format: "%(asctime)s - %(name)s - %(levelname)s - %(message)s"

# Observability Configuration
observability:
  prometheus:
    enabled: True
    endpoint: "/metrics"
  health_check:
    endpoint: "/health"

#### `deploy/docker/crawler_pool.py` (new file, 60 lines)

@@ -0,0 +1,60 @@

# crawler_pool.py (new file)
import asyncio, json, hashlib, time, psutil
from contextlib import suppress
from typing import Dict
from crawl4ai import AsyncWebCrawler, BrowserConfig
from utils import load_config

CONFIG = load_config()

POOL: Dict[str, AsyncWebCrawler] = {}
LAST_USED: Dict[str, float] = {}
LOCK = asyncio.Lock()

MEM_LIMIT = CONFIG.get("crawler", {}).get("memory_threshold_percent", 95.0)  # % RAM – refuse new browsers above this
IDLE_TTL = CONFIG.get("crawler", {}).get("pool", {}).get("idle_ttl_sec", 1800)  # close if unused for 30 min

def _sig(cfg: BrowserConfig) -> str:
    payload = json.dumps(cfg.to_dict(), sort_keys=True, separators=(",", ":"))
    return hashlib.sha1(payload.encode()).hexdigest()

async def get_crawler(cfg: BrowserConfig) -> AsyncWebCrawler:
    sig = _sig(cfg)  # computed before try so the finally clause can always reference it
    try:
        async with LOCK:
            if sig in POOL:
                LAST_USED[sig] = time.time()
                return POOL[sig]
            if psutil.virtual_memory().percent >= MEM_LIMIT:
                raise MemoryError("RAM pressure – new browser denied")
            crawler = AsyncWebCrawler(config=cfg, thread_safe=False)
            await crawler.start()
            POOL[sig] = crawler
            LAST_USED[sig] = time.time()
            return crawler
    except MemoryError as e:
        raise MemoryError(f"RAM pressure – new browser denied: {e}")
    except Exception as e:
        raise RuntimeError(f"Failed to start browser: {e}")
    finally:
        if sig in POOL:
            LAST_USED[sig] = time.time()
        else:
            # If we failed to start the browser, remove it from the pool
            POOL.pop(sig, None)
            LAST_USED.pop(sig, None)

async def close_all():
    async with LOCK:
        await asyncio.gather(*(c.close() for c in POOL.values()), return_exceptions=True)
        POOL.clear()
        LAST_USED.clear()

async def janitor():
    while True:
        await asyncio.sleep(60)
        now = time.time()
        async with LOCK:
            for sig, crawler in list(POOL.items()):
                if now - LAST_USED[sig] > IDLE_TTL:
                    with suppress(Exception):
                        await crawler.close()
                    POOL.pop(sig, None)
                    LAST_USED.pop(sig, None)
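
For orientation, not part of the diff: the pool keys browsers by a SHA-1 of the serialized `BrowserConfig`, so identical configs share one instance. A hedged sketch of that reuse contract (assuming `BrowserConfig.to_dict()` is deterministic for equal configs):

```python
# Hedged sketch: identical BrowserConfigs map to the same pooled browser;
# a different config starts (and pools) a second one.
import asyncio
from crawl4ai import BrowserConfig
from crawler_pool import get_crawler, close_all

async def main():
    a = await get_crawler(BrowserConfig(headless=True))
    b = await get_crawler(BrowserConfig(headless=True))
    assert a is b          # same signature -> same crawler
    c = await get_crawler(BrowserConfig(headless=False))
    assert c is not a      # different signature -> new crawler
    await close_all()

asyncio.run(main())
```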

#### `deploy/docker/mcp_bridge.py` (new file, 252 lines)

@@ -0,0 +1,252 @@

# deploy/docker/mcp_bridge.py

from __future__ import annotations
import inspect, json, re, anyio
from contextlib import suppress
from typing import Any, Callable, Dict, List, Tuple
import httpx

from fastapi import FastAPI, WebSocket, WebSocketDisconnect, HTTPException
from fastapi.responses import JSONResponse
from fastapi import Request
from sse_starlette.sse import EventSourceResponse
from pydantic import BaseModel
from mcp.server.sse import SseServerTransport

import mcp.types as t
from mcp.server.lowlevel.server import Server, NotificationOptions
from mcp.server.models import InitializationOptions

# ── opt‑in decorators ───────────────────────────────────────────
def mcp_resource(name: str | None = None):
    def deco(fn):
        fn.__mcp_kind__, fn.__mcp_name__ = "resource", name
        return fn
    return deco

def mcp_template(name: str | None = None):
    def deco(fn):
        fn.__mcp_kind__, fn.__mcp_name__ = "template", name
        return fn
    return deco

def mcp_tool(name: str | None = None):
    def deco(fn):
        fn.__mcp_kind__, fn.__mcp_name__ = "tool", name
        return fn
    return deco

# ── HTTP‑proxy helper for FastAPI endpoints ─────────────────────
def _make_http_proxy(base_url: str, route):
    method = list(route.methods - {"HEAD", "OPTIONS"})[0]
    async def proxy(**kwargs):
        # replace `/items/{id}` style params first
        path = route.path
        for k, v in list(kwargs.items()):
            placeholder = "{" + k + "}"
            if placeholder in path:
                path = path.replace(placeholder, str(v))
                kwargs.pop(k)
        url = base_url.rstrip("/") + path

        async with httpx.AsyncClient() as client:
            try:
                r = (
                    await client.get(url, params=kwargs)
                    if method == "GET"
                    else await client.request(method, url, json=kwargs)
                )
                r.raise_for_status()
                return r.text if method == "GET" else r.json()
            except httpx.HTTPStatusError as e:
                # surface FastAPI error details instead of plain 500
                raise HTTPException(e.response.status_code, e.response.text)
    return proxy

# ── main entry point ────────────────────────────────────────────
def attach_mcp(
    app: FastAPI,
    *,  # keyword‑only
    base: str = "/mcp",
    name: str | None = None,
    base_url: str,  # eg. "http://127.0.0.1:8020"
) -> None:
    """Call once after all routes are declared to expose WS+SSE MCP endpoints."""
    server_name = name or app.title or "FastAPI-MCP"
    mcp = Server(server_name)

    tools: Dict[str, Tuple[Callable, Callable]] = {}
    resources: Dict[str, Callable] = {}
    templates: Dict[str, Callable] = {}

    # register decorated FastAPI routes
    for route in app.routes:
        fn = getattr(route, "endpoint", None)
        kind = getattr(fn, "__mcp_kind__", None)
        if not kind:
            continue

        key = fn.__mcp_name__ or re.sub(r"[/{}}]", "_", route.path).strip("_")

        if kind == "tool":
            proxy = _make_http_proxy(base_url, route)
            tools[key] = (proxy, fn)
            continue
        if kind == "resource":
            resources[key] = fn
        if kind == "template":
            templates[key] = fn

    # helpers for JSON‑Schema
    def _schema(model: type[BaseModel] | None) -> dict:
        return {"type": "object"} if model is None else model.model_json_schema()

    def _body_model(fn: Callable) -> type[BaseModel] | None:
        for p in inspect.signature(fn).parameters.values():
            a = p.annotation
            if inspect.isclass(a) and issubclass(a, BaseModel):
                return a
        return None

    # MCP handlers
    @mcp.list_tools()
    async def _list_tools() -> List[t.Tool]:
        out = []
        for k, (proxy, orig_fn) in tools.items():
            desc = getattr(orig_fn, "__mcp_description__", None) or inspect.getdoc(orig_fn) or ""
            schema = getattr(orig_fn, "__mcp_schema__", None) or _schema(_body_model(orig_fn))
            out.append(
                t.Tool(name=k, description=desc, inputSchema=schema)
            )
        return out

    @mcp.call_tool()
    async def _call_tool(name: str, arguments: Dict | None) -> List[t.TextContent]:
        if name not in tools:
            raise HTTPException(404, "tool not found")

        proxy, _ = tools[name]
        try:
            res = await proxy(**(arguments or {}))
        except HTTPException as exc:
            # map server‑side errors into MCP "text/error" payloads
            err = {"error": exc.status_code, "detail": exc.detail}
            return [t.TextContent(type="text", text=json.dumps(err))]
        return [t.TextContent(type="text", text=json.dumps(res, default=str))]

    @mcp.list_resources()
    async def _list_resources() -> List[t.Resource]:
        return [
            t.Resource(name=k, description=inspect.getdoc(f) or "", mime_type="application/json")
            for k, f in resources.items()
        ]

    @mcp.read_resource()
    async def _read_resource(name: str) -> List[t.TextContent]:
        if name not in resources:
            raise HTTPException(404, "resource not found")
        res = resources[name]()
        return [t.TextContent(type="text", text=json.dumps(res, default=str))]

    @mcp.list_resource_templates()
    async def _list_templates() -> List[t.ResourceTemplate]:
        return [
            t.ResourceTemplate(
                name=k,
                description=inspect.getdoc(f) or "",
                parameters={
                    p: {"type": "string"} for p in _path_params(app, f)
                },
            )
            for k, f in templates.items()
        ]

    init_opts = InitializationOptions(
        server_name=server_name,
        server_version="0.1.0",
        capabilities=mcp.get_capabilities(
            notification_options=NotificationOptions(),
            experimental_capabilities={},
        ),
    )

    # ── WebSocket transport ────────────────────────────────────
    @app.websocket_route(f"{base}/ws")
    async def _ws(ws: WebSocket):
        await ws.accept()
        c2s_send, c2s_recv = anyio.create_memory_object_stream(100)
        s2c_send, s2c_recv = anyio.create_memory_object_stream(100)

        from pydantic import TypeAdapter
        from mcp.types import JSONRPCMessage
        adapter = TypeAdapter(JSONRPCMessage)

        init_done = anyio.Event()

        async def srv_to_ws():
            first = True
            try:
                async for msg in s2c_recv:
                    await ws.send_json(msg.model_dump())
                    if first:
                        init_done.set()
                        first = False
            finally:
                # make sure cleanup survives TaskGroup cancellation
                with anyio.CancelScope(shield=True):
                    with suppress(RuntimeError):  # idempotent close
                        await ws.close()

        async def ws_to_srv():
            try:
                # 1st frame is always "initialize"
                first = adapter.validate_python(await ws.receive_json())
                await c2s_send.send(first)
                await init_done.wait()  # block until server ready
                while True:
                    data = await ws.receive_json()
                    await c2s_send.send(adapter.validate_python(data))
            except WebSocketDisconnect:
                await c2s_send.aclose()

        async with anyio.create_task_group() as tg:
            tg.start_soon(mcp.run, c2s_recv, s2c_send, init_opts)
            tg.start_soon(ws_to_srv)
            tg.start_soon(srv_to_ws)

    # ── SSE transport (official) ─────────────────────────────
    sse = SseServerTransport(f"{base}/messages/")

    @app.get(f"{base}/sse")
    async def _mcp_sse(request: Request):
        async with sse.connect_sse(
            request.scope, request.receive, request._send  # starlette ASGI primitives
        ) as (read_stream, write_stream):
            await mcp.run(read_stream, write_stream, init_opts)

    # client → server frames are POSTed here
    app.mount(f"{base}/messages", app=sse.handle_post_message)

    # ── schema endpoint ───────────────────────────────────────
    @app.get(f"{base}/schema")
    async def _schema_endpoint():
        return JSONResponse({
            "tools": [x.model_dump() for x in await _list_tools()],
            "resources": [x.model_dump() for x in await _list_resources()],
            "resource_templates": [x.model_dump() for x in await _list_templates()],
        })


# ── helpers ────────────────────────────────────────────────────
def _route_name(path: str) -> str:
    return re.sub(r"[/{}}]", "_", path).strip("_")

def _path_params(app: FastAPI, fn: Callable) -> List[str]:
    for r in app.routes:
        if r.endpoint is fn:
            return list(r.param_convertors.keys())
    return []
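
For orientation, not part of the diff: the whole opt-in surface is the three decorators plus one `attach_mcp()` call. A hedged sketch of exposing an ordinary FastAPI route as an MCP tool:

```python
# Hedged sketch: tag a route with @mcp_tool, then let attach_mcp() build
# the WS/SSE bridge and proxy tool calls back over HTTP.
from fastapi import FastAPI
from pydantic import BaseModel
from mcp_bridge import attach_mcp, mcp_tool

app = FastAPI(title="demo")

class EchoBody(BaseModel):
    text: str

@app.post("/echo")
@mcp_tool("echo")
async def echo(body: EchoBody):
    """Echo the given text back."""
    return {"echo": body.text}

# call once, after all routes are declared
attach_mcp(app, base_url="http://127.0.0.1:8020")
```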

#### `deploy/docker/requirements.txt` (new file, 16 lines)

@@ -0,0 +1,16 @@

fastapi>=0.115.12
uvicorn>=0.34.2
gunicorn>=23.0.0
slowapi==0.1.9
prometheus-fastapi-instrumentator>=7.1.0
redis>=5.2.1
jwt>=1.3.1
dnspython>=2.7.0
email-validator==2.2.0
sse-starlette==2.2.1
pydantic>=2.11
rank-bm25==0.2.2
anyio==4.9.0
PyJWT==2.10.1
mcp>=1.6.0
websockets>=15.0.1

#### `deploy/docker/server.py` (new file, 648 lines)

@@ -0,0 +1,648 @@
|
||||
# ───────────────────────── server.py ─────────────────────────
|
||||
"""
|
||||
Crawl4AI FastAPI entry‑point
|
||||
• Browser pool + global page cap
|
||||
• Rate‑limiting, security, metrics
|
||||
• /crawl, /crawl/stream, /md, /llm endpoints
|
||||
"""
|
||||
|
||||
# ── stdlib & 3rd‑party imports ───────────────────────────────
|
||||
from crawler_pool import get_crawler, close_all, janitor
|
||||
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig
|
||||
from auth import create_access_token, get_token_dependency, TokenRequest
|
||||
from pydantic import BaseModel
|
||||
from typing import Optional, List, Dict
|
||||
from fastapi import Request, Depends
|
||||
from fastapi.responses import FileResponse
|
||||
import base64
|
||||
import re
|
||||
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig
|
||||
from api import (
|
||||
handle_markdown_request, handle_llm_qa,
|
||||
handle_stream_crawl_request, handle_crawl_request,
|
||||
stream_results
|
||||
)
|
||||
from utils import (
|
||||
FilterType, load_config, setup_logging, verify_email_domain
|
||||
)
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import asyncio
|
||||
from typing import List
|
||||
from contextlib import asynccontextmanager
|
||||
import pathlib
|
||||
|
||||
from fastapi import (
|
||||
FastAPI, HTTPException, Request, Path, Query, Depends
|
||||
)
|
||||
from rank_bm25 import BM25Okapi
|
||||
|
||||
def chunk_code_functions(code: str) -> List[str]:
|
||||
tree = ast.parse(code)
|
||||
lines = code.splitlines()
|
||||
chunks = []
|
||||
for node in tree.body:
|
||||
if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
|
||||
start = node.lineno - 1
|
||||
end = getattr(node, 'end_lineno', start + 1)
|
||||
chunks.append("\n".join(lines[start:end]))
|
||||
return chunks
|
||||
from fastapi.responses import (
|
||||
StreamingResponse, RedirectResponse, PlainTextResponse, JSONResponse
|
||||
)
|
||||
from fastapi.middleware.httpsredirect import HTTPSRedirectMiddleware
|
||||
from fastapi.middleware.trustedhost import TrustedHostMiddleware
|
||||
from fastapi.staticfiles import StaticFiles
|
||||
|
||||
from mcp_bridge import attach_mcp, mcp_resource, mcp_template, mcp_tool
|
||||
|
||||
import ast
|
||||
import crawl4ai as _c4
|
||||
from pydantic import BaseModel, Field
|
||||
from slowapi import Limiter
|
||||
from slowapi.util import get_remote_address
|
||||
from prometheus_fastapi_instrumentator import Instrumentator
|
||||
from redis import asyncio as aioredis
|
||||
|
||||
# ── internal imports (after sys.path append) ─────────────────
|
||||
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
|
||||
|
||||
# ────────────────── configuration / logging ──────────────────
|
||||
config = load_config()
|
||||
setup_logging(config)
|
||||
|
||||
__version__ = "0.5.1-d1"
|
||||
|
||||
# ── global page semaphore (hard cap) ─────────────────────────
|
||||
MAX_PAGES = config["crawler"]["pool"].get("max_pages", 30)
|
||||
GLOBAL_SEM = asyncio.Semaphore(MAX_PAGES)
|
||||
|
||||
# import logging
|
||||
# page_log = logging.getLogger("page_cap")
|
||||
# orig_arun = AsyncWebCrawler.arun
|
||||
# async def capped_arun(self, *a, **kw):
|
||||
# await GLOBAL_SEM.acquire() # ← take slot
|
||||
# try:
|
||||
# in_flight = MAX_PAGES - GLOBAL_SEM._value # used permits
|
||||
# page_log.info("🕸️ pages_in_flight=%s / %s", in_flight, MAX_PAGES)
|
||||
# return await orig_arun(self, *a, **kw)
|
||||
# finally:
|
||||
# GLOBAL_SEM.release() # ← free slot
|
||||
|
||||
orig_arun = AsyncWebCrawler.arun
|
||||
|
||||
|
||||
async def capped_arun(self, *a, **kw):
|
||||
async with GLOBAL_SEM:
|
||||
return await orig_arun(self, *a, **kw)
|
||||
AsyncWebCrawler.arun = capped_arun
|
||||
|
||||
# ───────────────────── FastAPI lifespan ──────────────────────
|
||||
|
||||
|
||||
@asynccontextmanager
|
||||
async def lifespan(_: FastAPI):
|
||||
await get_crawler(BrowserConfig(
|
||||
extra_args=config["crawler"]["browser"].get("extra_args", []),
|
||||
**config["crawler"]["browser"].get("kwargs", {}),
|
||||
)) # warm‑up
|
||||
app.state.janitor = asyncio.create_task(janitor()) # idle GC
|
||||
yield
|
||||
app.state.janitor.cancel()
|
||||
await close_all()
|
||||
|
||||
# ───────────────────── FastAPI instance ──────────────────────
|
||||
app = FastAPI(
|
||||
title=config["app"]["title"],
|
||||
version=config["app"]["version"],
|
||||
lifespan=lifespan,
|
||||
)
|
||||
|
||||
# ── static playground ──────────────────────────────────────
|
||||
STATIC_DIR = pathlib.Path(__file__).parent / "static" / "playground"
|
||||
if not STATIC_DIR.exists():
|
||||
raise RuntimeError(f"Playground assets not found at {STATIC_DIR}")
|
||||
app.mount(
|
||||
"/playground",
|
||||
StaticFiles(directory=STATIC_DIR, html=True),
|
||||
name="play",
|
||||
)
|
||||
|
||||
# Optional nice‑to‑have: opening the root shows the playground
|
||||
|
||||
|
||||
@app.get("/")
|
||||
async def root():
|
||||
return RedirectResponse("/playground")
|
||||
|
||||
# ─────────────────── infra / middleware ─────────────────────
|
||||
redis = aioredis.from_url(config["redis"].get("uri", "redis://localhost"))
|
||||
|
||||
limiter = Limiter(
|
||||
key_func=get_remote_address,
|
||||
default_limits=[config["rate_limiting"]["default_limit"]],
|
||||
storage_uri=config["rate_limiting"]["storage_uri"],
|
||||
)
|
||||
|
||||
|
||||
def _setup_security(app_: FastAPI):
|
||||
sec = config["security"]
|
||||
if not sec["enabled"]:
|
||||
return
|
||||
if sec.get("https_redirect"):
|
||||
app_.add_middleware(HTTPSRedirectMiddleware)
|
||||
if sec.get("trusted_hosts", []) != ["*"]:
|
||||
app_.add_middleware(
|
||||
TrustedHostMiddleware, allowed_hosts=sec["trusted_hosts"]
|
||||
)
|
||||
|
||||
|
||||
_setup_security(app)
|
||||
|
||||
if config["observability"]["prometheus"]["enabled"]:
|
||||
Instrumentator().instrument(app).expose(app)
|
||||
|
||||
token_dep = get_token_dependency(config)
|
||||
|
||||
|
||||
@app.middleware("http")
|
||||
async def add_security_headers(request: Request, call_next):
|
||||
resp = await call_next(request)
|
||||
if config["security"]["enabled"]:
|
||||
resp.headers.update(config["security"]["headers"])
|
||||
return resp
|
||||
|
||||
# ───────────────── safe config‑dump helper ─────────────────
|
||||
ALLOWED_TYPES = {
|
||||
"CrawlerRunConfig": CrawlerRunConfig,
|
||||
"BrowserConfig": BrowserConfig,
|
||||
}
|
||||
|
||||
|
||||
def _safe_eval_config(expr: str) -> dict:
|
||||
"""
|
||||
Accept exactly one top‑level call to CrawlerRunConfig(...) or BrowserConfig(...).
|
||||
Whatever is inside the parentheses is fine *except* further function calls
|
||||
(so no __import__('os') stuff). All public names from crawl4ai are available
|
||||
when we eval.
|
||||
"""
|
||||
tree = ast.parse(expr, mode="eval")
|
||||
|
||||
# must be a single call
|
||||
if not isinstance(tree.body, ast.Call):
|
||||
raise ValueError("Expression must be a single constructor call")
|
||||
|
||||
call = tree.body
|
||||
if not (isinstance(call.func, ast.Name) and call.func.id in {"CrawlerRunConfig", "BrowserConfig"}):
|
||||
raise ValueError(
|
||||
"Only CrawlerRunConfig(...) or BrowserConfig(...) are allowed")
|
||||
|
||||
# forbid nested calls to keep the surface tiny
|
||||
for node in ast.walk(call):
|
||||
if isinstance(node, ast.Call) and node is not call:
|
||||
raise ValueError("Nested function calls are not permitted")
|
||||
|
||||
# expose everything that crawl4ai exports, nothing else
|
||||
safe_env = {name: getattr(_c4, name)
|
||||
for name in dir(_c4) if not name.startswith("_")}
|
||||
obj = eval(compile(tree, "<config>", "eval"),
|
||||
{"__builtins__": {}}, safe_env)
|
||||
return obj.dump()
|
||||
|
||||
|
||||
# ───────────────────────── Schemas ───────────────────────────
|
||||
class CrawlRequest(BaseModel):
|
||||
urls: List[str] = Field(min_length=1, max_length=100)
|
||||
browser_config: Optional[Dict] = Field(default_factory=dict)
|
||||
crawler_config: Optional[Dict] = Field(default_factory=dict)
|
||||
|
||||
# ────────────── Schemas ──────────────
|
||||
class MarkdownRequest(BaseModel):
|
||||
"""Request body for the /md endpoint."""
|
||||
url: str = Field(..., description="Absolute http/https URL to fetch")
|
||||
f: FilterType = Field(FilterType.FIT,
|
||||
description="Content‑filter strategy: FIT, RAW, BM25, or LLM")
|
||||
q: Optional[str] = Field(None, description="Query string used by BM25/LLM filters")
|
||||
c: Optional[str] = Field("0", description="Cache‑bust / revision counter")
|
||||
|
||||
|
||||
class RawCode(BaseModel):
|
||||
code: str
|
||||
|
||||
class HTMLRequest(BaseModel):
|
||||
url: str
|
||||
|
||||
class ScreenshotRequest(BaseModel):
|
||||
url: str
|
||||
screenshot_wait_for: Optional[float] = 2
|
||||
output_path: Optional[str] = None
|
||||
|
||||
class PDFRequest(BaseModel):
|
||||
url: str
|
||||
output_path: Optional[str] = None
|
||||
|
||||
|
||||
class JSEndpointRequest(BaseModel):
|
||||
url: str
|
||||
scripts: List[str] = Field(
|
||||
...,
|
||||
description="List of separated JavaScript snippets to execute"
|
||||
)
|
||||
|
||||
# ──────────────────────── Endpoints ──────────────────────────
|
||||
|
||||
|
||||
@app.post("/token")
|
||||
async def get_token(req: TokenRequest):
|
||||
if not verify_email_domain(req.email):
|
||||
raise HTTPException(400, "Invalid email domain")
|
||||
token = create_access_token({"sub": req.email})
|
||||
return {"email": req.email, "access_token": token, "token_type": "bearer"}
|
||||
|
||||
|
||||
@app.post("/config/dump")
|
||||
async def config_dump(raw: RawCode):
|
||||
try:
|
||||
return JSONResponse(_safe_eval_config(raw.code.strip()))
|
||||
except Exception as e:
|
||||
raise HTTPException(400, str(e))
|
||||
|
||||
|
||||
@app.post("/md")
|
||||
@limiter.limit(config["rate_limiting"]["default_limit"])
|
||||
@mcp_tool("md")
|
||||
async def get_markdown(
|
||||
request: Request,
|
||||
body: MarkdownRequest,
|
||||
_td: Dict = Depends(token_dep),
|
||||
):
|
||||
if not body.url.startswith(("http://", "https://")):
|
||||
raise HTTPException(400, "URL must be absolute and start with http/https")
|
||||
markdown = await handle_markdown_request(
|
||||
body.url, body.f, body.q, body.c, config
|
||||
)
|
||||
return JSONResponse({
|
||||
"url": body.url,
|
||||
"filter": body.f,
|
||||
"query": body.q,
|
||||
"cache": body.c,
|
||||
"markdown": markdown,
|
||||
"success": True
|
||||
})
|
||||
|
||||
|
||||
@app.post("/html")
|
||||
@limiter.limit(config["rate_limiting"]["default_limit"])
|
||||
@mcp_tool("html")
|
||||
async def generate_html(
|
||||
request: Request,
|
||||
body: HTMLRequest,
|
||||
_td: Dict = Depends(token_dep),
|
||||
):
|
||||
"""
|
||||
Crawls the URL, preprocesses the raw HTML for schema extraction, and returns the processed HTML.
|
||||
Use when you need sanitized HTML structures for building schemas or further processing.
|
||||
"""
|
||||
cfg = CrawlerRunConfig()
|
||||
async with AsyncWebCrawler(config=BrowserConfig()) as crawler:
|
||||
results = await crawler.arun(url=body.url, config=cfg)
|
||||
raw_html = results[0].html
|
||||
from crawl4ai.utils import preprocess_html_for_schema
|
||||
processed_html = preprocess_html_for_schema(raw_html)
|
||||
return JSONResponse({"html": processed_html, "url": body.url, "success": True})
|
||||
|
||||
# Screenshot endpoint
|
||||
|
||||
@app.post("/screenshot")
|
||||
@limiter.limit(config["rate_limiting"]["default_limit"])
|
||||
@mcp_tool("screenshot")
|
||||
async def generate_screenshot(
|
||||
request: Request,
|
||||
body: ScreenshotRequest,
|
||||
_td: Dict = Depends(token_dep),
|
||||
):
|
||||
"""
|
||||
Capture a full-page PNG screenshot of the specified URL, waiting an optional delay before capture,
|
||||
Use when you need an image snapshot of the rendered page. Its recommened to provide an output path to save the screenshot.
|
||||
Then in result instead of the screenshot you will get a path to the saved file.
|
||||
"""
|
||||
cfg = CrawlerRunConfig(screenshot=True, screenshot_wait_for=body.screenshot_wait_for)
|
||||
async with AsyncWebCrawler(config=BrowserConfig()) as crawler:
|
||||
results = await crawler.arun(url=body.url, config=cfg)
|
||||
screenshot_data = results[0].screenshot
|
||||
if body.output_path:
|
||||
abs_path = os.path.abspath(body.output_path)
|
||||
os.makedirs(os.path.dirname(abs_path), exist_ok=True)
|
||||
with open(abs_path, "wb") as f:
|
||||
f.write(base64.b64decode(screenshot_data))
|
||||
return {"success": True, "path": abs_path}
|
||||
return {"success": True, "screenshot": screenshot_data}
|
||||
|
||||
# PDF endpoint
|
||||
|
||||
@app.post("/pdf")
|
||||
@limiter.limit(config["rate_limiting"]["default_limit"])
|
||||
@mcp_tool("pdf")
|
||||
async def generate_pdf(
|
||||
request: Request,
|
||||
body: PDFRequest,
|
||||
_td: Dict = Depends(token_dep),
|
||||
):
|
||||
"""
|
||||
Generate a PDF document of the specified URL,
|
||||
Use when you need a printable or archivable snapshot of the page. It is recommended to provide an output path to save the PDF.
|
||||
Then in result instead of the PDF you will get a path to the saved file.
|
||||
"""
|
||||
cfg = CrawlerRunConfig(pdf=True)
|
||||
async with AsyncWebCrawler(config=BrowserConfig()) as crawler:
|
||||
results = await crawler.arun(url=body.url, config=cfg)
|
||||
pdf_data = results[0].pdf
|
||||
if body.output_path:
|
||||
abs_path = os.path.abspath(body.output_path)
|
||||
os.makedirs(os.path.dirname(abs_path), exist_ok=True)
|
||||
with open(abs_path, "wb") as f:
|
||||
f.write(pdf_data)
|
||||
return {"success": True, "path": abs_path}
|
||||
return {"success": True, "pdf": base64.b64encode(pdf_data).decode()}
|
||||
|
||||
|
||||
@app.post("/execute_js")
|
||||
@limiter.limit(config["rate_limiting"]["default_limit"])
|
||||
@mcp_tool("execute_js")
|
||||
async def execute_js(
|
||||
request: Request,
|
||||
body: JSEndpointRequest,
|
||||
_td: Dict = Depends(token_dep),
|
||||
):
|
||||
"""
|
||||
Execute a sequence of JavaScript snippets on the specified URL.
|
||||
Return the full CrawlResult JSON (first result).
|
||||
Use this when you need to interact with dynamic pages using JS.
|
||||
REMEMBER: Scripts accept a list of separated JS snippets to execute and execute them in order.
|
||||
IMPORTANT: Each script should be an expression that returns a value. It can be an IIFE or an async function. You can think of it as such.
|
||||
Your script will replace '{script}' and execute in the browser context. So provide either an IIFE or a sync/async function that returns a value.
|
||||
Return Format:
|
||||
- The return result is an instance of CrawlResult, so you have access to markdown, links, and other stuff. If this is enough, you don't need to call again for other endpoints.
|
||||
|
||||
```python
|
||||
class CrawlResult(BaseModel):
|
||||
url: str
|
||||
html: str
|
||||
success: bool
|
||||
cleaned_html: Optional[str] = None
|
||||
media: Dict[str, List[Dict]] = {}
|
||||
links: Dict[str, List[Dict]] = {}
|
||||
downloaded_files: Optional[List[str]] = None
|
||||
js_execution_result: Optional[Dict[str, Any]] = None
|
||||
screenshot: Optional[str] = None
|
||||
pdf: Optional[bytes] = None
|
||||
mhtml: Optional[str] = None
|
||||
_markdown: Optional[MarkdownGenerationResult] = PrivateAttr(default=None)
|
||||
extracted_content: Optional[str] = None
|
||||
metadata: Optional[dict] = None
|
||||
error_message: Optional[str] = None
|
||||
session_id: Optional[str] = None
|
||||
response_headers: Optional[dict] = None
|
||||
status_code: Optional[int] = None
|
||||
ssl_certificate: Optional[SSLCertificate] = None
|
||||
dispatch_result: Optional[DispatchResult] = None
|
||||
redirected_url: Optional[str] = None
|
||||
network_requests: Optional[List[Dict[str, Any]]] = None
|
||||
console_messages: Optional[List[Dict[str, Any]]] = None
|
||||
|
||||
class MarkdownGenerationResult(BaseModel):
|
||||
raw_markdown: str
|
||||
markdown_with_citations: str
|
||||
references_markdown: str
|
||||
fit_markdown: Optional[str] = None
|
||||
fit_html: Optional[str] = None
|
||||
```
|
||||
|
||||
"""
|
||||
cfg = CrawlerRunConfig(js_code=body.scripts)
|
||||
async with AsyncWebCrawler(config=BrowserConfig()) as crawler:
|
||||
results = await crawler.arun(url=body.url, config=cfg)
|
||||
# Return JSON-serializable dict of the first CrawlResult
|
||||
data = results[0].model_dump()
|
||||
return JSONResponse(data)
|
||||
|
||||
|
||||
@app.get("/llm/{url:path}")
|
||||
async def llm_endpoint(
|
||||
request: Request,
|
||||
url: str = Path(...),
|
||||
q: str = Query(...),
|
||||
_td: Dict = Depends(token_dep),
|
||||
):
|
||||
if not q:
|
||||
raise HTTPException(400, "Query parameter 'q' is required")
|
||||
if not url.startswith(("http://", "https://")):
|
||||
url = "https://" + url
|
||||
answer = await handle_llm_qa(url, q, config)
|
||||
return JSONResponse({"answer": answer})
|
||||
|
||||
|
||||
@app.get("/schema")
|
||||
async def get_schema():
|
||||
from crawl4ai import BrowserConfig, CrawlerRunConfig
|
||||
return {"browser": BrowserConfig().dump(),
|
||||
"crawler": CrawlerRunConfig().dump()}
|
||||
|
||||
|
||||
@app.get(config["observability"]["health_check"]["endpoint"])
|
||||
async def health():
|
||||
return {"status": "ok", "timestamp": time.time(), "version": __version__}
|
||||
|
||||
|
||||
@app.get(config["observability"]["prometheus"]["endpoint"])
|
||||
async def metrics():
|
||||
return RedirectResponse(config["observability"]["prometheus"]["endpoint"])
|
||||
|
||||
|
||||
@app.post("/crawl")
|
||||
@limiter.limit(config["rate_limiting"]["default_limit"])
|
||||
@mcp_tool("crawl")
|
||||
async def crawl(
|
||||
request: Request,
|
||||
crawl_request: CrawlRequest,
|
||||
_td: Dict = Depends(token_dep),
|
||||
):
|
||||
"""
|
||||
Crawl a list of URLs and return the results as JSON.
|
||||
"""
|
||||
if not crawl_request.urls:
|
||||
raise HTTPException(400, "At least one URL required")
|
||||
res = await handle_crawl_request(
|
||||
urls=crawl_request.urls,
|
||||
browser_config=crawl_request.browser_config,
|
||||
crawler_config=crawl_request.crawler_config,
|
||||
config=config,
|
||||
)
|
||||
return JSONResponse(res)
|
||||
|
||||
|
||||
@app.post("/crawl/stream")
|
||||
@limiter.limit(config["rate_limiting"]["default_limit"])
|
||||
async def crawl_stream(
|
||||
request: Request,
|
||||
crawl_request: CrawlRequest,
|
||||
_td: Dict = Depends(token_dep),
|
||||
):
|
||||
if not crawl_request.urls:
|
||||
raise HTTPException(400, "At least one URL required")
|
||||
crawler, gen = await handle_stream_crawl_request(
|
||||
urls=crawl_request.urls,
|
||||
browser_config=crawl_request.browser_config,
|
||||
crawler_config=crawl_request.crawler_config,
|
||||
config=config,
|
||||
)
|
||||
return StreamingResponse(
|
||||
stream_results(crawler, gen),
|
||||
media_type="application/x-ndjson",
|
||||
headers={
|
||||
"Cache-Control": "no-cache",
|
||||
"Connection": "keep-alive",
|
||||
"X-Stream-Status": "active",
|
||||
},
|
||||
)
|
||||
|
||||
def chunk_code_functions(code_md: str) -> List[str]:
|
||||
"""Extract each function/class from markdown code blocks per file."""
|
||||
pattern = re.compile(
|
||||
# match "## File: <path>" then a ```py fence, then capture until the closing ```
|
||||
r'##\s*File:\s*(?P<path>.+?)\s*?\r?\n' # file header
|
||||
r'```py\s*?\r?\n' # opening fence
|
||||
r'(?P<code>.*?)(?=\r?\n```)', # code block
|
||||
re.DOTALL
|
||||
)
|
||||
chunks: List[str] = []
|
||||
for m in pattern.finditer(code_md):
|
||||
file_path = m.group("path").strip()
|
||||
code_blk = m.group("code")
|
||||
tree = ast.parse(code_blk)
|
||||
lines = code_blk.splitlines()
|
||||
for node in tree.body:
|
||||
if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
|
||||
start = node.lineno - 1
|
||||
end = getattr(node, "end_lineno", start + 1)
|
||||
snippet = "\n".join(lines[start:end])
|
||||
chunks.append(f"# File: {file_path}\n{snippet}")
|
||||
return chunks
|
||||
|
||||
def chunk_doc_sections(doc: str) -> List[str]:
|
||||
lines = doc.splitlines(keepends=True)
|
||||
sections = []
|
||||
current: List[str] = []
|
||||
for line in lines:
|
||||
if re.match(r"^#{1,6}\s", line):
|
||||
if current:
|
||||
sections.append("".join(current))
|
||||
current = [line]
|
||||
else:
|
||||
current.append(line)
|
||||
if current:
|
||||
sections.append("".join(current))
|
||||
return sections
|
||||
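
# Example: "# A\nintro\n## B\ndetails" -> ["# A\nintro\n", "## B\ndetails"]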
|
||||
@app.get("/ask")
|
||||
@limiter.limit(config["rate_limiting"]["default_limit"])
|
||||
@mcp_tool("ask")
|
||||
async def get_context(
|
||||
request: Request,
|
||||
_td: Dict = Depends(token_dep),
|
||||
context_type: str = Query("all", regex="^(code|doc|all)$"),
|
||||
query: Optional[str] = Query(None, description="search query to filter chunks"),
|
||||
score_ratio: float = Query(0.5, ge=0.0, le=1.0, description="min score as fraction of max_score"),
|
||||
max_results: int = Query(20, ge=1, description="absolute cap on returned chunks"),
|
||||
):
|
||||
"""
|
||||
This end point is design for any questions about Crawl4ai library. It returns a plain text markdown with extensive information about Crawl4ai.
|
||||
You can use this as a context for any AI assistant. Use this endpoint for AI assistants to retrieve library context for decision making or code generation tasks.
|
||||
Alway is BEST practice you provide a query to filter the context. Otherwise the lenght of the response will be very long.
|
||||
|
||||
Parameters:
|
||||
- context_type: Specify "code" for code context, "doc" for documentation context, or "all" for both.
|
||||
- query: RECOMMENDED search query to filter paragraphs using BM25. You can leave this empty to get all the context.
|
||||
- score_ratio: Minimum score as a fraction of the maximum score for filtering results.
|
||||
- max_results: Maximum number of results to return. Default is 20.
|
||||
|
||||
Returns:
|
||||
- JSON response with the requested context.
|
||||
- If "code" is specified, returns the code context.
|
||||
- If "doc" is specified, returns the documentation context.
|
||||
- If "all" is specified, returns both code and documentation contexts.
|
||||
"""
|
||||
# load contexts
|
||||
base = os.path.dirname(__file__)
|
||||
code_path = os.path.join(base, "c4ai-code-context.md")
|
||||
doc_path = os.path.join(base, "c4ai-doc-context.md")
|
||||
if not os.path.exists(code_path) or not os.path.exists(doc_path):
|
||||
raise HTTPException(404, "Context files not found")
|
||||
|
||||
with open(code_path, "r") as f:
|
||||
code_content = f.read()
|
||||
with open(doc_path, "r") as f:
|
||||
doc_content = f.read()
|
||||
|
||||
# if no query, just return raw contexts
|
||||
if not query:
|
||||
if context_type == "code":
|
||||
return JSONResponse({"code_context": code_content})
|
||||
if context_type == "doc":
|
||||
return JSONResponse({"doc_context": doc_content})
|
||||
return JSONResponse({
|
||||
"code_context": code_content,
|
||||
"doc_context": doc_content,
|
||||
})
|
||||
|
||||
tokens = query.split()
|
||||
results: Dict[str, List[Dict[str, float]]] = {}
|
||||
|
||||
# code BM25 over functions/classes
|
||||
if context_type in ("code", "all"):
|
||||
code_chunks = chunk_code_functions(code_content)
|
||||
bm25 = BM25Okapi([c.split() for c in code_chunks])
|
||||
scores = bm25.get_scores(tokens)
|
||||
max_sc = float(scores.max()) if scores.size > 0 else 0.0
|
||||
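        # keep only chunks whose BM25 score is at least score_ratio of the best score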
cutoff = max_sc * score_ratio
|
||||
picked = [(c, s) for c, s in zip(code_chunks, scores) if s >= cutoff]
|
||||
picked = sorted(picked, key=lambda x: x[1], reverse=True)[:max_results]
|
||||
results["code_results"] = [{"text": c, "score": s} for c, s in picked]
|
||||
|
||||
# doc BM25 over markdown sections
|
||||
if context_type in ("doc", "all"):
|
||||
sections = chunk_doc_sections(doc_content)
|
||||
bm25d = BM25Okapi([sec.split() for sec in sections])
|
||||
scores_d = bm25d.get_scores(tokens)
|
||||
max_sd = float(scores_d.max()) if scores_d.size > 0 else 0.0
|
||||
cutoff_d = max_sd * score_ratio
|
||||
idxs = [i for i, s in enumerate(scores_d) if s >= cutoff_d]
|
||||
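        # pull in each hit's immediate neighbors so matched sections keep surrounding context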
neighbors = set(i for idx in idxs for i in (idx-1, idx, idx+1))
|
||||
valid = [i for i in sorted(neighbors) if 0 <= i < len(sections)]
|
||||
valid = valid[:max_results]
|
||||
results["doc_results"] = [
|
||||
{"text": sections[i], "score": scores_d[i]} for i in valid
|
||||
]
|
||||
|
||||
return JSONResponse(results)
|
||||
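
# Illustrative usage of /ask (hypothetical token shown; port from supervisord.conf):
#   curl "http://localhost:11235/ask?context_type=doc&query=browser%20config&max_results=5" \
#        -H "Authorization: Bearer <token>"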
|
||||
|
||||
# attach MCP layer (adds /mcp/ws, /mcp/sse, /mcp/schema)
|
||||
print(f"MCP server running on {config['app']['host']}:{config['app']['port']}")
|
||||
attach_mcp(
|
||||
app,
|
||||
base_url=f"http://{config['app']['host']}:{config['app']['port']}"
|
||||
)
|
||||
|
||||
# ────────────────────────── cli ──────────────────────────────
|
||||
if __name__ == "__main__":
|
||||
import uvicorn
|
||||
uvicorn.run(
|
||||
"server:app",
|
||||
host=config["app"]["host"],
|
||||
port=config["app"]["port"],
|
||||
reload=config["app"]["reload"],
|
||||
timeout_keep_alive=config["app"]["timeout_keep_alive"],
|
||||
)
|
||||
# ─────────────────────────────────────────────────────────────
|
||||
817
deploy/docker/static/playground/index.html
Normal file
@@ -0,0 +1,817 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>Crawl4AI Playground</title>
|
||||
<script src="https://cdn.tailwindcss.com"></script>
|
||||
<script>
|
||||
tailwind.config = {
|
||||
theme: {
|
||||
extend: {
|
||||
colors: {
|
||||
primary: '#4EFFFF',
|
||||
primarydim: '#09b5a5',
|
||||
accent: '#F380F5',
|
||||
dark: '#070708',
|
||||
light: '#E8E9ED',
|
||||
secondary: '#D5CEBF',
|
||||
codebg: '#1E1E1E',
|
||||
surface: '#202020',
|
||||
border: '#3F3F44',
|
||||
},
|
||||
fontFamily: {
|
||||
mono: ['Fira Code', 'monospace'],
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
</script>
|
||||
<link href="https://fonts.googleapis.com/css2?family=Fira+Code:wght@400;500&display=swap" rel="stylesheet">
|
||||
<!-- Highlight.js -->
|
||||
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.9.0/styles/github-dark.min.css">
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.9.0/highlight.min.js"></script>
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/clipboard.js/2.0.11/clipboard.min.js"></script>
|
||||
<!-- CodeMirror (python mode) -->
|
||||
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.65.16/codemirror.min.css">
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.65.16/codemirror.min.js"></script>
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.65.16/mode/python/python.min.js"></script>
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.65.16/addon/edit/matchbrackets.min.js"></script>
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.65.16/addon/selection/active-line.min.js"></script>
|
||||
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.65.16/theme/darcula.min.css">
|
||||
<!-- <script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.9.0/languages/python.min.js"></script>
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.9.0/languages/bash.min.js"></script>
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.9.0/languages/json.min.js"></script> -->
|
||||
<style>
|
||||
/* Custom CodeMirror styling to match theme */
|
||||
.CodeMirror {
|
||||
background-color: #1E1E1E !important;
|
||||
color: #E8E9ED !important;
|
||||
border-radius: 4px;
|
||||
font-family: 'Fira Code', monospace;
|
||||
font-size: 0.9rem;
|
||||
}
|
||||
|
||||
.CodeMirror-gutters {
|
||||
background-color: #1E1E1E !important;
|
||||
border-right: 1px solid #3F3F44 !important;
|
||||
}
|
||||
|
||||
.CodeMirror-linenumber {
|
||||
color: #3F3F44 !important;
|
||||
}
|
||||
|
||||
.cm-s-darcula .cm-keyword {
|
||||
color: #4EFFFF !important;
|
||||
}
|
||||
|
||||
.cm-s-darcula .cm-string {
|
||||
color: #F380F5 !important;
|
||||
}
|
||||
|
||||
.cm-s-darcula .cm-number {
|
||||
color: #D5CEBF !important;
|
||||
}
|
||||
|
||||
/* Add to your <style> section or Tailwind config */
|
||||
.hljs {
|
||||
background: #1E1E1E !important;
|
||||
border-radius: 4px;
|
||||
padding: 1rem !important;
|
||||
}
|
||||
|
||||
pre code.hljs {
|
||||
display: block;
|
||||
overflow-x: auto;
|
||||
}
|
||||
|
||||
/* Language-specific colors */
|
||||
.hljs-attr {
|
||||
color: #4EFFFF;
|
||||
}
|
||||
|
||||
/* JSON keys */
|
||||
.hljs-string {
|
||||
color: #F380F5;
|
||||
}
|
||||
|
||||
/* Strings */
|
||||
.hljs-number {
|
||||
color: #D5CEBF;
|
||||
}
|
||||
|
||||
/* Numbers */
|
||||
.hljs-keyword {
|
||||
color: #4EFFFF;
|
||||
}
|
||||
|
||||
pre code {
|
||||
white-space: pre-wrap;
|
||||
word-break: break-word;
|
||||
}
|
||||
|
||||
.copy-btn {
|
||||
transition: all 0.2s ease;
|
||||
opacity: 0.7;
|
||||
}
|
||||
|
||||
.copy-btn:hover {
|
||||
opacity: 1;
|
||||
}
|
||||
|
||||
.tab-content:hover .copy-btn {
|
||||
opacity: 0.7;
|
||||
}
|
||||
|
||||
.tab-content:hover .copy-btn:hover {
|
||||
opacity: 1;
|
||||
}
|
||||
|
||||
        /* copied text highlighted */
|
||||
.highlighted {
|
||||
background-color: rgba(78, 255, 255, 0.2) !important;
|
||||
transition: background-color 0.5s ease;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
|
||||
<body class="bg-dark text-light font-mono min-h-screen flex flex-col" style="font-feature-settings: 'calt' 0;">
|
||||
<!-- Header -->
|
||||
<header class="border-b border-border px-4 py-2 flex items-center">
|
||||
<h1 class="text-lg font-medium flex items-center space-x-4">
|
||||
<span>🚀🤖 <span class="text-primary">Crawl4AI</span> Playground</span>
|
||||
|
||||
<!-- GitHub badges -->
|
||||
<a href="https://github.com/unclecode/crawl4ai" target="_blank" class="flex space-x-1">
|
||||
<img src="https://img.shields.io/github/stars/unclecode/crawl4ai?style=social"
|
||||
alt="GitHub stars" class="h-5">
|
||||
<img src="https://img.shields.io/github/forks/unclecode/crawl4ai?style=social"
|
||||
alt="GitHub forks" class="h-5">
|
||||
</a>
|
||||
|
||||
<!-- Docs -->
|
||||
<a href="https://docs.crawl4ai.com" target="_blank"
|
||||
class="text-xs text-secondary hover:text-primary underline flex items-center">
|
||||
Docs
|
||||
</a>
|
||||
|
||||
<!-- X (Twitter) follow -->
|
||||
<a href="https://x.com/unclecode" target="_blank"
|
||||
class="hover:text-primary flex items-center" title="Follow @unclecode on X">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"
|
||||
class="w-4 h-4 fill-current mr-1">
|
||||
<path d="M22.46 6c-.77.35-1.6.58-2.46.69a4.27 4.27 0 001.88-2.35 8.53 8.53 0 01-2.71 1.04 4.24 4.24 0 00-7.23 3.87A12.05 12.05 0 013 4.62a4.24 4.24 0 001.31 5.65 4.2 4.2 0 01-1.92-.53v.05a4.24 4.24 0 003.4 4.16 4.31 4.31 0 01-1.91.07 4.25 4.25 0 003.96 2.95A8.5 8.5 0 012 19.55a12.04 12.04 0 006.53 1.92c7.84 0 12.13-6.49 12.13-12.13 0-.18-.01-.36-.02-.54A8.63 8.63 0 0024 5.1a8.45 8.45 0 01-2.54.7z"/>
|
||||
</svg>
|
||||
<span class="text-xs">@unclecode</span>
|
||||
</a>
|
||||
</h1>
|
||||
|
||||
<div class="ml-auto flex space-x-2">
|
||||
<button id="play-tab"
|
||||
class="px-3 py-1 rounded-t bg-surface border border-b-0 border-border text-primary">Playground</button>
|
||||
<button id="stress-tab" class="px-3 py-1 rounded-t border border-border hover:bg-surface">Stress
|
||||
Test</button>
|
||||
</div>
|
||||
</header>
|
||||
|
||||
<!-- Main Playground -->
|
||||
<main id="playground" class="flex-1 flex flex-col p-4 space-y-4 max-w-5xl w-full mx-auto">
|
||||
<!-- Request Builder -->
|
||||
<section class="bg-surface rounded-lg border border-border overflow-hidden">
|
||||
<div class="px-4 py-2 border-b border-border flex items-center">
|
||||
<h2 class="font-medium">Request Builder</h2>
|
||||
<select id="endpoint" class="ml-auto bg-dark border border-border rounded px-2 py-1 text-sm">
|
||||
<option value="crawl">/crawl (batch)</option>
|
||||
<option value="crawl_stream">/crawl/stream</option>
|
||||
<option value="md">/md</option>
|
||||
<option value="llm">/llm</option>
|
||||
</select>
|
||||
</div>
|
||||
<div class="p-4">
|
||||
<label class="block mb-2 text-sm">URL(s) - one per line</label>
|
||||
<textarea id="urls" class="w-full bg-dark border border-border rounded p-2 h-32 text-sm mb-4"
|
||||
spellcheck="false">https://example.com</textarea>
|
||||
|
||||
<details class="mb-4">
|
||||
<summary class="text-sm text-secondary cursor-pointer">Advanced Config <span
|
||||
class="text-xs text-primary">(Python → auto‑JSON)</span></summary>
|
||||
|
||||
<!-- Toolbar -->
|
||||
<div class="flex items-center justify-end space-x-3 mt-2">
|
||||
<label for="cfg-type" class="text-xs text-secondary">Type:</label>
|
||||
<select id="cfg-type"
|
||||
class="bg-dark border border-border rounded px-1 py-0.5 text-xs">
|
||||
<option value="CrawlerRunConfig">CrawlerRunConfig</option>
|
||||
<option value="BrowserConfig">BrowserConfig</option>
|
||||
</select>
|
||||
|
||||
<!-- help link -->
|
||||
<a href="https://docs.crawl4ai.com/api/parameters/"
|
||||
target="_blank"
|
||||
class="text-xs text-primary hover:underline flex items-center space-x-1"
|
||||
title="Open parameter reference in new tab">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"
|
||||
class="w-4 h-4 fill-current">
|
||||
<path d="M13 3h8v8h-2V6.41l-9.29 9.3-1.42-1.42 9.3-9.29H13V3z"/>
|
||||
<path d="M5 5h4V3H3v6h2V5zm0 14v-4H3v6h6v-2H5z"/>
|
||||
</svg>
|
||||
<span>Docs</span>
|
||||
</a>
|
||||
|
||||
<span id="cfg-status" class="text-xs text-secondary ml-2"></span>
|
||||
</div>
|
||||
|
||||
<!-- CodeMirror host -->
|
||||
<div id="adv-editor" class="mt-2 border border-border rounded overflow-hidden h-40"></div>
|
||||
</details>
|
||||
|
||||
<div class="flex space-x-2">
|
||||
<button id="run-btn" class="bg-primary text-dark px-4 py-2 rounded hover:bg-primarydim font-medium">
|
||||
Run (⌘/Ctrl+Enter)
|
||||
</button>
|
||||
<button id="export-btn" class="border border-border px-4 py-2 rounded hover:bg-surface hidden">
|
||||
Export Python Code
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</section>
|
||||
|
||||
<!-- Execution Status -->
|
||||
<section id="execution-status" class="hidden bg-surface rounded-lg border border-border p-3 text-sm">
|
||||
<div class="flex space-x-4">
|
||||
<div id="status-badge" class="flex items-center">
|
||||
<span class="w-3 h-3 rounded-full mr-2"></span>
|
||||
<span>Ready</span>
|
||||
</div>
|
||||
<div>
|
||||
<span class="text-secondary">Time:</span>
|
||||
<span id="exec-time" class="text-light">-</span>
|
||||
</div>
|
||||
<div>
|
||||
<span class="text-secondary">Memory:</span>
|
||||
<span id="exec-mem" class="text-light">-</span>
|
||||
</div>
|
||||
</div>
|
||||
</section>
|
||||
|
||||
<!-- Response Viewer -->
|
||||
<!-- Update the Response Viewer section -->
|
||||
<section class="bg-surface rounded-lg border border-border overflow-hidden flex-1 flex flex-col">
|
||||
<div class="border-b border-border flex">
|
||||
<button data-tab="response" class="tab-btn active px-4 py-2 border-r border-border">Response</button>
|
||||
<button data-tab="python" class="tab-btn px-4 py-2 border-r border-border">Python</button>
|
||||
<button data-tab="curl" class="tab-btn px-4 py-2">cURL</button>
|
||||
</div>
|
||||
<div class="flex-1 overflow-auto relative">
|
||||
<!-- Response Tab -->
|
||||
<div class="tab-content active h-full">
|
||||
<div class="absolute right-2 top-2">
|
||||
<button class="copy-btn bg-surface border border-border rounded px-2 py-1 text-xs hover:bg-dark"
|
||||
data-target="#response-content code">
|
||||
Copy
|
||||
</button>
|
||||
</div>
|
||||
<pre id="response-content" class="p-4 text-sm h-full"><code class="json hljs">{}</code></pre>
|
||||
</div>
|
||||
|
||||
<!-- Python Tab -->
|
||||
<div class="tab-content hidden h-full">
|
||||
<div class="absolute right-2 top-2">
|
||||
<button class="copy-btn bg-surface border border-border rounded px-2 py-1 text-xs hover:bg-dark"
|
||||
data-target="#python-content code">
|
||||
Copy
|
||||
</button>
|
||||
</div>
|
||||
<pre id="python-content" class="p-4 text-sm h-full"><code class="python hljs"></code></pre>
|
||||
</div>
|
||||
|
||||
<!-- cURL Tab -->
|
||||
<div class="tab-content hidden h-full">
|
||||
<div class="absolute right-2 top-2">
|
||||
<button class="copy-btn bg-surface border border-border rounded px-2 py-1 text-xs hover:bg-dark"
|
||||
data-target="#curl-content code">
|
||||
Copy
|
||||
</button>
|
||||
</div>
|
||||
<pre id="curl-content" class="p-4 text-sm h-full"><code class="bash hljs"></code></pre>
|
||||
</div>
|
||||
</div>
|
||||
</section>
|
||||
</main>
|
||||
|
||||
<!-- Stress Test Modal -->
|
||||
<div id="stress-modal"
|
||||
class="hidden fixed inset-0 bg-black bg-opacity-70 z-50 flex items-center justify-center p-4">
|
||||
<div class="bg-surface rounded-lg border border-accent w-full max-w-3xl max-h-[90vh] flex flex-col">
|
||||
<div class="px-4 py-2 border-b border-border flex items-center">
|
||||
<h2 class="font-medium text-accent">🔥 Stress Test</h2>
|
||||
<button id="close-stress" class="ml-auto text-secondary hover:text-light">×</button>
|
||||
</div>
|
||||
|
||||
<div class="p-4 space-y-4 flex-1 overflow-auto">
|
||||
<div class="grid grid-cols-3 gap-4">
|
||||
<div>
|
||||
<label class="block text-sm mb-1">Total URLs</label>
|
||||
<input id="st-total" type="number" value="20"
|
||||
class="w-full bg-dark border border-border rounded px-3 py-1">
|
||||
</div>
|
||||
<div>
|
||||
<label class="block text-sm mb-1">Chunk Size</label>
|
||||
<input id="st-chunk" type="number" value="5"
|
||||
class="w-full bg-dark border border-border rounded px-3 py-1">
|
||||
</div>
|
||||
<div>
|
||||
<label class="block text-sm mb-1">Concurrency</label>
|
||||
<input id="st-conc" type="number" value="2"
|
||||
class="w-full bg-dark border border-border rounded px-3 py-1">
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="flex items-center">
|
||||
<input id="st-stream" type="checkbox" class="mr-2">
|
||||
<label for="st-stream" class="text-sm">Use /crawl/stream</label>
|
||||
<button id="st-run"
|
||||
class="ml-auto bg-accent text-dark px-4 py-2 rounded hover:bg-opacity-90 font-medium">
|
||||
Run Stress Test
|
||||
</button>
|
||||
</div>
|
||||
|
||||
<div class="mt-4">
|
||||
<div class="bg-dark rounded border border-border p-3 h-64 overflow-auto text-sm whitespace-break-spaces"
|
||||
id="stress-log"></div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="px-4 py-2 border-t border-border text-sm text-secondary">
|
||||
<div class="flex justify-between">
|
||||
<span>Completed: <span id="stress-completed">0</span>/<span id="stress-total">0</span></span>
|
||||
<span>Avg. Time: <span id="stress-avg-time">0</span>ms</span>
|
||||
<span>Peak Memory: <span id="stress-peak-mem">0</span>MB</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<script>
|
||||
// Tab switching
|
||||
document.querySelectorAll('.tab-btn').forEach(btn => {
|
||||
btn.addEventListener('click', () => {
|
||||
document.querySelectorAll('.tab-btn').forEach(b => b.classList.remove('active'));
|
||||
document.querySelectorAll('.tab-content').forEach(c => c.classList.add('hidden'));
|
||||
|
||||
btn.classList.add('active');
|
||||
const tabName = btn.dataset.tab;
|
||||
document.querySelector(`#${tabName}-content`).parentElement.classList.remove('hidden');
|
||||
|
||||
// Re-highlight content when switching tabs
|
||||
const activeCode = document.querySelector(`#${tabName}-content code`);
|
||||
if (activeCode) {
|
||||
forceHighlightElement(activeCode);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
// View switching
|
||||
document.getElementById('play-tab').addEventListener('click', () => {
|
||||
document.getElementById('playground').classList.remove('hidden');
|
||||
document.getElementById('stress-modal').classList.add('hidden');
|
||||
document.getElementById('play-tab').classList.add('bg-surface', 'border-b-0');
|
||||
document.getElementById('stress-tab').classList.remove('bg-surface', 'border-b-0');
|
||||
});
|
||||
|
||||
document.getElementById('stress-tab').addEventListener('click', () => {
|
||||
document.getElementById('stress-modal').classList.remove('hidden');
|
||||
document.getElementById('stress-tab').classList.add('bg-surface', 'border-b-0');
|
||||
document.getElementById('play-tab').classList.remove('bg-surface', 'border-b-0');
|
||||
});
|
||||
|
||||
document.getElementById('close-stress').addEventListener('click', () => {
|
||||
document.getElementById('stress-modal').classList.add('hidden');
|
||||
document.getElementById('play-tab').classList.add('bg-surface', 'border-b-0');
|
||||
document.getElementById('stress-tab').classList.remove('bg-surface', 'border-b-0');
|
||||
});
|
||||
|
||||
// Initialize clipboard and highlight.js
|
||||
new ClipboardJS('#export-btn');
|
||||
hljs.highlightAll();
|
||||
|
||||
// Keyboard shortcut
|
||||
window.addEventListener('keydown', e => {
|
||||
if ((e.ctrlKey || e.metaKey) && e.key === 'Enter') {
|
||||
document.getElementById('run-btn').click();
|
||||
}
|
||||
});
|
||||
|
||||
// ================ ADVANCED CONFIG EDITOR ================
|
||||
const cm = CodeMirror(document.getElementById('adv-editor'), {
|
||||
value: `CrawlerRunConfig(
|
||||
stream=True,
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
)`,
|
||||
mode: 'python',
|
||||
lineNumbers: true,
|
||||
theme: 'darcula',
|
||||
tabSize: 4,
|
||||
styleActiveLine: true,
|
||||
matchBrackets: true,
|
||||
gutters: ["CodeMirror-linenumbers"],
|
||||
lineWrapping: true,
|
||||
});
|
||||
|
||||
const TEMPLATES = {
|
||||
CrawlerRunConfig: `CrawlerRunConfig(
|
||||
stream=True,
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
)`,
|
||||
BrowserConfig: `BrowserConfig(
|
||||
headless=True,
|
||||
extra_args=[
|
||||
"--no-sandbox",
|
||||
"--disable-gpu",
|
||||
],
|
||||
)`,
|
||||
};
|
||||
|
||||
document.getElementById('cfg-type').addEventListener('change', (e) => {
|
||||
cm.setValue(TEMPLATES[e.target.value]);
|
||||
document.getElementById('cfg-status').textContent = '';
|
||||
});
|
||||
|
||||
async function pyConfigToJson() {
|
||||
const code = cm.getValue().trim();
|
||||
if (!code) return {};
|
||||
|
||||
const res = await fetch('/config/dump', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ code }),
|
||||
});
|
||||
|
||||
const statusEl = document.getElementById('cfg-status');
|
||||
if (!res.ok) {
|
||||
const msg = await res.text();
|
||||
statusEl.textContent = '✖ config error';
|
||||
statusEl.className = 'text-xs text-red-400';
|
||||
throw new Error(msg || 'Invalid config');
|
||||
}
|
||||
|
||||
statusEl.textContent = '✓ parsed';
|
||||
statusEl.className = 'text-xs text-green-400';
|
||||
|
||||
return await res.json();
|
||||
}
|
||||
|
||||
// ================ SERVER COMMUNICATION ================
|
||||
|
||||
// Update status UI
|
||||
function updateStatus(status, time, memory, peakMemory) {
|
||||
const statusEl = document.getElementById('execution-status');
|
||||
const badgeEl = document.querySelector('#status-badge span:first-child');
|
||||
const textEl = document.querySelector('#status-badge span:last-child');
|
||||
|
||||
statusEl.classList.remove('hidden');
|
||||
badgeEl.className = 'w-3 h-3 rounded-full mr-2';
|
||||
|
||||
if (status === 'success') {
|
||||
badgeEl.classList.add('bg-green-500');
|
||||
textEl.textContent = 'Success';
|
||||
} else if (status === 'error') {
|
||||
badgeEl.classList.add('bg-red-500');
|
||||
textEl.textContent = 'Error';
|
||||
} else {
|
||||
badgeEl.classList.add('bg-yellow-500');
|
||||
textEl.textContent = 'Processing...';
|
||||
}
|
||||
|
||||
if (time) {
|
||||
document.getElementById('exec-time').textContent = `${time}ms`;
|
||||
}
|
||||
|
||||
            if (peakMemory !== undefined && peakMemory !== null) {
                // memory delta is unavailable for streamed responses (passed as null)
                const delta = (memory === null || memory === undefined) ? '' : `Δ${memory >= 0 ? '+' : ''}${memory}MB `;
                document.getElementById('exec-mem').textContent = `${delta}(Peak: ${peakMemory}MB)`;
            }
|
||||
}
|
||||
|
||||
// Generate code snippets
|
||||
function generateSnippets(api, payload) {
|
||||
// Python snippet
|
||||
const pyCodeEl = document.querySelector('#python-content code');
|
||||
const pySnippet = `import httpx\n\nasync def crawl():\n async with httpx.AsyncClient() as client:\n response = await client.post(\n "${window.location.origin}${api}",\n json=${JSON.stringify(payload, null, 4).replace(/\n/g, '\n ')}\n )\n return response.json()`;
|
||||
|
||||
pyCodeEl.textContent = pySnippet;
|
||||
pyCodeEl.className = 'python hljs'; // Reset classes
|
||||
forceHighlightElement(pyCodeEl);
|
||||
|
||||
// cURL snippet
|
||||
const curlCodeEl = document.querySelector('#curl-content code');
|
||||
const curlSnippet = `curl -X POST ${window.location.origin}${api} \\\n -H "Content-Type: application/json" \\\n -d '${JSON.stringify(payload)}'`;
|
||||
|
||||
curlCodeEl.textContent = curlSnippet;
|
||||
curlCodeEl.className = 'bash hljs'; // Reset classes
|
||||
forceHighlightElement(curlCodeEl);
|
||||
}
|
||||
|
||||
// Main run function
|
||||
async function runCrawl() {
|
||||
const endpoint = document.getElementById('endpoint').value;
|
||||
const urls = document.getElementById('urls').value.trim().split(/\n/).filter(u => u);
|
||||
// 1) grab python from CodeMirror, validate via /config/dump
|
||||
let advConfig = {};
|
||||
try {
|
||||
const cfgJson = await pyConfigToJson(); // may throw
|
||||
if (Object.keys(cfgJson).length) {
|
||||
const cfgType = document.getElementById('cfg-type').value;
|
||||
advConfig = cfgType === 'CrawlerRunConfig'
|
||||
? { crawler_config: cfgJson }
|
||||
: { browser_config: cfgJson };
|
||||
}
|
||||
} catch (err) {
|
||||
updateStatus('error');
|
||||
document.querySelector('#response-content code').textContent =
|
||||
JSON.stringify({ error: err.message }, null, 2);
|
||||
forceHighlightElement(document.querySelector('#response-content code'));
|
||||
return; // stop run
|
||||
}
|
||||
|
||||
            // /md and /llm use GET routes with path parameters, so they are not
            // wired into this POST-based runner yet.
            const endpointMap = {
                crawl: '/crawl',
                crawl_stream: '/crawl/stream',
            };

            const api = endpointMap[endpoint];
            if (!api) {
                updateStatus('error');
                document.querySelector('#response-content code').textContent =
                    JSON.stringify({ error: `Endpoint "${endpoint}" is not wired up in this runner yet.` }, null, 2);
                forceHighlightElement(document.querySelector('#response-content code'));
                return;
            }
|
||||
const payload = {
|
||||
urls,
|
||||
...advConfig
|
||||
};
|
||||
|
||||
updateStatus('processing');
|
||||
|
||||
try {
|
||||
const startTime = performance.now();
|
||||
let response, responseData;
|
||||
|
||||
if (endpoint === 'crawl_stream') {
|
||||
// Stream processing
|
||||
response = await fetch(api, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify(payload)
|
||||
});
|
||||
|
||||
const reader = response.body.getReader();
|
||||
let text = '';
|
||||
let maxMemory = 0;
|
||||
|
||||
while (true) {
|
||||
const { value, done } = await reader.read();
|
||||
if (done) break;
|
||||
|
||||
const chunk = new TextDecoder().decode(value);
|
||||
text += chunk;
|
||||
|
||||
// Process each line for memory updates
|
||||
chunk.trim().split('\n').forEach(line => {
|
||||
if (!line) return;
|
||||
try {
|
||||
const obj = JSON.parse(line);
|
||||
if (obj.server_memory_mb) {
|
||||
maxMemory = Math.max(maxMemory, obj.server_memory_mb);
|
||||
}
|
||||
} catch (e) {
|
||||
console.error('Error parsing stream line:', e);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
responseData = { stream: text };
|
||||
const time = Math.round(performance.now() - startTime);
|
||||
updateStatus('success', time, null, maxMemory);
|
||||
document.querySelector('#response-content code').textContent = text;
|
||||
document.querySelector('#response-content code').className = 'json hljs'; // Reset classes
|
||||
forceHighlightElement(document.querySelector('#response-content code'));
|
||||
} else {
|
||||
// Regular request
|
||||
response = await fetch(api, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify(payload)
|
||||
});
|
||||
|
||||
responseData = await response.json();
|
||||
const time = Math.round(performance.now() - startTime);
|
||||
|
||||
if (!response.ok) {
|
||||
updateStatus('error', time);
|
||||
throw new Error(responseData.error || 'Request failed');
|
||||
}
|
||||
|
||||
updateStatus(
|
||||
'success',
|
||||
time,
|
||||
responseData.server_memory_delta_mb,
|
||||
responseData.server_peak_memory_mb
|
||||
);
|
||||
|
||||
document.querySelector('#response-content code').textContent = JSON.stringify(responseData, null, 2);
|
||||
document.querySelector('#response-content code').className = 'json hljs'; // Ensure class is set
|
||||
forceHighlightElement(document.querySelector('#response-content code'));
|
||||
}
|
||||
|
||||
forceHighlightElement(document.querySelector('#response-content code'));
|
||||
generateSnippets(api, payload);
|
||||
} catch (error) {
|
||||
console.error('Error:', error);
|
||||
updateStatus('error');
|
||||
document.querySelector('#response-content code').textContent = JSON.stringify(
|
||||
{ error: error.message },
|
||||
null,
|
||||
2
|
||||
);
|
||||
forceHighlightElement(document.querySelector('#response-content code'));
|
||||
}
|
||||
}
|
||||
|
||||
// Stress test function
|
||||
async function runStressTest() {
|
||||
const total = parseInt(document.getElementById('st-total').value);
|
||||
const chunkSize = parseInt(document.getElementById('st-chunk').value);
|
||||
const concurrency = parseInt(document.getElementById('st-conc').value);
|
||||
const useStream = document.getElementById('st-stream').checked;
|
||||
|
||||
const logEl = document.getElementById('stress-log');
|
||||
logEl.textContent = '';
|
||||
|
||||
document.getElementById('stress-completed').textContent = '0';
|
||||
document.getElementById('stress-total').textContent = total;
|
||||
document.getElementById('stress-avg-time').textContent = '0';
|
||||
document.getElementById('stress-peak-mem').textContent = '0';
|
||||
|
||||
const api = useStream ? '/crawl/stream' : '/crawl';
|
||||
const urls = Array.from({ length: total }, (_, i) => `https://httpbin.org/anything/stress-${i}-${Date.now()}`);
|
||||
const chunks = [];
|
||||
|
||||
for (let i = 0; i < urls.length; i += chunkSize) {
|
||||
chunks.push(urls.slice(i, i + chunkSize));
|
||||
}
|
||||
|
||||
let completed = 0;
|
||||
let totalTime = 0;
|
||||
let peakMemory = 0;
|
||||
|
||||
const processBatch = async (batch, index) => {
|
||||
const payload = {
|
||||
urls: batch,
|
||||
browser_config: {},
|
||||
crawler_config: { cache_mode: 'BYPASS', stream: useStream }
|
||||
};
|
||||
|
||||
const start = performance.now();
|
||||
let time, memory;
|
||||
|
||||
try {
|
||||
if (useStream) {
|
||||
const response = await fetch(api, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify(payload)
|
||||
});
|
||||
|
||||
const reader = response.body.getReader();
|
||||
let maxMem = 0;
|
||||
while (true) {
|
||||
const { value, done } = await reader.read();
|
||||
if (done) break;
|
||||
const text = new TextDecoder().decode(value);
|
||||
text.split('\n').forEach(line => {
|
||||
try {
|
||||
const obj = JSON.parse(line);
|
||||
if (obj.server_memory_mb) {
|
||||
maxMem = Math.max(maxMem, obj.server_memory_mb);
|
||||
}
|
||||
} catch { }
|
||||
});
|
||||
}
|
||||
|
||||
memory = maxMem;
|
||||
} else {
|
||||
const response = await fetch(api, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify(payload)
|
||||
});
|
||||
|
||||
const data = await response.json();
|
||||
memory = data.server_peak_memory_mb;
|
||||
}
|
||||
|
||||
time = Math.round(performance.now() - start);
|
||||
peakMemory = Math.max(peakMemory, memory || 0);
|
||||
totalTime += time;
|
||||
|
||||
logEl.textContent += `[${index + 1}/${chunks.length}] ✔ ${time}ms | Peak ${memory}MB\n`;
|
||||
} catch (error) {
|
||||
time = Math.round(performance.now() - start);
|
||||
logEl.textContent += `[${index + 1}/${chunks.length}] ✖ ${time}ms | ${error.message}\n`;
|
||||
}
|
||||
|
||||
completed += batch.length;
|
||||
document.getElementById('stress-completed').textContent = completed;
|
||||
document.getElementById('stress-peak-mem').textContent = peakMemory;
|
||||
document.getElementById('stress-avg-time').textContent = Math.round(totalTime / (index + 1));
|
||||
|
||||
logEl.scrollTop = logEl.scrollHeight;
|
||||
};
|
||||
|
||||
// Run with concurrency control
|
||||
let active = 0;
|
||||
let index = 0;
|
||||
|
||||
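        // Simple promise pool: keep at most `concurrency` batches in flight and
        // launch the next one as soon as any batch settles.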
return new Promise(resolve => {
|
||||
const runNext = () => {
|
||||
while (active < concurrency && index < chunks.length) {
|
||||
processBatch(chunks[index], index)
|
||||
.finally(() => {
|
||||
active--;
|
||||
runNext();
|
||||
});
|
||||
active++;
|
||||
index++;
|
||||
}
|
||||
|
||||
if (active === 0 && index >= chunks.length) {
|
||||
logEl.textContent += '\n✅ Stress test completed\n';
|
||||
resolve();
|
||||
}
|
||||
};
|
||||
|
||||
runNext();
|
||||
});
|
||||
}
|
||||
|
||||
// Event listeners
|
||||
document.getElementById('run-btn').addEventListener('click', runCrawl);
|
||||
document.getElementById('st-run').addEventListener('click', runStressTest);
|
||||
|
||||
function forceHighlightElement(element) {
|
||||
if (!element) return;
|
||||
|
||||
// Save current scroll position (important for large code blocks)
|
||||
const scrollTop = element.parentElement.scrollTop;
|
||||
|
||||
// Reset the element
|
||||
const text = element.textContent;
|
||||
element.innerHTML = text;
|
||||
element.removeAttribute('data-highlighted');
|
||||
|
||||
// Reapply highlighting
|
||||
hljs.highlightElement(element);
|
||||
|
||||
// Restore scroll position
|
||||
element.parentElement.scrollTop = scrollTop;
|
||||
}
|
||||
|
||||
// Initialize clipboard for all copy buttons
|
||||
function initCopyButtons() {
|
||||
document.querySelectorAll('.copy-btn').forEach(btn => {
|
||||
new ClipboardJS(btn, {
|
||||
text: () => {
|
||||
const target = document.querySelector(btn.dataset.target);
|
||||
return target ? target.textContent : '';
|
||||
}
|
||||
}).on('success', e => {
|
||||
e.clearSelection();
|
||||
// make button text "copied" for 1 second
|
||||
const originalText = e.trigger.textContent;
|
||||
e.trigger.textContent = 'Copied!';
|
||||
setTimeout(() => {
|
||||
e.trigger.textContent = originalText;
|
||||
}, 1000);
|
||||
// Highlight the copied code
|
||||
const target = document.querySelector(btn.dataset.target);
|
||||
if (target) {
|
||||
target.classList.add('highlighted');
|
||||
setTimeout(() => {
|
||||
target.classList.remove('highlighted');
|
||||
}, 1000);
|
||||
}
|
||||
|
||||
}).on('error', e => {
|
||||
console.error('Error copying:', e);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
// Call this in your DOMContentLoaded or initialization
|
||||
initCopyButtons();
|
||||
|
||||
</script>
|
||||
</body>
|
||||
|
||||
</html>
|
||||
28
deploy/docker/supervisord.conf
Normal file
@@ -0,0 +1,28 @@
|
||||
[supervisord]
|
||||
nodaemon=true ; Run supervisord in the foreground
|
||||
logfile=/dev/null ; Log supervisord output to stdout/stderr
|
||||
logfile_maxbytes=0
|
||||
|
||||
[program:redis]
|
||||
command=/usr/bin/redis-server --loglevel notice ; Path to redis-server on Alpine
|
||||
user=appuser ; Run redis as our non-root user
|
||||
autorestart=true
|
||||
priority=10
|
||||
stdout_logfile=/dev/stdout ; Redirect redis stdout to container stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/dev/stderr ; Redirect redis stderr to container stderr
|
||||
stderr_logfile_maxbytes=0
|
||||
|
||||
[program:gunicorn]
|
||||
command=/usr/local/bin/gunicorn --bind 0.0.0.0:11235 --workers 1 --threads 4 --timeout 1800 --graceful-timeout 30 --keep-alive 300 --log-level info --worker-class uvicorn.workers.UvicornWorker server:app
|
||||
directory=/app ; Working directory for the app
|
||||
user=appuser ; Run gunicorn as our non-root user
|
||||
autorestart=true
|
||||
priority=20
|
||||
environment=PYTHONUNBUFFERED=1 ; Ensure Python output is sent straight to logs
|
||||
stdout_logfile=/dev/stdout ; Redirect gunicorn stdout to container stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/dev/stderr ; Redirect gunicorn stderr to container stderr
|
||||
stderr_logfile_maxbytes=0
|
||||
|
||||
# Optional: Add filebeat or other logging agents here if needed
|
||||
66
deploy/docker/utils.py
Normal file
@@ -0,0 +1,66 @@
|
||||
import dns.resolver
|
||||
import logging
|
||||
import yaml
|
||||
from datetime import datetime
|
||||
from enum import Enum
|
||||
from pathlib import Path
|
||||
from fastapi import Request
|
||||
from typing import Any, Dict, Optional
|
||||
|
||||
class TaskStatus(str, Enum):
|
||||
PROCESSING = "processing"
|
||||
FAILED = "failed"
|
||||
COMPLETED = "completed"
|
||||
|
||||
class FilterType(str, Enum):
|
||||
RAW = "raw"
|
||||
FIT = "fit"
|
||||
BM25 = "bm25"
|
||||
LLM = "llm"
|
||||
|
||||
def load_config() -> Dict:
|
||||
"""Load and return application configuration."""
|
||||
config_path = Path(__file__).parent / "config.yml"
|
||||
with open(config_path, "r") as config_file:
|
||||
return yaml.safe_load(config_file)
|
||||
|
||||
def setup_logging(config: Dict) -> None:
|
||||
"""Configure application logging."""
|
||||
logging.basicConfig(
|
||||
level=config["logging"]["level"],
|
||||
format=config["logging"]["format"]
|
||||
)
|
||||
|
||||
def get_base_url(request: Request) -> str:
|
||||
"""Get base URL including scheme and host."""
|
||||
return f"{request.url.scheme}://{request.url.netloc}"
|
||||
|
||||
def is_task_id(value: str) -> bool:
|
||||
"""Check if the value matches task ID pattern."""
|
||||
return value.startswith("llm_") and "_" in value
|
||||
|
||||
def datetime_handler(obj: Any) -> Optional[str]:
|
||||
"""Handle datetime serialization for JSON."""
|
||||
if hasattr(obj, 'isoformat'):
|
||||
return obj.isoformat()
|
||||
raise TypeError(f"Object of type {type(obj)} is not JSON serializable")
|
||||
|
||||
def should_cleanup_task(created_at: str) -> bool:
|
||||
"""Check if task should be cleaned up based on creation time."""
|
||||
created = datetime.fromisoformat(created_at)
|
||||
return (datetime.now() - created).total_seconds() > 3600
|
||||
|
||||
def decode_redis_hash(hash_data: Dict[bytes, bytes]) -> Dict[str, str]:
|
||||
"""Decode Redis hash data from bytes to strings."""
|
||||
return {k.decode('utf-8'): v.decode('utf-8') for k, v in hash_data.items()}
|
||||
|
||||
|
||||
|
||||
def verify_email_domain(email: str) -> bool:
|
||||
    try:
        domain = email.split('@')[1]
        # Try to resolve MX records for the domain.
        records = dns.resolver.resolve(domain, 'MX')
        return bool(records)
    except Exception:
        return False
|
||||
@@ -1,67 +1,49 @@
|
||||
version: '3.8'
|
||||
|
||||
# Shared configuration for all environments
|
||||
x-base-config: &base-config
|
||||
ports:
|
||||
- "11235:11235" # Gunicorn port
|
||||
env_file:
|
||||
- .llm.env # API keys (create from .llm.env.example)
|
||||
environment:
|
||||
- OPENAI_API_KEY=${OPENAI_API_KEY:-}
|
||||
- DEEPSEEK_API_KEY=${DEEPSEEK_API_KEY:-}
|
||||
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
|
||||
- GROQ_API_KEY=${GROQ_API_KEY:-}
|
||||
- TOGETHER_API_KEY=${TOGETHER_API_KEY:-}
|
||||
- MISTRAL_API_KEY=${MISTRAL_API_KEY:-}
|
||||
- GEMINI_API_TOKEN=${GEMINI_API_TOKEN:-}
|
||||
volumes:
|
||||
- /dev/shm:/dev/shm # Chromium performance
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 4G
|
||||
reservations:
|
||||
memory: 1G
|
||||
restart: unless-stopped
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:11235/health"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
start_period: 40s
|
||||
user: "appuser"
|
||||
|
||||
services:
|
||||
# Local build services for different platforms
|
||||
crawl4ai-amd64:
|
||||
crawl4ai:
|
||||
# 1. Default: Pull multi-platform test image from Docker Hub
|
||||
# 2. Override with local image via: IMAGE=local-test docker compose up
|
||||
image: ${IMAGE:-unclecode/crawl4ai:${TAG:-latest}}
|
||||
|
||||
# Local build config (used with --build)
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile
|
||||
args:
|
||||
PYTHON_VERSION: "3.10"
|
||||
INSTALL_TYPE: ${INSTALL_TYPE:-basic}
|
||||
ENABLE_GPU: false
|
||||
platforms:
|
||||
- linux/amd64
|
||||
profiles: ["local-amd64"]
|
||||
extends: &base-config
|
||||
file: docker-compose.yml
|
||||
service: base-config
|
||||
|
||||
crawl4ai-arm64:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile
|
||||
args:
|
||||
PYTHON_VERSION: "3.10"
|
||||
INSTALL_TYPE: ${INSTALL_TYPE:-basic}
|
||||
ENABLE_GPU: false
|
||||
platforms:
|
||||
- linux/arm64
|
||||
profiles: ["local-arm64"]
|
||||
extends: *base-config
|
||||
|
||||
# Hub services for different platforms and versions
|
||||
crawl4ai-hub-amd64:
|
||||
image: unclecode/crawl4ai:${VERSION:-basic}-amd64
|
||||
profiles: ["hub-amd64"]
|
||||
extends: *base-config
|
||||
|
||||
crawl4ai-hub-arm64:
|
||||
image: unclecode/crawl4ai:${VERSION:-basic}-arm64
|
||||
profiles: ["hub-arm64"]
|
||||
extends: *base-config
|
||||
|
||||
# Base configuration to be extended
|
||||
base-config:
|
||||
ports:
|
||||
- "11235:11235"
|
||||
- "8000:8000"
|
||||
- "9222:9222"
|
||||
- "8080:8080"
|
||||
environment:
|
||||
- CRAWL4AI_API_TOKEN=${CRAWL4AI_API_TOKEN:-}
|
||||
- OPENAI_API_KEY=${OPENAI_API_KEY:-}
|
||||
- CLAUDE_API_KEY=${CLAUDE_API_KEY:-}
|
||||
volumes:
|
||||
- /dev/shm:/dev/shm
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 4G
|
||||
reservations:
|
||||
memory: 1G
|
||||
restart: unless-stopped
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:11235/health"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
start_period: 40s
|
||||
INSTALL_TYPE: ${INSTALL_TYPE:-default}
|
||||
ENABLE_GPU: ${ENABLE_GPU:-false}
|
||||
|
||||
# Inherit shared config
|
||||
<<: *base-config
|
||||
25
docs/assets/powered-by-dark.svg
Normal file
@@ -0,0 +1,25 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="120" height="35" viewBox="0 0 120 35">
|
||||
<!-- Dark Theme -->
|
||||
<g>
|
||||
<defs>
|
||||
<pattern id="halftoneDark" width="4" height="4" patternUnits="userSpaceOnUse">
|
||||
<circle cx="2" cy="2" r="1" fill="#eee" opacity="0.1"/>
|
||||
</pattern>
|
||||
<pattern id="halftoneTextDark" width="3" height="3" patternUnits="userSpaceOnUse">
|
||||
<circle cx="1.5" cy="1.5" r="2" fill="#aaa" opacity="0.2"/>
|
||||
</pattern>
|
||||
</defs>
|
||||
<!-- White border - added as outer rectangle -->
|
||||
<rect width="120" height="35" rx="5" fill="#111"/>
|
||||
<!-- Dark background slightly smaller to show thicker border -->
|
||||
<rect x="2" y="2" width="116" height="31" rx="4" fill="#1a1a1a"/>
|
||||
<rect x="2" y="2" width="116" height="31" rx="4" fill="url(#halftoneDark)"/>
|
||||
|
||||
<!-- Logo with halftone -->
|
||||
<path d="M30 17.5 a7.5 7.5 0 1 1 -15 0 a7.5 7.5 0 1 1 15 0" fill="none" stroke="#eee" stroke-width="2"/>
|
||||
<path d="M18 17.5 L27 17.5" stroke="#eee" stroke-width="2"/>
|
||||
<circle cx="22.5" cy="17.5" r="2" fill="#eee"/>
|
||||
|
||||
<text x="40" y="23" fill="#eee" font-family="Arial, sans-serif" font-weight="500" font-size="14">Crawl4AI</text>
|
||||
</g>
|
||||
</svg>
|
||||
|
After: 1.2 KiB
64
docs/assets/powered-by-disco.svg
Normal file
@@ -0,0 +1,64 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="120" height="35" viewBox="0 0 120 35">
|
||||
<g>
|
||||
<defs>
|
||||
<pattern id="cyberdots" width="4" height="4" patternUnits="userSpaceOnUse">
|
||||
<circle cx="2" cy="2" r="1">
|
||||
<animate attributeName="fill"
|
||||
values="#FF2EC4;#8B5CF6;#0BC5EA;#FF2EC4"
|
||||
dur="6s"
|
||||
repeatCount="indefinite"/>
|
||||
<animate attributeName="opacity"
|
||||
values="0.2;0.4;0.2"
|
||||
dur="4s"
|
||||
repeatCount="indefinite"/>
|
||||
</circle>
|
||||
</pattern>
|
||||
<filter id="neonGlow" x="-20%" y="-20%" width="140%" height="140%">
|
||||
<feGaussianBlur stdDeviation="1" result="blur"/>
|
||||
<feFlood flood-color="#FF2EC4" flood-opacity="0.2">
|
||||
<animate attributeName="flood-color"
|
||||
values="#FF2EC4;#8B5CF6;#0BC5EA;#FF2EC4"
|
||||
dur="8s"
|
||||
repeatCount="indefinite"/>
|
||||
</feFlood>
|
||||
<feComposite in2="blur" operator="in"/>
|
||||
<feMerge>
|
||||
<feMergeNode/>
|
||||
<feMergeNode in="SourceGraphic"/>
|
||||
</feMerge>
|
||||
</filter>
|
||||
</defs>
|
||||
|
||||
<rect width="120" height="35" rx="5" fill="#0A0A0F"/>
|
||||
<rect x="2" y="2" width="116" height="31" rx="4" fill="#16161E"/>
|
||||
<rect x="2" y="2" width="116" height="31" rx="4" fill="url(#cyberdots)"/>
|
||||
|
||||
<!-- Logo with animated neon -->
|
||||
<path d="M30 17.5 a7.5 7.5 0 1 1 -15 0 a7.5 7.5 0 1 1 15 0" fill="none" stroke="#8B5CF6" stroke-width="2" filter="url(#neonGlow)">
|
||||
<animate attributeName="stroke"
|
||||
values="#FF2EC4;#8B5CF6;#0BC5EA;#FF2EC4"
|
||||
dur="8s"
|
||||
repeatCount="indefinite"/>
|
||||
</path>
|
||||
<path d="M18 17.5 L27 17.5" stroke="#8B5CF6" stroke-width="2" filter="url(#neonGlow)">
|
||||
<animate attributeName="stroke"
|
||||
values="#FF2EC4;#8B5CF6;#0BC5EA;#FF2EC4"
|
||||
dur="8s"
|
||||
repeatCount="indefinite"/>
|
||||
</path>
|
||||
<circle cx="22.5" cy="17.5" r="2" fill="#0BC5EA">
|
||||
<animate attributeName="fill"
|
||||
values="#0BC5EA;#FF2EC4;#8B5CF6;#0BC5EA"
|
||||
dur="8s"
|
||||
repeatCount="indefinite"/>
|
||||
</circle>
|
||||
|
||||
<text x="40" y="23" font-family="Arial, sans-serif" font-weight="500" font-size="14" filter="url(#neonGlow)">
|
||||
<animate attributeName="fill"
|
||||
values="#FF2EC4;#8B5CF6;#0BC5EA;#FF2EC4"
|
||||
dur="8s"
|
||||
repeatCount="indefinite"/>
|
||||
Crawl4AI
|
||||
</text>
|
||||
</g>
|
||||
</svg>
|
||||
|
After: 2.5 KiB
21
docs/assets/powered-by-light.svg
Normal file
@@ -0,0 +1,21 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="120" height="35" viewBox="0 0 120 35">
|
||||
<g>
|
||||
<defs>
|
||||
<pattern id="halftoneLight" width="4" height="4" patternUnits="userSpaceOnUse">
|
||||
<circle cx="2" cy="2" r="1" fill="#111" opacity="0.1"/>
|
||||
</pattern>
|
||||
</defs>
|
||||
<!-- Dark border -->
|
||||
<rect width="120" height="35" rx="5" fill="#DDD"/>
|
||||
<!-- Light background -->
|
||||
<rect x="2" y="2" width="116" height="31" rx="4" fill="#fff"/>
|
||||
<rect x="2" y="2" width="116" height="31" rx="4" fill="url(#halftoneLight)"/>
|
||||
|
||||
<!-- Logo -->
|
||||
<path d="M30 17.5 a7.5 7.5 0 1 1 -15 0 a7.5 7.5 0 1 1 15 0" fill="none" stroke="#111" stroke-width="2"/>
|
||||
<path d="M18 17.5 L27 17.5" stroke="#111" stroke-width="2"/>
|
||||
<circle cx="22.5" cy="17.5" r="2" fill="#111"/>
|
||||
|
||||
<text x="40" y="23" fill="#111" font-family="Arial, sans-serif" font-weight="500" font-size="14">Crawl4AI</text>
|
||||
</g>
|
||||
</svg>
|
||||
|
After: 925 B
28
docs/assets/powered-by-night.svg
Normal file
@@ -0,0 +1,28 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="120" height="35" viewBox="0 0 120 35">
|
||||
<g>
|
||||
<defs>
|
||||
<pattern id="halftoneDark" width="4" height="4" patternUnits="userSpaceOnUse">
|
||||
<circle cx="2" cy="2" r="1" fill="#8B5CF6" opacity="0.1"/>
|
||||
</pattern>
|
||||
<filter id="neonGlow" x="-20%" y="-20%" width="140%" height="140%">
|
||||
<feGaussianBlur stdDeviation="1" result="blur"/>
|
||||
<feFlood flood-color="#8B5CF6" flood-opacity="0.2"/>
|
||||
<feComposite in2="blur" operator="in"/>
|
||||
<feMerge>
|
||||
<feMergeNode/>
|
||||
<feMergeNode in="SourceGraphic"/>
|
||||
</feMerge>
|
||||
</filter>
|
||||
</defs>
|
||||
<rect width="120" height="35" rx="5" fill="#0A0A0F"/>
|
||||
<rect x="2" y="2" width="116" height="31" rx="4" fill="#16161E"/>
|
||||
<rect x="2" y="2" width="116" height="31" rx="4" fill="url(#halftoneDark)"/>
|
||||
|
||||
<!-- Logo with neon glow -->
|
||||
<path d="M30 17.5 a7.5 7.5 0 1 1 -15 0 a7.5 7.5 0 1 1 15 0" fill="none" stroke="#8B5CF6" stroke-width="2" filter="url(#neonGlow)"/>
|
||||
<path d="M18 17.5 L27 17.5" stroke="#8B5CF6" stroke-width="2" filter="url(#neonGlow)"/>
|
||||
<circle cx="22.5" cy="17.5" r="2" fill="#8B5CF6"/>
|
||||
|
||||
<text x="40" y="23" fill="#fff" font-family="Arial, sans-serif" font-weight="500" font-size="14" filter="url(#neonGlow)">Crawl4AI</text>
|
||||
</g>
|
||||
</svg>
|
||||
|
After: 1.3 KiB
123
docs/examples/README_BUILTIN_BROWSER.md
Normal file
@@ -0,0 +1,123 @@
|
||||
# Builtin Browser in Crawl4AI
|
||||
|
||||
This document explains the builtin browser feature in Crawl4AI and how to use it effectively.
|
||||
|
||||
## What is the Builtin Browser?
|
||||
|
||||
The builtin browser is a persistent Chrome instance that Crawl4AI manages for you. It runs in the background and can be used by multiple crawling operations, eliminating the need to start and stop browsers for each crawl.
|
||||
|
||||
Benefits include:
|
||||
- **Faster startup times** - The browser is already running, so your scripts start faster
|
||||
- **Shared resources** - All your crawling scripts can use the same browser instance
|
||||
- **Simplified management** - No need to worry about CDP URLs or browser processes
|
||||
- **Persistent cookies and sessions** - Browser state persists between script runs
|
||||
- **Less resource usage** - Only one browser instance for multiple scripts
|
||||
|
||||
## Using the Builtin Browser
|
||||
|
||||
### In Python Code
|
||||
|
||||
Using the builtin browser in your code is simple:
|
||||
|
||||
```python
|
||||
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig
|
||||
|
||||
# Create browser config with builtin mode
|
||||
browser_config = BrowserConfig(
|
||||
browser_mode="builtin", # This is the key setting!
|
||||
headless=True # Can be headless or not
|
||||
)
|
||||
|
||||
# Create the crawler
|
||||
crawler = AsyncWebCrawler(config=browser_config)
|
||||
|
||||
# Use it - no need to explicitly start()
|
||||
result = await crawler.arun("https://example.com")
|
||||
```
|
||||
|
||||
Key points:
|
||||
1. Set `browser_mode="builtin"` in your BrowserConfig
|
||||
2. No need for explicit `start()` call - the crawler will automatically connect to the builtin browser
|
||||
3. No need to use a context manager or call `close()` - the browser stays running
|
||||
|
||||
### Via CLI
|
||||
|
||||
The CLI provides commands to manage the builtin browser:
|
||||
|
||||
```bash
|
||||
# Start the builtin browser
|
||||
crwl browser start
|
||||
|
||||
# Check its status
|
||||
crwl browser status
|
||||
|
||||
# Open a visible window to see what the browser is doing
|
||||
crwl browser view --url https://example.com
|
||||
|
||||
# Stop it when no longer needed
|
||||
crwl browser stop
|
||||
|
||||
# Restart with different settings
|
||||
crwl browser restart --no-headless
|
||||
```
|
||||
|
||||
When crawling via CLI, simply add the builtin browser mode:
|
||||
|
||||
```bash
|
||||
crwl https://example.com -b "browser_mode=builtin"
|
||||
```
|
||||
|
||||
## How It Works
|
||||
|
||||
1. When a crawler with `browser_mode="builtin"` is created:
|
||||
- It checks if a builtin browser is already running
|
||||
- If not, it automatically launches one
|
||||
   - It connects to the browser via CDP (Chrome DevTools Protocol); see the sketch below
|
||||
|
||||
2. The browser process continues running after your script exits
|
||||
- This means it's ready for the next crawl
|
||||
- You can manage it via the CLI commands
|
||||
|
||||
3. During installation, Crawl4AI attempts to create a builtin browser automatically
|
||||
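
As a minimal sketch of this flow (assuming only the `browser_mode="builtin"` setting shown above), two independent crawler instances reuse the same persistent browser:

```python
import asyncio
from crawl4ai import AsyncWebCrawler, BrowserConfig

async def main():
    cfg = BrowserConfig(browser_mode="builtin", headless=True)
    # Both crawlers attach to the same persistent browser over CDP;
    # neither needs an explicit start() or close().
    first = AsyncWebCrawler(config=cfg)
    second = AsyncWebCrawler(config=cfg)
    r1 = await first.arun("https://example.com")
    r2 = await second.arun("https://example.com")
    print(len(r1.markdown.raw_markdown), len(r2.markdown.raw_markdown))

asyncio.run(main())
```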
|
||||
## Example
|
||||
|
||||
See the [builtin_browser_example.py](builtin_browser_example.py) file for a complete example.
|
||||
|
||||
Run it with:
|
||||
|
||||
```bash
|
||||
python builtin_browser_example.py
|
||||
```
|
||||
|
||||
## When to Use
|
||||
|
||||
The builtin browser is ideal for:
|
||||
- Scripts that run frequently
|
||||
- Development and testing workflows
|
||||
- Applications that need to minimize startup time
|
||||
- Systems where you want to manage browser instances centrally
|
||||
|
||||
You might not want to use it when:
|
||||
- Running one-off scripts
|
||||
- You need different browser configurations for different tasks
|
||||
- In environments where persistent processes are not allowed
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
If you encounter issues:
|
||||
|
||||
1. Check the browser status:
|
||||
```
|
||||
crwl browser status
|
||||
```
|
||||
|
||||
2. Try restarting it:
|
||||
```
|
||||
crwl browser restart
|
||||
```
|
||||
|
||||
3. If problems persist, stop it and let Crawl4AI start a fresh one:
|
||||
```
|
||||
crwl browser stop
|
||||
```
|
||||
79
docs/examples/arun_vs_arun_many.py
Normal file
@@ -0,0 +1,79 @@
import asyncio
import time
from crawl4ai.async_webcrawler import AsyncWebCrawler, CacheMode
from crawl4ai.async_configs import CrawlerRunConfig
from crawl4ai.async_dispatcher import MemoryAdaptiveDispatcher, RateLimiter

VERBOSE = False


async def crawl_sequential(urls):
    config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS, verbose=VERBOSE)
    results = []
    start_time = time.perf_counter()
    async with AsyncWebCrawler() as crawler:
        for url in urls:
            result_container = await crawler.arun(url=url, config=config)
            results.append(result_container[0])
    total_time = time.perf_counter() - start_time
    return total_time, results


async def crawl_parallel_dispatcher(urls):
    config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS, verbose=VERBOSE)
    # Dispatcher with rate limiter enabled (default behavior)
    dispatcher = MemoryAdaptiveDispatcher(
        rate_limiter=RateLimiter(base_delay=(1.0, 3.0), max_delay=60.0, max_retries=3),
        max_session_permit=50,
    )
    start_time = time.perf_counter()
    async with AsyncWebCrawler() as crawler:
        result_container = await crawler.arun_many(urls=urls, config=config, dispatcher=dispatcher)
        results = []
        if isinstance(result_container, list):
            results = result_container
        else:
            async for res in result_container:
                results.append(res)
    total_time = time.perf_counter() - start_time
    return total_time, results


async def crawl_parallel_no_rate_limit(urls):
    config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS, verbose=VERBOSE)
    # Dispatcher with no rate limiter and a high session permit to avoid queuing
    dispatcher = MemoryAdaptiveDispatcher(
        rate_limiter=None,
        max_session_permit=len(urls),  # allow all URLs concurrently
    )
    start_time = time.perf_counter()
    async with AsyncWebCrawler() as crawler:
        result_container = await crawler.arun_many(urls=urls, config=config, dispatcher=dispatcher)
        results = []
        if isinstance(result_container, list):
            results = result_container
        else:
            async for res in result_container:
                results.append(res)
    total_time = time.perf_counter() - start_time
    return total_time, results


async def main():
    urls = ["https://example.com"] * 100
    print(f"Crawling {len(urls)} URLs sequentially...")
    seq_time, seq_results = await crawl_sequential(urls)
    print(f"Sequential crawling took: {seq_time:.2f} seconds\n")

    print(f"Crawling {len(urls)} URLs in parallel using arun_many with dispatcher (with rate limit)...")
    disp_time, disp_results = await crawl_parallel_dispatcher(urls)
    print(f"Parallel (dispatcher with rate limiter) took: {disp_time:.2f} seconds\n")

    print(f"Crawling {len(urls)} URLs in parallel using dispatcher with no rate limiter...")
    no_rl_time, no_rl_results = await crawl_parallel_no_rate_limit(urls)
    print(f"Parallel (dispatcher without rate limiter) took: {no_rl_time:.2f} seconds\n")

    print("Crawl4ai - Crawling Comparison")
    print("--------------------------------------------------------")
    print(f"Sequential crawling took: {seq_time:.2f} seconds")
    print(f"Parallel (dispatcher with rate limiter) took: {disp_time:.2f} seconds")
    print(f"Parallel (dispatcher without rate limiter) took: {no_rl_time:.2f} seconds")


if __name__ == "__main__":
    asyncio.run(main())
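A side note on the example above: the `dispatcher` argument is optional. When none is supplied, `arun_many` falls back to a default `MemoryAdaptiveDispatcher`, so the minimal batched form reduces to the sketch below.

```python
# Minimal arun_many sketch - no explicit dispatcher
# (crawl4ai falls back to a default MemoryAdaptiveDispatcher).
import asyncio
from crawl4ai import AsyncWebCrawler

async def quick_batch():
    async with AsyncWebCrawler() as crawler:
        results = await crawler.arun_many(urls=["https://example.com"] * 5)
        print(f"{sum(r.success for r in results)}/5 succeeded")

asyncio.run(quick_batch())
```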
@@ -52,7 +52,7 @@ async def crawl_sequential(urls: List[str]):
             )
             if result.success:
                 print(f"Successfully crawled {url}")
-                print(f"Content length: {len(result.markdown_v2.raw_markdown)}")
+                print(f"Content length: {len(result.markdown.raw_markdown)}")
     finally:
         await crawler.close()

@@ -101,7 +101,7 @@ async def crawl_parallel(urls: List[str], max_concurrent: int = 3):
                 print(f"Error crawling {url}: {str(result)}")
             elif result.success:
                 print(f"Successfully crawled {url}")
-                print(f"Content length: {len(result.markdown_v2.raw_markdown)}")
+                print(f"Content length: {len(result.markdown.raw_markdown)}")
     finally:
         await crawler.close()
86
docs/examples/builtin_browser_example.py
Normal file
@@ -0,0 +1,86 @@
#!/usr/bin/env python3
"""
Builtin Browser Example

This example demonstrates how to use Crawl4AI's builtin browser feature,
which simplifies the browser management process. With builtin mode:

- No need to manually start or connect to a browser
- No need to manage CDP URLs or browser processes
- Automatically connects to an existing browser or launches one if needed
- Browser persists between script runs, reducing startup time
- No explicit cleanup or close() calls needed

The example also demonstrates "auto-starting": you don't need to explicitly
call the start() method on the crawler.
"""

import asyncio
import time

from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode


async def crawl_with_builtin_browser():
    """
    Simple example of crawling with the builtin browser.

    Key features:
    1. browser_mode="builtin" in BrowserConfig
    2. No explicit start() call needed
    3. No explicit close() needed
    """
    print("\n=== Crawl4AI Builtin Browser Example ===\n")

    # Create a browser configuration with builtin mode
    browser_config = BrowserConfig(
        browser_mode="builtin",  # This is the key setting!
        headless=True,           # Can run headless for background operation
    )

    # Create a crawler run configuration
    crawler_config = CrawlerRunConfig(
        cache_mode=CacheMode.BYPASS,  # Skip cache for this demo
        screenshot=True,              # Take a screenshot
        verbose=True,                 # Show verbose logging
    )

    # Create the crawler instance
    # Note: we don't need the "async with" context manager
    crawler = AsyncWebCrawler(config=browser_config)

    # Start crawling several URLs - no explicit start() needed!
    # The crawler automatically connects to the builtin browser.
    print("\n➡️ Crawling first URL...")
    t0 = time.time()
    result1 = await crawler.arun(
        url="https://crawl4ai.com",
        config=crawler_config,
    )
    t1 = time.time()
    print(f"✅ First URL crawled in {t1 - t0:.2f} seconds")
    print(f"   Got {len(result1.markdown.raw_markdown)} characters of content")
    print(f"   Title: {result1.metadata.get('title', 'No title')}")

    # Try another URL - the browser is already running, so this should be faster
    print("\n➡️ Crawling second URL...")
    t0 = time.time()
    result2 = await crawler.arun(
        url="https://example.com",
        config=crawler_config,
    )
    t1 = time.time()
    print(f"✅ Second URL crawled in {t1 - t0:.2f} seconds")
    print(f"   Got {len(result2.markdown.raw_markdown)} characters of content")
    print(f"   Title: {result2.metadata.get('title', 'No title')}")

    # The builtin browser keeps running in the background; no need to close it.
    print("\n🔄 The builtin browser remains running for future use")
    print("   You can use 'crwl browser status' to check its status")
    print("   or 'crwl browser stop' to stop it when completely done")


async def main():
    """Run the example"""
    await crawl_with_builtin_browser()


if __name__ == "__main__":
    asyncio.run(main())
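The `crwl browser` commands printed at the end of the example manage the persistent browser from the shell. A sketch of the lifecycle (the `start` subcommand is an assumption; `crwl browser --help` lists the exact set shipped with this release):

```
crwl browser start    # launch the persistent builtin browser ahead of time
crwl browser status   # confirm it is running
crwl browser stop     # shut it down when completely done
```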
13
docs/examples/cli/browser.yml
Normal file
@@ -0,0 +1,13 @@
browser_type: "chromium"
headless: true
viewport_width: 1280
viewport_height: 800
user_agent_mode: "random"
verbose: true
text_mode: false
light_mode: false
ignore_https_errors: true
java_script_enabled: true
extra_args:
  - "--disable-gpu"
  - "--no-sandbox"
13
docs/examples/cli/crawler.yml
Normal file
@@ -0,0 +1,13 @@
cache_mode: "bypass"
wait_until: "networkidle"
page_timeout: 30000
delay_before_return_html: 0.5
word_count_threshold: 100
scan_full_page: true
scroll_delay: 0.3
process_iframes: false
remove_overlay_elements: true
magic: true
verbose: true
exclude_external_links: true
exclude_social_media_links: true
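These two YAML files mirror the constructor parameters of `BrowserConfig` and `CrawlerRunConfig`, which is how the `crwl` CLI consumes them. A minimal sketch of loading them programmatically, assuming the keys map 1:1 onto the config kwargs (the `CacheMode` conversion is an assumption about how the string form is normalized):

```python
import yaml
from crawl4ai import BrowserConfig, CrawlerRunConfig, CacheMode

with open("docs/examples/cli/browser.yml") as f:
    browser_cfg = BrowserConfig(**yaml.safe_load(f))

with open("docs/examples/cli/crawler.yml") as f:
    data = yaml.safe_load(f)
    # "bypass" -> CacheMode.BYPASS; the CLI performs a similar normalization
    data["cache_mode"] = CacheMode[data["cache_mode"].upper()]
    crawler_cfg = CrawlerRunConfig(**data)
```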
27
docs/examples/cli/css_schema.json
Normal file
@@ -0,0 +1,27 @@
{
  "name": "ArticleExtractor",
  "baseSelector": ".cards[data-tax=news] .card__data",
  "fields": [
    {
      "name": "title",
      "selector": "h4.card__title",
      "type": "text"
    },
    {
      "name": "link",
      "selector": "h4.card__title a",
      "type": "attribute",
      "attribute": "href"
    },
    {
      "name": "details",
      "selector": ".card__details",
      "type": "text"
    },
    {
      "name": "topics",
      "selector": ".card__topics.topics",
      "type": "text"
    }
  ]
}
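This schema can also be used directly from Python; a minimal sketch with `JsonCssExtractionStrategy` (the news URL is a placeholder):

```python
import asyncio
import json

from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
from crawl4ai.extraction_strategy import JsonCssExtractionStrategy

async def extract_articles():
    with open("docs/examples/cli/css_schema.json") as f:
        schema = json.load(f)
    config = CrawlerRunConfig(extraction_strategy=JsonCssExtractionStrategy(schema))
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(url="https://example.com/news", config=config)
        # extracted_content is a JSON string of the matched fields
        print(json.loads(result.extracted_content))

asyncio.run(extract_articles())
```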
11
docs/examples/cli/extract.yml
Normal file
@@ -0,0 +1,11 @@
type: "llm"
provider: "openai/gpt-4o-mini"
api_token: "env:OPENAI_API_KEY"
instruction: "Extract all articles with their titles, authors, publication dates and main topics in a structured format"
params:
  chunk_token_threshold: 4096
  overlap_rate: 0.1
  word_token_rate: 0.75
  temperature: 0.3
  max_tokens: 1000
  verbose: true
3
docs/examples/cli/extract_css.yml
Normal file
@@ -0,0 +1,3 @@
type: "json-css"
params:
  verbose: true
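Taken together, the files in docs/examples/cli/ are meant to drive a single `crwl` invocation. A hypothetical command line (flag names follow the CLI documentation; confirm the exact set with `crwl --help`):

```
crwl https://example.com/news \
    -B docs/examples/cli/browser.yml \
    -C docs/examples/cli/crawler.yml \
    -e docs/examples/cli/extract_css.yml \
    -s docs/examples/cli/css_schema.json \
    -o json
```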
26
docs/examples/cli/llm_schema.json
Normal file
@@ -0,0 +1,26 @@
{
  "title": "NewsArticle",
  "type": "object",
  "properties": {
    "title": {
      "type": "string",
      "description": "The title/headline of the news article"
    },
    "link": {
      "type": "string",
      "description": "The URL or link to the full article"
    },
    "details": {
      "type": "string",
      "description": "Brief summary or details about the article content"
    },
    "topics": {
      "type": "array",
      "items": {
        "type": "string"
      },
      "description": "List of topics or categories associated with the article"
    }
  },
  "required": ["title", "details"]
}
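This JSON schema pairs with extract.yml above: the CLI builds an LLM extraction strategy from the two files. A rough Python equivalent, assuming the LLMConfig-based signature of `LLMExtractionStrategy` in recent releases:

```python
import json
import os

from crawl4ai import LLMConfig
from crawl4ai.extraction_strategy import LLMExtractionStrategy

with open("docs/examples/cli/llm_schema.json") as f:
    schema = json.load(f)

strategy = LLMExtractionStrategy(
    llm_config=LLMConfig(
        provider="openai/gpt-4o-mini",
        api_token=os.environ["OPENAI_API_KEY"],  # extract.yml's "env:OPENAI_API_KEY"
    ),
    schema=schema,
    extraction_type="schema",
    instruction=(
        "Extract all articles with their titles, authors, publication dates "
        "and main topics in a structured format"
    ),
    chunk_token_threshold=4096,
    overlap_rate=0.1,
)
```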
209
docs/examples/crawler_monitor_example.py
Normal file
@@ -0,0 +1,209 @@
"""
CrawlerMonitor Example

This example demonstrates how to use the CrawlerMonitor component
to visualize and track web crawler operations in real-time.
"""

import time
import uuid
import random
import threading

from crawl4ai.components.crawler_monitor import CrawlerMonitor
from crawl4ai.models import CrawlStatus


def simulate_webcrawler_operations(monitor, num_tasks=20):
    """
    Simulates a web crawler's operations with multiple tasks and different states.

    Args:
        monitor: The CrawlerMonitor instance
        num_tasks: Number of tasks to simulate
    """
    print(f"Starting simulation with {num_tasks} tasks...")

    # Create and register all tasks first
    task_ids = []
    for i in range(num_tasks):
        task_id = str(uuid.uuid4())
        url = f"https://example.com/page{i}"
        monitor.add_task(task_id, url)
        task_ids.append((task_id, url))

        # Small delay between task creation
        time.sleep(0.2)

    # Process tasks with a variety of different behaviors
    threads = []
    for i, (task_id, url) in enumerate(task_ids):
        # Create a thread for each task
        thread = threading.Thread(
            target=process_task,
            args=(monitor, task_id, url, i)
        )
        thread.daemon = True
        threads.append(thread)

    # Start threads in batches to simulate concurrent processing
    batch_size = 4  # Process 4 tasks at a time
    for i in range(0, len(threads), batch_size):
        batch = threads[i:i + batch_size]
        for thread in batch:
            thread.start()
            time.sleep(0.5)  # Stagger thread start times

        # Wait a bit before starting the next batch
        time.sleep(random.uniform(1.0, 3.0))

        # Update queue statistics
        update_queue_stats(monitor)

        # Simulate memory pressure changes
        active_threads = [t for t in threads if t.is_alive()]
        if len(active_threads) > 8:
            monitor.update_memory_status("CRITICAL")
        elif len(active_threads) > 4:
            monitor.update_memory_status("PRESSURE")
        else:
            monitor.update_memory_status("NORMAL")

    # Wait for all threads to complete
    for thread in threads:
        thread.join()

    # Final updates
    update_queue_stats(monitor)
    monitor.update_memory_status("NORMAL")

    print("Simulation completed!")


def process_task(monitor, task_id, url, index):
    """Simulate processing of a single task."""
    # Tasks start in the queued state (already added)

    # Simulate waiting in the queue
    wait_time = random.uniform(0.5, 3.0)
    time.sleep(wait_time)

    # Start processing - move to IN_PROGRESS
    monitor.update_task(
        task_id=task_id,
        status=CrawlStatus.IN_PROGRESS,
        start_time=time.time(),
        wait_time=wait_time,
    )

    # Simulate task processing with memory usage changes
    total_process_time = random.uniform(2.0, 10.0)
    step_time = total_process_time / 5  # Update in 5 steps

    for step in range(5):
        # Simulate increasing then decreasing memory usage
        if step < 3:  # First 3 steps - increasing
            memory_usage = random.uniform(5.0, 20.0) * (step + 1)
        else:  # Last 2 steps - decreasing
            memory_usage = random.uniform(5.0, 20.0) * (5 - step)

        # Update peak memory if this is higher
        peak = max(memory_usage, monitor.get_task_stats(task_id).get("peak_memory", 0))

        monitor.update_task(
            task_id=task_id,
            memory_usage=memory_usage,
            peak_memory=peak,
        )

        time.sleep(step_time)

    # Determine the final state - 80% success, 20% failure
    if index % 5 == 0:  # Every 5th task fails
        monitor.update_task(
            task_id=task_id,
            status=CrawlStatus.FAILED,
            end_time=time.time(),
            memory_usage=0.0,
            error_message="Connection timeout",
        )
    else:
        monitor.update_task(
            task_id=task_id,
            status=CrawlStatus.COMPLETED,
            end_time=time.time(),
            memory_usage=0.0,
        )


def update_queue_stats(monitor):
    """Update queue statistics based on current tasks."""
    task_stats = monitor.get_all_task_stats()

    # Count queued tasks
    queued_tasks = [
        stats for stats in task_stats.values()
        if stats["status"] == CrawlStatus.QUEUED.name
    ]

    total_queued = len(queued_tasks)

    if total_queued > 0:
        current_time = time.time()
        # Calculate wait times
        wait_times = [
            current_time - stats.get("enqueue_time", current_time)
            for stats in queued_tasks
        ]
        highest_wait_time = max(wait_times) if wait_times else 0.0
        avg_wait_time = sum(wait_times) / len(wait_times) if wait_times else 0.0
    else:
        highest_wait_time = 0.0
        avg_wait_time = 0.0

    # Update the monitor
    monitor.update_queue_statistics(
        total_queued=total_queued,
        highest_wait_time=highest_wait_time,
        avg_wait_time=avg_wait_time,
    )


def main():
    # Initialize the monitor
    monitor = CrawlerMonitor(
        urls_total=20,     # Total URLs to process
        refresh_rate=0.5,  # Update UI twice per second
        enable_ui=True,    # Enable terminal UI
        max_width=120,     # Set maximum width to 120 characters
    )

    # Start the monitor
    monitor.start()

    try:
        # Run the simulation
        simulate_webcrawler_operations(monitor)

        # Keep the monitor running a bit to see the final state
        print("Waiting to view final state...")
        time.sleep(5)

    except KeyboardInterrupt:
        print("\nExample interrupted by user")
    finally:
        # Stop the monitor
        monitor.stop()
        print("Example completed!")

    # Print some statistics
    summary = monitor.get_summary()
    print("\nCrawler Statistics Summary:")
    print(f"Total URLs: {summary['urls_total']}")
    print(f"Completed: {summary['urls_completed']}")
    print(f"Completion percentage: {summary['completion_percentage']:.1f}%")
    print(f"Peak memory usage: {summary['peak_memory_percent']:.1f}%")

    # Print task status counts
    status_counts = summary['status_counts']
    print("\nTask Status Counts:")
    for status, count in status_counts.items():
        print(f"  {status}: {count}")


if __name__ == "__main__":
    main()
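The simulation above drives the monitor by hand; in a real crawl you would normally hand the monitor to a dispatcher and let `arun_many` feed it. A minimal sketch, assuming `MemoryAdaptiveDispatcher` accepts a `monitor` argument as in recent releases:

```python
import asyncio

from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, CacheMode
from crawl4ai.async_dispatcher import MemoryAdaptiveDispatcher
from crawl4ai.components.crawler_monitor import CrawlerMonitor

async def monitored_crawl():
    urls = [f"https://example.com/page{i}" for i in range(10)]
    # The dispatcher reports task states to the monitor as it schedules crawls
    dispatcher = MemoryAdaptiveDispatcher(monitor=CrawlerMonitor(urls_total=len(urls)))
    async with AsyncWebCrawler() as crawler:
        await crawler.arun_many(
            urls=urls,
            config=CrawlerRunConfig(cache_mode=CacheMode.BYPASS),
            dispatcher=dispatcher,
        )

asyncio.run(monitored_crawl())
```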
443
docs/examples/crypto_analysis_example.py
Normal file
@@ -0,0 +1,443 @@
"""
Crawl4AI Crypto Trading Analysis Demo
Author: Unclecode
Date: 2024-03-15

This script demonstrates advanced crypto market analysis using:
1. Web scraping of real-time CoinMarketCap data
2. Smart table extraction with layout detection
3. Hedge fund-grade financial metrics
4. Interactive visualizations for trading signals

Key Features:
- Volume Anomaly Detection: flags unusual trading activity
- Liquidity Power Score: identifies easily tradable assets
- Volatility-Weighted Momentum: surfaces sustainable trends
- Smart Money Signals: algorithmic buy/hold recommendations
"""

import asyncio
import re
from typing import List

import numpy as np
import pandas as pd
import plotly.express as px

from crawl4ai import (
    AsyncWebCrawler,
    BrowserConfig,
    CrawlerRunConfig,
    CacheMode,
    LXMLWebScrapingStrategy,
)
from crawl4ai import CrawlResult

__current_dir__ = __file__.rsplit("/", 1)[0]


class CryptoAlphaGenerator:
    """
    Advanced crypto analysis engine that transforms raw web data into:
    - Volume anomaly flags
    - Liquidity scores
    - Momentum-risk ratios
    - Machine learning-inspired trading signals

    Methods:
        clean_data(): Normalize raw scraped tables into numeric columns
        calculate_metrics(): Compute trading metrics and signal flags
        create_visuals(): Generate institutional-grade visualizations
        generate_insights(): Create plain-English trading recommendations
    """

    def clean_data(self, df: pd.DataFrame) -> pd.DataFrame:
        """
        Convert crypto market data to a machine-readable format.
        Handles currency symbols, units (B = billions, T = trillions),
        and percentage values.
        """
        # Make a copy to avoid SettingWithCopyWarning
        df = df.copy()

        # Clean the Price column (strip currency symbols and separators)
        df["Price"] = df["Price"].astype(str).str.replace(r"[^\d.]", "", regex=True).astype(float)

        # Handle Market Cap and Volume, considering both billions and trillions
        def convert_large_numbers(value):
            if pd.isna(value):
                return float('nan')
            value = str(value)
            multiplier = 1
            if 'B' in value:
                multiplier = 1e9
            elif 'T' in value:
                multiplier = 1e12
            # Handle cases where the value might already be numeric
            cleaned_value = re.sub(r"[^\d.]", "", value)
            return float(cleaned_value) * multiplier if cleaned_value else float('nan')

        df["Market Cap"] = df["Market Cap"].apply(convert_large_numbers)
        df["Volume(24h)"] = df["Volume(24h)"].apply(convert_large_numbers)

        # Convert percentages to decimal values
        for col in ["1h %", "24h %", "7d %"]:
            if col in df.columns:
                # First ensure it's a string, then clean
                df[col] = (
                    df[col].astype(str)
                    .str.replace("%", "")
                    .str.replace(",", ".")
                    .replace("nan", np.nan)
                )
                df[col] = pd.to_numeric(df[col], errors='coerce') / 100

        return df

    def calculate_metrics(self, df: pd.DataFrame) -> pd.DataFrame:
        """
        Compute advanced trading metrics used by quantitative funds:

        1. Volume/Market Cap Ratio - measures liquidity efficiency
           (high ratio = underestimated attention; small cap = higher growth potential)

        2. Volatility Score - risk-adjusted momentum potential; shows how stable the trend is
           (standard deviation of the 1h/24h/7d returns)

        3. Momentum Score - weighted average of returns; shows how strong the trend is
           (1h: 30% + 24h: 50% + 7d: 20%)

        4. Volume Anomaly - flags coins trading above 3x the median volume
           (unusual activity that may signal insider buying or news)
        """
        # Liquidity metrics
        df["Volume/Market Cap Ratio"] = df["Volume(24h)"] / df["Market Cap"]

        # Risk metrics
        df["Volatility Score"] = df[["1h %", "24h %", "7d %"]].std(axis=1)

        # Momentum metrics
        df["Momentum Score"] = df["1h %"] * 0.3 + df["24h %"] * 0.5 + df["7d %"] * 0.2

        # Anomaly detection
        median_vol = df["Volume(24h)"].median()
        df["Volume Anomaly"] = df["Volume(24h)"] > 3 * median_vol

        # Value flags
        # Undervalued Flag - low market cap with high momentum
        # (high growth potential and little attention)
        df["Undervalued Flag"] = (df["Market Cap"] < 1e9) & (
            df["Momentum Score"] > 0.05
        )
        # Liquid Giant flag - high volume/market cap ratio plus large market cap
        # (high liquidity and large cap = institutional interest)
        df["Liquid Giant"] = (df["Volume/Market Cap Ratio"] > 0.15) & (
            df["Market Cap"] > 1e9
        )

        return df

    def generate_insights_simple(self, df: pd.DataFrame) -> str:
        """
        Generates an ultra-actionable crypto trading report with:
        - Risk-tiered opportunities (high/medium/low)
        - Concrete examples for each trade type
        - Entry/exit strategies spelled out
        - Visual cues for quick scanning
        """
        report = [
            "🚀 **CRYPTO TRADING CHEAT SHEET** 🚀",
            "*Based on quantitative signals + hedge fund tactics*",
            "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
        ]

        # 1. HIGH-RISK: Undervalued small caps (momentum plays)
        high_risk = df[df["Undervalued Flag"]].sort_values("Momentum Score", ascending=False)
        if not high_risk.empty:
            example_coin = high_risk.iloc[0]
            report.extend([
                "\n🔥 **HIGH-RISK: Rocket Fuel Small-Caps**",
                f"*Example Trade:* {example_coin['Name']} (Price: ${example_coin['Price']:.6f})",
                "📊 *Why?* Tiny market cap (<$1B) but STRONG momentum (+{:.0f}% last week)".format(example_coin['7d %'] * 100),
                "🎯 *Strategy:*",
                "1. Wait for a 5-10% dip from the recent high (${:.6f} → buy under ${:.6f})".format(
                    example_coin['Price'] / (1 - example_coin['24h %']),  # Approx. recent high
                    example_coin['Price'] * 0.95
                ),
                "2. Set stop-loss at -10% (${:.6f})".format(example_coin['Price'] * 0.90),
                "3. Take profit at +20% (${:.6f})".format(example_coin['Price'] * 1.20),
                "⚠️ *Risk Warning:* These can drop 30% fast! Never bet more than 5% of your portfolio.",
                "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
            ])

        # 2. MEDIUM-RISK: Liquid giants (swing trades)
        medium_risk = df[df["Liquid Giant"]].sort_values("Volume/Market Cap Ratio", ascending=False)
        if not medium_risk.empty:
            example_coin = medium_risk.iloc[0]
            report.extend([
                "\n💎 **MEDIUM-RISK: Liquid Giants (Safe Swing Trades)**",
                f"*Example Trade:* {example_coin['Name']} (Market Cap: ${example_coin['Market Cap'] / 1e9:.1f}B)",
                "📊 *Why?* Huge volume (${:.1f}M/day) makes it easy to enter/exit".format(example_coin['Volume(24h)'] / 1e6),
                "🎯 *Strategy:*",
                "1. Buy when 24h volume > 15% of market cap (current: {:.0f}%)".format(example_coin['Volume/Market Cap Ratio'] * 100),
                "2. Hold 1-4 weeks (big coins trend longer)",
                "3. Exit when momentum drops below 5% (current: {:.0f}%)".format(example_coin['Momentum Score'] * 100),
                "📉 *Pro Tip:* Watch Bitcoin's trend - if BTC drops 5%, these usually follow.",
                "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
            ])

        # 3. LOW-RISK: Stable momentum (DCA targets)
        low_risk = df[
            (df["Momentum Score"] > 0.05) &
            (df["Volatility Score"] < 0.03)
        ].sort_values("Market Cap", ascending=False)
        if not low_risk.empty:
            example_coin = low_risk.iloc[0]
            report.extend([
                "\n🛡️ **LOW-RISK: Steady Climbers (DCA & Forget)**",
                f"*Example Trade:* {example_coin['Name']} (Volatility: {example_coin['Volatility Score']:.2f}/5)",
                "📊 *Why?* Rises steadily (+{:.0f}%/week) with LOW drama".format(example_coin['7d %'] * 100),
                "🎯 *Strategy:*",
                "1. Buy small amounts every Tuesday/Friday (DCA)",
                "2. Hold for 3+ months (compound gains work best here)",
                "3. Sell 10% at every +25% milestone",
                "💰 *Best For:* Long-term investors who hate sleepless nights",
                "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
            ])

        # Volume spike alerts
        anomalies = df[df["Volume Anomaly"]].sort_values("Volume(24h)", ascending=False)
        if not anomalies.empty:
            example_coin = anomalies.iloc[0]
            report.extend([
                "\n🚨 **Volume Spike Alert (Possible News/Whale Action)**",
                f"*Coin:* {example_coin['Name']} (Volume: ${example_coin['Volume(24h)'] / 1e6:.1f}M, usual: ${example_coin['Volume(24h)'] / 3 / 1e6:.1f}M)",
                "🔍 *Check:* Twitter/CoinGecko for news before trading",
                "⚡ *If no news:* Could be insider buying - watch the price action:",
                "- Break above today's high → buy with a tight stop-loss",
                "- Fade back down → avoid (may be a fakeout)"
            ])

        # Pro tip footer
        report.append("\n✨ *Pro Tip:* Bookmark this report & check back in 24h to see if the signals held up.")

        return "\n".join(report)

    def generate_insights(self, df: pd.DataFrame) -> str:
        """
        Generates a tactical trading report with:
        - Top 3 trades per risk level (high/medium/low)
        - Auto-calculated entry/exit prices
        - A BTC chart-toggle tip
        """
        # Filter the top candidates for each risk level
        high_risk = (
            df[df["Undervalued Flag"]]
            .sort_values("Momentum Score", ascending=False)
            .head(3)
        )
        medium_risk = (
            df[df["Liquid Giant"]]
            .sort_values("Volume/Market Cap Ratio", ascending=False)
            .head(3)
        )
        low_risk = (
            df[(df["Momentum Score"] > 0.05) & (df["Volatility Score"] < 0.03)]
            .sort_values("Momentum Score", ascending=False)
            .head(3)
        )

        report = ["# 🎯 Crypto Trading Tactical Report (Top 3 Per Risk Tier)"]

        # 1. High-risk trades (small-cap momentum)
        if not high_risk.empty:
            report.append("\n## 🔥 HIGH RISK: Small-Cap Rockets (5-50% Potential)")
            for i, coin in high_risk.iterrows():
                current_price = coin["Price"]
                entry = current_price * 0.95        # -5% dip
                stop_loss = current_price * 0.90    # -10%
                take_profit = current_price * 1.20  # +20%

                report.append(
                    f"\n### {coin['Name']} (Momentum: {coin['Momentum Score']:.1%})"
                    f"\n- **Current Price:** ${current_price:.4f}"
                    f"\n- **Entry:** < ${entry:.4f} (Wait for pullback)"
                    f"\n- **Stop-Loss:** ${stop_loss:.4f} (-10%)"
                    f"\n- **Target:** ${take_profit:.4f} (+20%)"
                    f"\n- **Risk/Reward:** 1:2"
                    f"\n- **Watch:** Volume spikes above {coin['Volume(24h)'] / 1e6:.1f}M"
                )

        # 2. Medium-risk trades (liquid giants)
        if not medium_risk.empty:
            report.append("\n## 💎 MEDIUM RISK: Liquid Swing Trades (10-30% Potential)")
            for i, coin in medium_risk.iterrows():
                current_price = coin["Price"]
                entry = current_price * 0.98        # -2% dip
                stop_loss = current_price * 0.94    # -6%
                take_profit = current_price * 1.15  # +15%

                report.append(
                    f"\n### {coin['Name']} (Liquidity Score: {coin['Volume/Market Cap Ratio']:.1%})"
                    f"\n- **Current Price:** ${current_price:.2f}"
                    f"\n- **Entry:** < ${entry:.2f} (Buy slight dips)"
                    f"\n- **Stop-Loss:** ${stop_loss:.2f} (-6%)"
                    f"\n- **Target:** ${take_profit:.2f} (+15%)"
                    f"\n- **Hold Time:** 1-3 weeks"
                    f"\n- **Key Metric:** Volume/Cap > 15%"
                )

        # 3. Low-risk trades (stable momentum)
        if not low_risk.empty:
            report.append("\n## 🛡️ LOW RISK: Steady Gainers (5-15% Potential)")
            for i, coin in low_risk.iterrows():
                current_price = coin["Price"]
                entry = current_price * 0.99        # -1% dip
                stop_loss = current_price * 0.97    # -3%
                take_profit = current_price * 1.10  # +10%

                report.append(
                    f"\n### {coin['Name']} (Stability Score: {1 / coin['Volatility Score']:.1f}x)"
                    f"\n- **Current Price:** ${current_price:.2f}"
                    f"\n- **Entry:** < ${entry:.2f} (Safe zone)"
                    f"\n- **Stop-Loss:** ${stop_loss:.2f} (-3%)"
                    f"\n- **Target:** ${take_profit:.2f} (+10%)"
                    f"\n- **DCA Suggestion:** 3 buys over 72 hours"
                )

        # Volume anomaly alert
        anomalies = df[df["Volume Anomaly"]].sort_values("Volume(24h)", ascending=False).head(2)
        if not anomalies.empty:
            report.append("\n⚠️ **Volume Spike Alerts**")
            for i, coin in anomalies.iterrows():
                report.append(
                    f"- {coin['Name']}: Volume {coin['Volume(24h)'] / 1e6:.1f}M "
                    f"(3x normal) | Price moved: {coin['24h %']:.1%}"
                )

        # Pro tip
        report.append(
            "\n📊 **Chart Hack:** Hide BTC in the visuals:\n"
            "```python\n"
            "# For the 3D map:\n"
            "fig.update_traces(visible=False, selector={'name': 'Bitcoin'})\n"
            "# For the treemap:\n"
            "df = df[df['Name'] != 'Bitcoin']\n"
            "```"
        )

        return "\n".join(report)

    def create_visuals(self, df: pd.DataFrame) -> dict:
        """Enhanced visuals with BTC toggle support."""
        # 3D market map (with BTC toggle hint)
        fig1 = px.scatter_3d(
            df,
            x="Market Cap",
            y="Volume/Market Cap Ratio",
            z="Momentum Score",
            color="Name",  # Color by name to allow toggling
            hover_name="Name",
            title="Market Map (Toggle BTC in legend to focus on alts)",
            log_x=True,
        )
        fig1.update_traces(
            marker=dict(size=df["Volatility Score"] * 100 + 5)  # Dynamic sizing
        )

        # Liquidity tree (exclude BTC if it is too dominant)
        btc_caps = df[df["Name"] == "BitcoinBTC"]["Market Cap"].values
        if len(btc_caps) and btc_caps[0] > df["Market Cap"].median() * 10:
            df = df[df["Name"] != "BitcoinBTC"]

        fig2 = px.treemap(
            df,
            path=["Name"],
            values="Market Cap",
            color="Volume/Market Cap Ratio",
            title="Liquidity Tree (BTC auto-removed if dominant)",
        )

        return {"market_map": fig1, "liquidity_tree": fig2}


async def main():
    """
    Main execution flow:
    1. Configure a browser for scraping
    2. Extract live crypto market data
    3. Clean and analyze using hedge fund models
    4. Generate visualizations and insights
    5. Output a professional trading report
    """
    # Configure the browser (headful mode helps avoid bot detection)
    browser_config = BrowserConfig(
        headless=False,
    )

    # Initialize the crawler with smart table detection
    crawler = AsyncWebCrawler(config=browser_config)
    await crawler.start()

    try:
        # Set up scraping parameters
        crawl_config = CrawlerRunConfig(
            cache_mode=CacheMode.BYPASS,
            table_score_threshold=8,  # Strict table detection
            keep_data_attributes=True,
            scraping_strategy=LXMLWebScrapingStrategy(),
            scan_full_page=True,
            scroll_delay=0.2,
        )

        # # Execute market data extraction
        # results: List[CrawlResult] = await crawler.arun(
        #     url="https://coinmarketcap.com/?page=1", config=crawl_config
        # )

        # # Process results: extract the primary market table into a DataFrame
        # raw_df = pd.DataFrame()
        # for result in results:
        #     if result.success and result.media["tables"]:
        #         raw_df = pd.DataFrame(
        #             result.media["tables"][0]["rows"],
        #             columns=result.media["tables"][0]["headers"],
        #         )
        #         break

        # This is for debugging only
        # ////// Remove this in production, from here..
        # Save raw data for debugging:
        # raw_df.to_csv(f"{__current_dir__}/tmp/raw_crypto_data.csv", index=False)
        # print("🔍 Raw data saved to 'raw_crypto_data.csv'")

        # Read from file for debugging
        raw_df = pd.read_csv(f"{__current_dir__}/tmp/raw_crypto_data.csv")
        # ////// ..to here

        # Select the top 50 coins
        raw_df = raw_df.head(50)
        # Remove the trailing "Buy" label from the name column
        raw_df["Name"] = raw_df["Name"].str.replace("Buy", "")

        # Initialize the analysis engine
        analyzer = CryptoAlphaGenerator()
        clean_df = analyzer.clean_data(raw_df)
        analyzed_df = analyzer.calculate_metrics(clean_df)

        # Generate outputs
        visuals = analyzer.create_visuals(analyzed_df)
        insights = analyzer.generate_insights(analyzed_df)

        # Save visualizations
        visuals["market_map"].write_html(f"{__current_dir__}/tmp/market_map.html")
        visuals["liquidity_tree"].write_html(f"{__current_dir__}/tmp/liquidity_tree.html")

        # Display results
        print("🔑 Key Trading Insights:")
        print(insights)
        print("\n📊 Open 'market_map.html' for interactive analysis")
        print("\n📊 Open 'liquidity_tree.html' for interactive analysis")

    finally:
        await crawler.close()


if __name__ == "__main__":
    asyncio.run(main())
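A quick sanity check of the metric definitions in `calculate_metrics`, using hypothetical returns:

```python
# Hypothetical returns: +2% (1h), +4% (24h), +10% (7d)
h1, h24, d7 = 0.02, 0.04, 0.10
momentum = h1 * 0.3 + h24 * 0.5 + d7 * 0.2
print(f"Momentum Score: {momentum:.3f}")  # 0.046 -> a 4.6% weighted trend
```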
498
docs/examples/deepcrawl_example.py
Normal file
@@ -0,0 +1,498 @@
import asyncio
import time

from crawl4ai import CrawlerRunConfig, AsyncWebCrawler, CacheMode
from crawl4ai.content_scraping_strategy import LXMLWebScrapingStrategy
from crawl4ai.deep_crawling import BFSDeepCrawlStrategy, BestFirstCrawlingStrategy
from crawl4ai.deep_crawling.filters import (
    FilterChain,
    URLPatternFilter,
    DomainFilter,
    ContentTypeFilter,
    ContentRelevanceFilter,
    SEOFilter,
)
from crawl4ai.deep_crawling.scorers import (
    KeywordRelevanceScorer,
)


# 1️⃣ Basic Deep Crawl Setup
async def basic_deep_crawl():
    """
    PART 1: Basic deep crawl setup - demonstrates a simple two-level deep crawl.

    This function shows:
    - How to set up BFSDeepCrawlStrategy (breadth-first search)
    - Setting depth and domain parameters
    - Processing the results to show the hierarchy
    """
    print("\n===== BASIC DEEP CRAWL SETUP =====")

    # Configure a 2-level deep crawl using the breadth-first search strategy.
    # max_depth=2 means: initial page (depth 0) + 2 more levels
    # include_external=False means: only follow links within the same domain
    config = CrawlerRunConfig(
        deep_crawl_strategy=BFSDeepCrawlStrategy(max_depth=2, include_external=False),
        scraping_strategy=LXMLWebScrapingStrategy(),
        verbose=True,  # Show progress during crawling
    )

    async with AsyncWebCrawler() as crawler:
        start_time = time.perf_counter()
        results = await crawler.arun(url="https://docs.crawl4ai.com", config=config)

        # Group results by depth to visualize the crawl tree
        pages_by_depth = {}
        for result in results:
            depth = result.metadata.get("depth", 0)
            if depth not in pages_by_depth:
                pages_by_depth[depth] = []
            pages_by_depth[depth].append(result.url)

        print(f"✅ Crawled {len(results)} pages total")

        # Display the crawl structure by depth
        for depth, urls in sorted(pages_by_depth.items()):
            print(f"\nDepth {depth}: {len(urls)} pages")
            # Show the first 3 URLs for each depth as examples
            for url in urls[:3]:
                print(f"  → {url}")
            if len(urls) > 3:
                print(f"  ... and {len(urls) - 3} more")

        print(
            f"\n✅ Performance: {len(results)} pages in {time.perf_counter() - start_time:.2f} seconds"
        )


# 2️⃣ Stream vs. Non-Stream Execution
async def stream_vs_nonstream():
    """
    PART 2: Demonstrates the difference between stream and non-stream execution.

    Non-stream: waits for all results before processing.
    Stream: processes results as they become available.
    """
    print("\n===== STREAM VS. NON-STREAM EXECUTION =====")

    # Common configuration for both examples
    base_config = CrawlerRunConfig(
        deep_crawl_strategy=BFSDeepCrawlStrategy(max_depth=1, include_external=False),
        scraping_strategy=LXMLWebScrapingStrategy(),
        verbose=False,
    )

    async with AsyncWebCrawler() as crawler:
        # NON-STREAMING MODE
        print("\n📊 NON-STREAMING MODE:")
        print("  In this mode, all results are collected before being returned.")

        non_stream_config = base_config.clone()
        non_stream_config.stream = False

        start_time = time.perf_counter()
        results = await crawler.arun(
            url="https://docs.crawl4ai.com", config=non_stream_config
        )

        print(f"  ✅ Received all {len(results)} results at once")
        print(f"  ✅ Total duration: {time.perf_counter() - start_time:.2f} seconds")

        # STREAMING MODE
        print("\n📊 STREAMING MODE:")
        print("  In this mode, results are processed as they become available.")

        stream_config = base_config.clone()
        stream_config.stream = True

        start_time = time.perf_counter()
        result_count = 0
        first_result_time = None

        async for result in await crawler.arun(
            url="https://docs.crawl4ai.com", config=stream_config
        ):
            result_count += 1
            if result_count == 1:
                first_result_time = time.perf_counter() - start_time
                print(
                    f"  ✅ First result received after {first_result_time:.2f} seconds: {result.url}"
                )
            elif result_count % 5 == 0:  # Show every 5th result for brevity
                print(f"  → Result #{result_count}: {result.url}")

        print(f"  ✅ Total: {result_count} results")
        print(f"  ✅ First result: {first_result_time:.2f} seconds")
        print(f"  ✅ All results: {time.perf_counter() - start_time:.2f} seconds")
        print("\n🔍 Key takeaway: streaming lets you process results immediately")


# 3️⃣ Introduce Filters & Scorers
async def filters_and_scorers():
    """
    PART 3: Demonstrates the use of filters and scorers for more targeted crawling.

    This function progressively adds:
    1. A single URL pattern filter
    2. Multiple filters in a chain
    3. Scorers for prioritizing pages
    """
    print("\n===== FILTERS AND SCORERS =====")

    async with AsyncWebCrawler() as crawler:
        # SINGLE FILTER EXAMPLE
        print("\n📊 EXAMPLE 1: SINGLE URL PATTERN FILTER")
        print("  Only crawl pages containing 'core' in the URL")

        # Create a filter that only allows URLs with 'core' in them
        url_filter = URLPatternFilter(patterns=["*core*"])

        config = CrawlerRunConfig(
            deep_crawl_strategy=BFSDeepCrawlStrategy(
                max_depth=1,
                include_external=False,
                filter_chain=FilterChain([url_filter]),  # Single filter
            ),
            scraping_strategy=LXMLWebScrapingStrategy(),
            cache_mode=CacheMode.BYPASS,
            verbose=True,
        )

        results = await crawler.arun(url="https://docs.crawl4ai.com", config=config)

        print(f"  ✅ Crawled {len(results)} pages matching '*core*'")
        for result in results[:3]:  # Show the first 3 results
            print(f"  → {result.url}")
        if len(results) > 3:
            print(f"  ... and {len(results) - 3} more")

        # MULTIPLE FILTERS EXAMPLE
        print("\n📊 EXAMPLE 2: MULTIPLE FILTERS IN A CHAIN")
        print("  Only crawl pages that:")
        print("  1. Contain '2024' in the URL")
        print("  2. Are from 'techcrunch.com'")
        print("  3. Are of text/html or application/javascript content type")

        # Create a chain of filters
        filter_chain = FilterChain(
            [
                URLPatternFilter(patterns=["*2024*"]),
                DomainFilter(
                    allowed_domains=["techcrunch.com"],
                    blocked_domains=["guce.techcrunch.com", "oidc.techcrunch.com"],
                ),
                ContentTypeFilter(
                    allowed_types=["text/html", "application/javascript"]
                ),
            ]
        )

        config = CrawlerRunConfig(
            deep_crawl_strategy=BFSDeepCrawlStrategy(
                max_depth=1, include_external=False, filter_chain=filter_chain
            ),
            scraping_strategy=LXMLWebScrapingStrategy(),
            verbose=True,
        )

        results = await crawler.arun(url="https://techcrunch.com", config=config)

        print(f"  ✅ Crawled {len(results)} pages after applying all filters")
        for result in results[:3]:
            print(f"  → {result.url}")
        if len(results) > 3:
            print(f"  ... and {len(results) - 3} more")

        # SCORERS EXAMPLE
        print("\n📊 EXAMPLE 3: USING A KEYWORD RELEVANCE SCORER")
        print(
            "  Score pages based on relevance to keywords: 'crawl', 'example', 'async', 'configuration', 'javascript', 'css'"
        )

        # Create a keyword relevance scorer
        keyword_scorer = KeywordRelevanceScorer(
            keywords=["crawl", "example", "async", "configuration", "javascript", "css"],
            weight=1,
        )

        config = CrawlerRunConfig(
            deep_crawl_strategy=BestFirstCrawlingStrategy(
                max_depth=1, include_external=False, url_scorer=keyword_scorer
            ),
            scraping_strategy=LXMLWebScrapingStrategy(),
            cache_mode=CacheMode.BYPASS,
            verbose=True,
            stream=True,
        )

        results = []
        async for result in await crawler.arun(
            url="https://docs.crawl4ai.com", config=config
        ):
            results.append(result)
            score = result.metadata.get("score", 0)
            print(f"  → Score: {score:.2f} | {result.url}")

        print(f"  ✅ Crawler prioritized {len(results)} pages by relevance score")
        print("  🔍 Note: BestFirstCrawlingStrategy visits the highest-scoring pages first")


# 4️⃣ Advanced Filters
async def advanced_filters():
    """
    PART 4: Demonstrates advanced filtering techniques for specialized crawling.

    This function covers:
    - SEO filters
    - Text relevancy filtering
    - Combining advanced filters
    """
    print("\n===== ADVANCED FILTERS =====")

    async with AsyncWebCrawler() as crawler:
        # SEO FILTER EXAMPLE
        print("\n📊 EXAMPLE 1: SEO FILTERS")
        print(
            "  Quantitative SEO quality assessment filter, based on searching for keywords in the head section"
        )

        seo_filter = SEOFilter(
            threshold=0.5, keywords=["dynamic", "interaction", "javascript"]
        )

        config = CrawlerRunConfig(
            deep_crawl_strategy=BFSDeepCrawlStrategy(
                max_depth=1, filter_chain=FilterChain([seo_filter])
            ),
            scraping_strategy=LXMLWebScrapingStrategy(),
            verbose=True,
            cache_mode=CacheMode.BYPASS,
        )

        results = await crawler.arun(url="https://docs.crawl4ai.com", config=config)

        print(f"  ✅ Found {len(results)} pages with relevant keywords")
        for result in results:
            print(f"  → {result.url}")

        # ADVANCED TEXT RELEVANCY FILTER
        print("\n📊 EXAMPLE 2: ADVANCED TEXT RELEVANCY FILTER")

        # More sophisticated content relevance filter
        relevance_filter = ContentRelevanceFilter(
            query="Interact with the web using your authentic digital identity",
            threshold=0.7,
        )

        config = CrawlerRunConfig(
            deep_crawl_strategy=BFSDeepCrawlStrategy(
                max_depth=1, filter_chain=FilterChain([relevance_filter])
            ),
            scraping_strategy=LXMLWebScrapingStrategy(),
            verbose=True,
            cache_mode=CacheMode.BYPASS,
        )

        results = await crawler.arun(url="https://docs.crawl4ai.com", config=config)

        print(f"  ✅ Found {len(results)} pages")
        for result in results:
            relevance_score = result.metadata.get("relevance_score", 0)
            print(f"  → Score: {relevance_score:.2f} | {result.url}")


# 5️⃣ Max Pages and Score Thresholds
async def max_pages_and_thresholds():
    """
    PART 5: Demonstrates the max_pages and score_threshold parameters with different strategies.

    This function shows:
    - How to limit the number of pages crawled
    - How to set score thresholds for more targeted crawling
    - Comparing BFS, DFS, and Best-First strategies with these parameters
    """
    print("\n===== MAX PAGES AND SCORE THRESHOLDS =====")

    from crawl4ai.deep_crawling import DFSDeepCrawlStrategy

    async with AsyncWebCrawler() as crawler:
        # Define a common keyword scorer for all examples
        keyword_scorer = KeywordRelevanceScorer(
            keywords=["browser", "crawler", "web", "automation"],
            weight=1.0,
        )

        # EXAMPLE 1: BFS WITH MAX PAGES
        print("\n📊 EXAMPLE 1: BFS STRATEGY WITH MAX PAGES LIMIT")
        print("  Limit the crawler to a maximum of 5 pages")

        bfs_config = CrawlerRunConfig(
            deep_crawl_strategy=BFSDeepCrawlStrategy(
                max_depth=2,
                include_external=False,
                url_scorer=keyword_scorer,
                max_pages=5,  # Only crawl 5 pages
            ),
            scraping_strategy=LXMLWebScrapingStrategy(),
            verbose=True,
            cache_mode=CacheMode.BYPASS,
        )

        results = await crawler.arun(url="https://docs.crawl4ai.com", config=bfs_config)

        print(f"  ✅ Crawled {len(results)} pages (capped at 5 by max_pages)")
        for result in results:
            depth = result.metadata.get("depth", 0)
            print(f"  → Depth: {depth} | {result.url}")

        # EXAMPLE 2: DFS WITH SCORE THRESHOLD
        print("\n📊 EXAMPLE 2: DFS STRATEGY WITH SCORE THRESHOLD")
        print("  Only crawl pages with a relevance score above 0.7")

        dfs_config = CrawlerRunConfig(
            deep_crawl_strategy=DFSDeepCrawlStrategy(
                max_depth=2,
                include_external=False,
                url_scorer=keyword_scorer,
                score_threshold=0.7,  # Only process URLs with scores above 0.7
                max_pages=10,
            ),
            scraping_strategy=LXMLWebScrapingStrategy(),
            verbose=True,
            cache_mode=CacheMode.BYPASS,
        )

        results = await crawler.arun(url="https://docs.crawl4ai.com", config=dfs_config)

        print(f"  ✅ Crawled {len(results)} pages with scores above the threshold")
        for result in results:
            score = result.metadata.get("score", 0)
            depth = result.metadata.get("depth", 0)
            print(f"  → Depth: {depth} | Score: {score:.2f} | {result.url}")

        # EXAMPLE 3: BEST-FIRST WITH A PAGE LIMIT
        print("\n📊 EXAMPLE 3: BEST-FIRST STRATEGY WITH A PAGE LIMIT")
        print("  Limit to 7 pages, prioritizing the highest-scoring ones")

        bf_config = CrawlerRunConfig(
            deep_crawl_strategy=BestFirstCrawlingStrategy(
                max_depth=2,
                include_external=False,
                url_scorer=keyword_scorer,
                max_pages=7,  # Limit to 7 pages total
            ),
            scraping_strategy=LXMLWebScrapingStrategy(),
            verbose=True,
            cache_mode=CacheMode.BYPASS,
            stream=True,
        )

        results = []
        async for result in await crawler.arun(url="https://docs.crawl4ai.com", config=bf_config):
            results.append(result)
            score = result.metadata.get("score", 0)
            depth = result.metadata.get("depth", 0)
            print(f"  → Depth: {depth} | Score: {score:.2f} | {result.url}")

        print(f"  ✅ Crawled {len(results)} high-value pages (capped at 7 by max_pages)")
        if results:
            avg_score = sum(r.metadata.get('score', 0) for r in results) / len(results)
            print(f"  ✅ Average score: {avg_score:.2f}")
        print("  🔍 Note: BestFirstCrawlingStrategy visited the highest-scoring pages first")


# 6️⃣ Wrap-Up and Key Takeaways
async def wrap_up():
    """
    PART 6: Wrap-up - a complete crawler example combining the key concepts
    from this tutorial: filters, scorers, and streaming.
    """
    print("\n===== COMPLETE CRAWLER EXAMPLE =====")
    print("Combining filters, scorers, and streaming for an optimized crawl")

    # Create a sophisticated filter chain
    filter_chain = FilterChain(
        [
            DomainFilter(
                allowed_domains=["docs.crawl4ai.com"],
                blocked_domains=["old.docs.crawl4ai.com"],
            ),
            URLPatternFilter(patterns=["*core*", "*advanced*", "*blog*"]),
            ContentTypeFilter(allowed_types=["text/html"]),
        ]
    )

    # Create a keyword scorer for prioritizing pages
    keyword_scorer = KeywordRelevanceScorer(
        keywords=["crawl", "example", "async", "configuration"], weight=0.7
    )
    # Set up the configuration
    config = CrawlerRunConfig(
        deep_crawl_strategy=BestFirstCrawlingStrategy(
            max_depth=1,
            include_external=False,
            filter_chain=filter_chain,
            url_scorer=keyword_scorer,
        ),
        scraping_strategy=LXMLWebScrapingStrategy(),
        stream=True,
        verbose=True,
    )

    # Execute the crawl
    results = []
    start_time = time.perf_counter()

    async with AsyncWebCrawler() as crawler:
        async for result in await crawler.arun(
            url="https://docs.crawl4ai.com", config=config
        ):
            results.append(result)
            score = result.metadata.get("score", 0)
            depth = result.metadata.get("depth", 0)
            print(f"→ Depth: {depth} | Score: {score:.2f} | {result.url}")

    duration = time.perf_counter() - start_time

    # Summarize the results
    print(f"\n✅ Crawled {len(results)} high-value pages in {duration:.2f} seconds")
    print(
        f"✅ Average score: {sum(r.metadata.get('score', 0) for r in results) / len(results):.2f}"
    )

    # Group by depth
    depth_counts = {}
    for result in results:
        depth = result.metadata.get("depth", 0)
        depth_counts[depth] = depth_counts.get(depth, 0) + 1

    print("\n📊 Pages crawled by depth:")
    for depth, count in sorted(depth_counts.items()):
        print(f"  Depth {depth}: {count} pages")


async def run_tutorial():
    """
    Executes all tutorial sections in sequence.
    """
    print("\n🚀 CRAWL4AI DEEP CRAWLING TUTORIAL 🚀")
    print("======================================")
    print("This tutorial will walk you through deep crawling techniques,")
    print("from basic to advanced, using the Crawl4AI library.")

    # Define sections - comment out entries to run specific parts during development
    tutorial_sections = [
        basic_deep_crawl,
        stream_vs_nonstream,
        filters_and_scorers,
        advanced_filters,
        max_pages_and_thresholds,
        wrap_up,
    ]

    for section in tutorial_sections:
        await section()

    print("\n🎉 TUTORIAL COMPLETE! 🎉")
    print("You now have a comprehensive understanding of deep crawling with Crawl4AI.")
    print("For more information, check out https://docs.crawl4ai.com")


# Execute the tutorial when run directly
if __name__ == "__main__":
    asyncio.run(run_tutorial())
@@ -39,7 +39,7 @@ async def memory_adaptive_with_rate_limit(urls, browser_config, run_config):
     start = time.perf_counter()
     async with AsyncWebCrawler(config=browser_config) as crawler:
         dispatcher = MemoryAdaptiveDispatcher(
-            memory_threshold_percent=70.0,
+            memory_threshold_percent=95.0,
             max_session_permit=10,
             rate_limiter=RateLimiter(
                 base_delay=(1.0, 2.0), max_delay=30.0, max_retries=2
1019
docs/examples/docker/demo_docker_api.py
Normal file
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff