Compare commits
300 Commits
.gitattributes (vendored, new file, 12 changes)
@@ -0,0 +1,12 @@
# Documentation
*.html linguist-documentation
docs/* linguist-documentation
docs/examples/* linguist-documentation
docs/md_v2/* linguist-documentation

# Explicitly mark Python as the main language
*.py linguist-detectable=true
*.py linguist-language=Python

# Exclude HTML from language statistics
*.html linguist-detectable=false
.gitignore (vendored, 26 changes)
@@ -199,11 +199,35 @@ test_env/
**/.DS_Store

todo.md
todo_executor.md
git_changes.py
git_changes.md
pypi_build.sh
git_issues.py
git_issues.md

.next/
.tests/
.issues/
# .issues/
.docs/
.issues/
.gitboss/
todo_executor.md
protect-all-except-feature.sh
manage-collab.sh
publish.sh
combine.sh
combined_output.txt
.local
.scripts
tree.md
tree.md
.scripts
.local
.do
/plans
.codeiumignore
todo/

# windsurf rules
.windsurfrules
CHANGELOG.md (821 changes)
@@ -1,8 +1,797 @@
# Changelog

All notable changes to Crawl4AI will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## Version 0.4.3 (2025-01-21)

This release introduces several powerful new features, including robots.txt compliance, dynamic proxy support, LLM-powered schema generation, and improved documentation.

### Features

- **Robots.txt Compliance:**
  - Added robots.txt compliance support with efficient SQLite-based caching.
  - New `check_robots_txt` parameter in `CrawlerRunConfig` to enable robots.txt checking before crawling a URL.
  - Automated robots.txt checking is now integrated into `AsyncWebCrawler`, returning a 403 status code for blocked URLs.

- **Proxy Configuration:**
  - Added proxy configuration support to `CrawlerRunConfig`, allowing dynamic proxy settings per crawl request.
  - Updated documentation with examples for using proxy configuration in crawl operations.

- **LLM-Powered Schema Generation:**
  - Introduced a new utility for automatic CSS and XPath schema generation using OpenAI or Ollama models.
  - Added comprehensive documentation and examples for schema generation.
  - New prompt templates optimized for HTML schema analysis.

- **URL Redirection Tracking:**
  - Added URL redirection tracking to capture the final URL after any redirects.
  - The final URL is now available in the `redirected_url` field of the `AsyncCrawlResponse` object.

- **Streamlined Documentation:**
  - Refactored and improved the documentation structure for clarity and ease of use.
  - Added detailed explanations of new features and updated examples.

- **Improved Browser Context Management:**
  - Enhanced the management of browser contexts and added shared data support.
  - Introduced the `shared_data` parameter in `CrawlerRunConfig` to pass data between hooks.

- **Memory Dispatcher System:**
  - Migrated to a memory dispatcher system with enhanced monitoring capabilities.
  - Introduced `MemoryAdaptiveDispatcher` and `SemaphoreDispatcher` for improved resource management.
  - Added `RateLimiter` for rate limiting support.
  - New `CrawlerMonitor` for real-time monitoring of crawler operations.

- **Streaming Support:**
  - Added streaming support so crawl results can be consumed as each URL finishes processing.
  - Enabled streaming mode with the `stream` parameter in `CrawlerRunConfig`.

- **Content Scraping Strategy:**
  - Introduced a new `LXMLWebScrapingStrategy` for faster content scraping.
  - Added support for selecting the scraping strategy via the `scraping_strategy` parameter in `CrawlerRunConfig` (a combined usage sketch follows this list).
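The bullets above name the new `CrawlerRunConfig` options and the dispatcher classes; the following minimal sketch shows one way they might be combined. The import paths, dispatcher arguments, and proxy values are assumptions based on the feature descriptions above and may differ slightly in your installed version.

```python
import asyncio

from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
from crawl4ai.async_dispatcher import MemoryAdaptiveDispatcher, RateLimiter  # assumed module path
from crawl4ai.content_scraping_strategy import LXMLWebScrapingStrategy       # assumed module path

config = CrawlerRunConfig(
    check_robots_txt=True,                                     # honor robots.txt before fetching
    proxy_config={"server": "http://proxy.example.com:8080"},  # hypothetical proxy, set per crawl
    scraping_strategy=LXMLWebScrapingStrategy(),               # faster lxml-based scraping
    stream=True,                                               # yield results as each URL finishes
)

dispatcher = MemoryAdaptiveDispatcher(
    memory_threshold_percent=80.0,                   # assumed parameter name
    rate_limiter=RateLimiter(base_delay=(1.0, 2.0)), # assumed parameter name
)

async def main():
    urls = ["https://example.com", "https://example.org"]
    async with AsyncWebCrawler() as crawler:
        # With stream=True, results are yielded as soon as each crawl completes.
        async for result in await crawler.arun_many(urls, config=config, dispatcher=dispatcher):
            print(result.url, result.status_code, result.redirected_url)

asyncio.run(main())
```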
### Bug Fixes

- **Browser Path Management:**
  - Improved browser path management for consistent behavior across different environments.

- **Memory Threshold:**
  - Adjusted the default memory threshold to improve resource utilization.

- **Pydantic Model Fields:**
  - Made several model fields optional with default values to improve flexibility.

### Refactor

- **Documentation Structure:**
  - Reorganized documentation structure to improve navigation and readability.
  - Updated styles and added new sections for advanced features.

- **Scraping Mode:**
  - Replaced the `ScrapingMode` enum with a strategy pattern for more flexible content scraping.

- **Version Update:**
  - Updated the version to `0.4.248`.

- **Code Cleanup:**
  - Removed unused files and improved type hints.
  - Applied Ruff corrections for code quality.

- **Updated Dependencies:**
  - Updated dependencies to their latest versions to ensure compatibility and security.

- **Ignored Patterns and Directories:**
  - Updated `.gitignore` and `.codeiumignore` to ignore additional patterns and directories, streamlining the development environment.

- **Simplified Personal Story in README:**
  - Streamlined the personal story and project vision in the `README.md` for clarity.

- **Removed Deprecated Files:**
  - Deleted several deprecated files and examples that are no longer relevant.

---

**Previous Releases:**

### 0.4.24x (2024-12-31)
- **Enhanced SSL & Security**: New SSL certificate handling with custom paths and validation options for secure crawling.
- **Smart Content Filtering**: Advanced filtering system with regex support and efficient chunking strategies.
- **Improved JSON Extraction**: Support for complex JSONPath, JSON-CSS, and Microdata extraction.
- **New Field Types**: Added `computed`, `conditional`, `aggregate`, and `template` field types.
- **Performance Boost**: Optimized caching, parallel processing, and memory management.
- **Better Error Handling**: Enhanced debugging capabilities with detailed error tracking.
- **Security Features**: Improved input validation and safe expression evaluation.

### 0.4.247 (2025-01-06)

#### Added
- **Windows Event Loop Configuration**: Introduced a utility function `configure_windows_event_loop` to resolve `NotImplementedError` for asyncio subprocesses on Windows (usage sketch after this list). ([#utils.py](crawl4ai/utils.py), [#tutorials/async-webcrawler-basics.md](docs/md_v3/tutorials/async-webcrawler-basics.md))
- **`page_need_scroll` Method**: Added a method to determine if a page requires scrolling before taking actions in `AsyncPlaywrightCrawlerStrategy`. ([#async_crawler_strategy.py](crawl4ai/async_crawler_strategy.py))
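A minimal sketch of the Windows fix described above, assuming `configure_windows_event_loop` is importable from `crawl4ai.utils` as the file reference suggests:

```python
import asyncio
import sys

from crawl4ai import AsyncWebCrawler
from crawl4ai.utils import configure_windows_event_loop  # assumed import path

# On Windows, switch to an event loop policy that supports asyncio subprocesses
# before creating the crawler; this avoids the NotImplementedError mentioned above.
if sys.platform == "win32":
    configure_windows_event_loop()

async def main():
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(url="https://example.com")
        print(result.markdown[:300])

asyncio.run(main())
```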
#### Changed
- **Version Bump**: Updated the version from `0.4.246` to `0.4.247`. ([#__version__.py](crawl4ai/__version__.py))
- **Improved Scrolling Logic**: Enhanced scrolling methods in `AsyncPlaywrightCrawlerStrategy` by adding a `scroll_delay` parameter for better control. ([#async_crawler_strategy.py](crawl4ai/async_crawler_strategy.py))
- **Markdown Generation Example**: Updated the `hello_world.py` example to reflect the latest API changes and better illustrate features. ([#examples/hello_world.py](docs/examples/hello_world.py))
- **Documentation Update**:
  - Added Windows-specific instructions for handling asyncio event loops. ([#async-webcrawler-basics.md](docs/md_v3/tutorials/async-webcrawler-basics.md))

#### Removed
- **Legacy Markdown Generation Code**: Removed outdated and unused code for markdown generation in `content_scraping_strategy.py`. ([#content_scraping_strategy.py](crawl4ai/content_scraping_strategy.py))

#### Fixed
- **Page Closing to Prevent Memory Leaks**:
  - **Description**: Added a `finally` block to ensure pages are closed when no `session_id` is provided.
  - **Impact**: Prevents memory leaks caused by lingering pages after a crawl.
  - **File**: [`async_crawler_strategy.py`](crawl4ai/async_crawler_strategy.py)
  - **Code**:

    ```python
    finally:
        # If no session_id is given we should close the page
        if not config.session_id:
            await page.close()
    ```

- **Multiple Element Selection**: Modified `_get_elements` in `JsonCssExtractionStrategy` to return all matching elements instead of just the first one, ensuring comprehensive extraction. ([#extraction_strategy.py](crawl4ai/extraction_strategy.py))
- **Error Handling in Scrolling**: Added robust error handling to ensure scrolling proceeds safely even if a configuration is missing. ([#async_crawler_strategy.py](crawl4ai/async_crawler_strategy.py))

#### Other
- **Git Ignore Update**: Added `/plans` to `.gitignore` for better development environment consistency. ([#.gitignore](.gitignore))

## [0.4.24] - 2024-12-31

### Added
- **Browser and SSL Handling**
  - SSL certificate validation options in extraction strategies
  - Custom certificate paths support
  - Configurable certificate validation skipping
  - Enhanced response status code handling with retry logic

- **Content Processing**
  - New content filtering system with regex support
  - Advanced chunking strategies for large content
  - Memory-efficient parallel processing
  - Configurable chunk size optimization

- **JSON Extraction**
  - Complex JSONPath expression support
  - JSON-CSS and Microdata extraction
  - RDFa parsing capabilities
  - Advanced data transformation pipeline

- **Field Types**
  - New field types: `computed`, `conditional`, `aggregate`, `template`
  - Field inheritance system
  - Reusable field definitions
  - Custom validation rules

### Changed
- **Performance**
  - Optimized selector compilation with caching
  - Improved HTML parsing efficiency
  - Enhanced memory management for large documents
  - Batch processing optimizations

- **Error Handling**
  - More detailed error messages and categorization
  - Enhanced debugging capabilities
  - Improved performance metrics tracking
  - Better error recovery mechanisms

### Deprecated
- Old field computation method using `eval`
- Direct browser manipulation without proper SSL handling
- Simple text-based content filtering

### Removed
- Legacy extraction patterns without proper error handling
- Unsafe eval-based field computation
- Direct DOM manipulation without sanitization

### Fixed
- Memory leaks in large document processing
- SSL certificate validation issues
- Incorrect handling of nested JSON structures
- Performance bottlenecks in parallel processing

### Security
- Improved input validation and sanitization
- Safe expression evaluation system
- Enhanced resource protection
- Rate limiting implementation

## [0.4.1] - 2024-12-08

### **File: `crawl4ai/async_crawler_strategy.py`**

#### **New Parameters and Attributes Added**
- **`text_mode` (boolean)**: Enables text-only mode; disables images, JavaScript, and GPU-related features for faster, minimal rendering (see the sketch after this list).
- **`light_mode` (boolean)**: Optimizes the browser by disabling unnecessary background processes and features for efficiency.
- **`viewport_width` and `viewport_height`**: Dynamically adjusted based on `text_mode` (defaults: 800x600 with `text_mode`, 1920x1080 otherwise).
- **`extra_args`**: Adds browser-specific flags for `text_mode`.
- **`adjust_viewport_to_content`**: Dynamically adjusts the viewport to the content size for accurate rendering.
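A minimal sketch of the new rendering flags, assuming they are passed as keyword arguments through the `AsyncWebCrawler` constructor to the Playwright strategy; the exact placement of these parameters may differ by version (in later releases they live on the browser configuration object):

```python
import asyncio

from crawl4ai import AsyncWebCrawler

async def main():
    async with AsyncWebCrawler(
        text_mode=True,   # minimal rendering: images and GPU-related features disabled
        light_mode=True,  # disable unnecessary background browser services
        verbose=True,
    ) as crawler:
        result = await crawler.arun(url="https://example.com")
        print(result.markdown[:300])

asyncio.run(main())
```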
#### **Browser Context Adjustments**
- Added **`viewport` adjustments**: Dynamically computed based on `text_mode` or custom configuration.
- Enhanced support for `light_mode` and `text_mode` by adding specific browser arguments to reduce resource consumption.

#### **Dynamic Content Handling**
- **Full Page Scan Feature**:
  - Scrolls through the entire page while dynamically detecting content changes.
  - Ensures scrolling stops when no new dynamic content is loaded.

#### **Session Management**
- Added **`create_session`** method:
  - Creates a new browser session and assigns a unique ID.
  - Supports persistent and non-persistent contexts with full compatibility for cookies, headers, and proxies.

#### **Improved Content Loading and Adjustment**
- **`adjust_viewport_to_content`**:
  - Automatically adjusts viewport to match content dimensions.
  - Includes scaling via Chrome DevTools Protocol (CDP).
- Enhanced content loading:
  - Waits for images to load and ensures network activity is idle before proceeding.

#### **Error Handling and Logging**
- Improved error handling and detailed logging for:
  - Viewport adjustment (`adjust_viewport_to_content`).
  - Full page scanning (`scan_full_page`).
  - Dynamic content loading.

#### **Refactoring and Cleanup**
- Replaced hardcoded viewport dimensions in multiple places with dynamic values (`self.viewport_width`, `self.viewport_height`).
- Removed commented-out and unused code for better readability.
- Added default value for `delay_before_return_html` parameter.

#### **Optimizations**
- Reduced resource usage in `light_mode` by disabling unnecessary browser features such as extensions, background timers, and sync.
- Improved compatibility for different browser types (`chrome`, `firefox`, `webkit`).

---

### **File: `docs/examples/quickstart_async.py`**

#### **Schema Adjustment**
- Changed schema reference for `LLMExtractionStrategy`:
  - **Old**: `OpenAIModelFee.schema()`
  - **New**: `OpenAIModelFee.model_json_schema()`
  - This aligns with Pydantic v2, where `schema()` is deprecated in favor of `model_json_schema()`, and ensures compatibility with the `OpenAIModelFee` class and its JSON schema.

#### **Documentation Comments Updated**
- Improved extraction instruction for schema-based LLM strategies.

---

### **New Features Added**
1. **Text-Only Mode**:
   - Focuses on minimal resource usage by disabling non-essential browser features.
2. **Light Mode**:
   - Optimizes browser for performance by disabling background tasks and unnecessary services.
3. **Full Page Scanning**:
   - Ensures the entire content of a page is crawled, including dynamic elements loaded during scrolling.
4. **Dynamic Viewport Adjustment**:
   - Automatically resizes the viewport to match content dimensions, improving compatibility and rendering accuracy.
5. **Session Management**:
   - Simplifies session handling with better support for persistent and non-persistent contexts.

---

### **Bug Fixes**
- Fixed potential viewport mismatches by ensuring consistent use of `self.viewport_width` and `self.viewport_height` throughout the code.
- Improved robustness of dynamic content loading to avoid timeouts and failed evaluations.

## [0.3.75] December 1, 2024

### PruningContentFilter

#### 1. Introduced PruningContentFilter (Dec 01, 2024)
A new content filtering strategy that removes less relevant nodes based on metrics like text and link density (a usage sketch follows the affected files).

**Affected Files:**
- `crawl4ai/content_filter_strategy.py`: Enhancement of content filtering capabilities.
  ```diff
  Implemented effective pruning algorithm with comprehensive scoring.
  ```
- `README.md`: Improved documentation regarding new features.
  ```diff
  Updated to include usage and explanation for the PruningContentFilter.
  ```
- `docs/md_v2/basic/content_filtering.md`: Expanded documentation for users.
  ```diff
  Added detailed section explaining the PruningContentFilter.
  ```
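The filter is typically combined with a markdown generator. Here is a minimal sketch; the constructor arguments and the `markdown_generator` keyword are assumptions based on the documentation files listed above and may differ by version:

```python
import asyncio

from crawl4ai import AsyncWebCrawler
from crawl4ai.content_filter_strategy import PruningContentFilter
from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator

async def main():
    # Prune low-signal nodes (by text/link density) before markdown generation.
    prune_filter = PruningContentFilter(threshold=0.48, threshold_type="fixed", min_word_threshold=0)
    md_generator = DefaultMarkdownGenerator(content_filter=prune_filter)

    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(url="https://example.com", markdown_generator=md_generator)
        print(result.markdown_v2.fit_markdown)  # markdown built from the filtered ("fit") content

asyncio.run(main())
```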
#### 2. Added Unit Tests for PruningContentFilter (Dec 01, 2024)
Comprehensive tests added to ensure correct functionality of the PruningContentFilter.

**Affected Files:**
- `tests/async/test_content_filter_prune.py`: Increased test coverage for content filtering strategies.
  ```diff
  Created test cases for various scenarios using the PruningContentFilter.
  ```

### Development Updates

#### 3. Enhanced BM25ContentFilter tests (Dec 01, 2024)
Extended testing to cover additional edge cases and performance metrics.

**Affected Files:**
- `tests/async/test_content_filter_bm25.py`: Improved reliability and performance assurance.
  ```diff
  Added tests for new extraction scenarios including malformed HTML.
  ```

### Infrastructure & Documentation

#### 4. Updated Examples (Dec 01, 2024)
Altered examples in documentation to promote the use of PruningContentFilter alongside existing strategies.

**Affected Files:**
- `docs/examples/quickstart_async.py`: Enhanced usability and clarity for new users.
  - Revised example to illustrate usage of PruningContentFilter.
## [0.3.746] November 29, 2024

### Major Features
1. Enhanced Docker Support (Nov 29, 2024)
   - Improved GPU support in Docker images.
   - Dockerfile refactored for better platform-specific installations.
   - Introduced new Docker commands for different platforms:
     - `basic-amd64`, `all-amd64`, `gpu-amd64` for AMD64.
     - `basic-arm64`, `all-arm64`, `gpu-arm64` for ARM64.

### Infrastructure & Documentation
- Enhanced README.md to improve user guidance and installation instructions.
- Added installation instructions for Playwright setup in README.
- Created and updated examples in `docs/examples/quickstart_async.py` to be more useful and user-friendly.
- Updated `requirements.txt` with a new `pydantic` dependency.
- Bumped version number in `crawl4ai/__version__.py` to 0.3.746.

### Breaking Changes
- Streamlined application structure:
  - Removed static pages and related code from `main.py` which might affect existing deployments relying on static content.

### Development Updates
- Developed `post_install` method in `crawl4ai/install.py` to streamline post-installation setup tasks.
- Refined migration processes in `crawl4ai/migrations.py` with enhanced logging for better error visibility.
- Updated `docker-compose.yml` to support local and hub services for different architectures, enhancing build and deploy capabilities.
- Refactored example test cases in `docs/examples/docker_example.py` to facilitate comprehensive testing.

### README.md
Updated README with new docker commands and setup instructions.
Enhanced installation instructions and guidance.

### crawl4ai/install.py
Added post-install script functionality.
Introduced `post_install` method for automation of post-installation tasks.

### crawl4ai/migrations.py
Improved migration logging.
Refined migration processes and added better logging.

### docker-compose.yml
Refactored docker-compose for better service management.
Updated to define services for different platforms and versions.

### requirements.txt
Updated dependencies.
Added `pydantic` to requirements file.

### crawler/__version__.py
Updated version number.
Bumped version number to 0.3.746.

### docs/examples/quickstart_async.py
Enhanced example scripts.
Uncommented example usage in async guide for user functionality.

### main.py
Refactored code to improve maintainability.
Streamlined app structure by removing static pages code.
## [0.3.743] November 27, 2024

Enhanced features and documentation:
- Updated version to 0.3.743
- Improved ManagedBrowser configuration with dynamic host/port
- Implemented fast HTML formatting in web crawler
- Enhanced markdown generation with a new generator class
- Improved sanitization and utility functions
- Added contributor details and pull request acknowledgments
- Updated documentation for clearer usage scenarios
- Adjusted tests to reflect class name changes

### CONTRIBUTORS.md
Added new contributors and pull request details.
Updated community contributions and acknowledged pull requests.

### crawl4ai/__version__.py
Version update.
Bumped version to 0.3.743.

### crawl4ai/async_crawler_strategy.py
Improved ManagedBrowser configuration.
Enhanced browser initialization with configurable host and debugging port; improved hook execution.

### crawl4ai/async_webcrawler.py
Optimized HTML processing.
Implemented `fast_format_html` for optimized HTML formatting; applied it when `prettiify` is enabled.

### crawl4ai/content_scraping_strategy.py
Enhanced markdown generation strategy.
Updated to use DefaultMarkdownGenerator and improved markdown generation with filters option.

### crawl4ai/markdown_generation_strategy.py
Refactored markdown generation class.
Renamed DefaultMarkdownGenerationStrategy to DefaultMarkdownGenerator; added content filter handling.

### crawl4ai/utils.py
Enhanced utility functions.
Improved input sanitization and enhanced HTML formatting method.

### docs/md_v2/advanced/hooks-auth.md
Improved documentation for hooks.
Updated code examples to include cookies in crawler strategy initialization.

### tests/async/test_markdown_genertor.py
Refactored tests to match class renaming.
Updated tests to use renamed DefaultMarkdownGenerator class.
## [0.3.74] November 17, 2024

This changelog details the updates and changes introduced in Crawl4AI version 0.3.74. It's designed to inform developers about new features, modifications to existing components, removals, and other important information.

### 1. File Download Processing

- Users can now specify download folders using the `downloads_path` parameter in the `AsyncWebCrawler` constructor or the `arun` method. If not specified, downloads are saved to a "downloads" folder within the `.crawl4ai` directory.
- File download tracking is integrated into the `CrawlResult` object. Successfully downloaded files are listed in the `downloaded_files` attribute, providing their paths.
- Added `accept_downloads` parameter to the crawler strategies (defaults to `False`). If set to `True`, you can add JS code and a `wait_for` value to trigger and wait for file downloads.

**Example:**

```python
import asyncio
import os
from pathlib import Path
from crawl4ai import AsyncWebCrawler

async def download_example():
    downloads_path = os.path.join(Path.home(), ".crawl4ai", "downloads")
    os.makedirs(downloads_path, exist_ok=True)

    async with AsyncWebCrawler(
        accept_downloads=True,
        downloads_path=downloads_path,
        verbose=True
    ) as crawler:
        result = await crawler.arun(
            url="https://www.python.org/downloads/",
            js_code="""
                const downloadLink = document.querySelector('a[href$=".exe"]');
                if (downloadLink) { downloadLink.click(); }
            """,
            wait_for=5  # To ensure download has started
        )

        if result.downloaded_files:
            print("Downloaded files:")
            for file in result.downloaded_files:
                print(f"- {file}")

asyncio.run(download_example())
```

### 2. Refined Content Filtering

- Introduced the `RelevanceContentFilter` strategy (and its implementation `BM25ContentFilter`) for extracting relevant content from web pages, replacing Fit Markdown and other content cleaning strategies. This new strategy leverages the BM25 algorithm to identify chunks of text relevant to the page's title, description, keywords, or a user-provided query.
- The `fit_markdown` flag in the content scraper is used to filter content based on title, meta description, and keywords.

**Example:**

```python
import asyncio

from crawl4ai import AsyncWebCrawler
from crawl4ai.content_filter_strategy import BM25ContentFilter

async def filter_content(url, query):
    async with AsyncWebCrawler() as crawler:
        content_filter = BM25ContentFilter(user_query=query)
        result = await crawler.arun(url=url, extraction_strategy=content_filter, fit_markdown=True)
        print(result.extracted_content)  # Or result.fit_markdown for the markdown version
        print(result.fit_html)           # HTML reduced to the filtered content

asyncio.run(filter_content("https://en.wikipedia.org/wiki/Apple", "fruit nutrition health"))
```

### 3. Raw HTML and Local File Support

- Added support for crawling local files and raw HTML content directly.
- Use the `file://` prefix for local file paths.
- Use the `raw:` prefix for raw HTML strings.

**Example:**

```python
import asyncio
import os

from crawl4ai import AsyncWebCrawler

async def crawl_local_or_raw(crawler, content, content_type):
    prefix = "file://" if content_type == "local" else "raw:"
    url = f"{prefix}{content}"
    result = await crawler.arun(url=url)
    if result.success:
        print(f"Markdown Content from {content_type.title()} Source:")
        print(result.markdown)

# Example usage with local file and raw HTML
async def main():
    async with AsyncWebCrawler() as crawler:
        # Local File
        await crawl_local_or_raw(
            crawler, os.path.abspath('tests/async/sample_wikipedia.html'), "local"
        )
        # Raw HTML
        await crawl_local_or_raw(
            crawler, "<h1>Raw Test</h1><p>This is raw HTML.</p>", "raw"
        )

asyncio.run(main())
```

### 4. Browser Management

- New asynchronous crawler strategy implemented using Playwright.
- `ManagedBrowser` class introduced for improved browser session handling, offering features like persistent browser sessions between requests (using the `session_id` parameter) and browser process monitoring.
- Updated to tf-playwright-stealth for enhanced stealth capabilities.
- Added `use_managed_browser`, `use_persistent_context`, and `chrome_channel` parameters to `AsyncPlaywrightCrawlerStrategy`.

**Example:**
```python
import asyncio
import os
from pathlib import Path

from crawl4ai import AsyncWebCrawler

async def browser_management_demo():
    user_data_dir = os.path.join(Path.home(), ".crawl4ai", "user-data-dir")
    os.makedirs(user_data_dir, exist_ok=True)  # Ensure directory exists
    async with AsyncWebCrawler(
        use_managed_browser=True,
        user_data_dir=user_data_dir,
        use_persistent_context=True,
        verbose=True
    ) as crawler:
        result1 = await crawler.arun(
            url="https://example.com", session_id="my_session"
        )
        result2 = await crawler.arun(
            url="https://example.com/anotherpage", session_id="my_session"
        )

asyncio.run(browser_management_demo())
```

### 5. API Server & Cache Improvements

- Added CORS support to API server.
- Implemented static file serving.
- Enhanced root redirect functionality.
- Cache database updated to store response headers and downloaded files information. It utilizes a file system approach to manage large content efficiently.
- New, more efficient caching database built using xxhash and a file system approach.
- Introduced `CacheMode` enum (`ENABLED`, `DISABLED`, `READ_ONLY`, `WRITE_ONLY`, `BYPASS`) and the `always_bypass_cache` parameter in `AsyncWebCrawler` for fine-grained cache control. This replaces `bypass_cache`, `no_cache_read`, `no_cache_write`, and `always_by_pass_cache`.

### 🗑️ Removals

- Removed deprecated: `crawl4ai/content_cleaning_strategy.py`.
- Removed internal class `ContentCleaningStrategy`.
- Removed legacy cache control flags: `bypass_cache`, `disable_cache`, `no_cache_read`, `no_cache_write`, and `always_by_pass_cache`. These have been superseded by `cache_mode`.

### ⚙️ Other Changes

- Moved version file to `crawl4ai/__version__.py`.
- Added `crawl4ai/cache_context.py`.
- Added `crawl4ai/version_manager.py`.
- Added `crawl4ai/migrations.py`.
- Added `crawl4ai-migrate` entry point.
- Added config `NEED_MIGRATION` and `SHOW_DEPRECATION_WARNINGS`.
- API server now requires an API token for authentication, configurable with the `CRAWL4AI_API_TOKEN` environment variable. This enhances API security (see the request sketch after this list).
- Added synchronous crawl endpoint `/crawl_sync` for immediate result retrieval, and direct crawl endpoint `/crawl_direct` bypassing the task queue.
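As a rough illustration of authenticating against the API server, here is a minimal request sketch. The host/port, payload fields, and response shape are assumptions; consult the Docker deployment docs for the exact contract of `/crawl_sync` and `/crawl_direct`.

```python
import os

import requests

# Token is read from the same environment variable the server is configured with.
api_token = os.environ["CRAWL4AI_API_TOKEN"]

response = requests.post(
    "http://localhost:11235/crawl_sync",               # assumed host/port of the Dockerized server
    headers={"Authorization": f"Bearer {api_token}"},
    json={"urls": "https://example.com"},               # assumed payload shape
    timeout=120,
)
response.raise_for_status()
print(response.json())
```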
### ⚠️ Deprecation Notices

- The synchronous version of `WebCrawler` is being phased out. While still available via `crawl4ai[sync]`, it will eventually be removed. Transition to `AsyncWebCrawler` is strongly recommended. Boolean cache control flags in `arun` are also deprecated; migrate to the `cache_mode` parameter. See the examples in the "New Features" section above for correct usage.

### 🐛 Bug Fixes

- Resolved issue with browser context closing unexpectedly in Docker. This significantly improves stability, particularly within containerized environments.
- Fixed memory leaks associated with incorrect asynchronous cleanup by removing the `__del__` method and ensuring the browser context is closed explicitly using context managers.
- Improved error handling in `WebScrapingStrategy`. More detailed error messages and suggestions for debugging will minimize frustration when running into unexpected issues.
- Fixed issue with incorrect text parsing in specific HTML structures.

### Example of migrating to the new CacheMode

**Old way:**

```python
crawler = AsyncWebCrawler(always_by_pass_cache=True)
result = await crawler.arun(url="https://example.com", bypass_cache=True)
```

**New way:**

```python
from crawl4ai import CacheMode

crawler = AsyncWebCrawler(always_bypass_cache=True)
result = await crawler.arun(url="https://example.com", cache_mode=CacheMode.BYPASS)
```

## [0.3.74] - November 13, 2024

1. **File Download Processing** (Nov 14, 2024)
   - Added capability for users to specify download folders
   - Implemented file download tracking in the `CrawlResult` object
   - Created new file: `tests/async/test_async_doanloader.py`

2. **Content Filtering Improvements** (Nov 14, 2024)
   - Introduced Relevance Content Filter as an improvement over Fit Markdown
   - Implemented BM25 algorithm for content relevance matching
   - Added new file: `crawl4ai/content_filter_strategy.py`
   - Removed deprecated: `crawl4ai/content_cleaning_strategy.py`

3. **Local File and Raw HTML Support** (Nov 13, 2024)
   - Added support for processing local files
   - Implemented raw HTML input handling in AsyncWebCrawler
   - Enhanced `crawl4ai/async_webcrawler.py` with significant performance improvements

4. **Browser Management Enhancements** (Nov 12, 2024)
   - Implemented new async crawler strategy using Playwright
   - Introduced ManagedBrowser for better browser session handling
   - Added support for persistent browser sessions
   - Updated from playwright_stealth to tf-playwright-stealth

5. **API Server Component**
   - Added CORS support
   - Implemented static file serving
   - Enhanced root redirect functionality

## [0.3.731] - November 13, 2024

### Added
- Support for raw HTML and local file crawling via URL prefixes ('raw:', 'file://')
- Browser process monitoring for managed browser instances
- Screenshot capability for raw HTML and local file content
- Response headers storage in cache database
- New `fit_markdown` flag for optional markdown generation

### Changed
- Switched HTML parser from 'html.parser' to 'lxml' for ~4x performance improvement
- Optimized BeautifulSoup text conversion and element selection
- Pre-compiled regular expressions for better performance
- Improved metadata extraction efficiency
- Response headers now stored alongside HTML in cache

### Removed
- `__del__` method from AsyncPlaywrightCrawlerStrategy to prevent async cleanup issues

### Fixed
- Issue #256: Added support for crawling raw HTML content
- Issue #253: Implemented file:// protocol handling
- Missing response headers in cached results
- Memory leaks from improper async cleanup

## [v0.3.731] - 2024-11-13 Changelog for Issue 256 Fix
- Fixed: Browser context unexpectedly closing in Docker environment during crawl operations.
- Removed: `__del__` method from AsyncPlaywrightCrawlerStrategy to prevent unreliable asynchronous cleanup, ensuring the browser context is closed explicitly within context managers.
- Added: Monitoring for ManagedBrowser subprocess to detect and log unexpected terminations.
- Updated: Dockerfile configurations to expose debugging port (9222) and allocate additional shared memory for improved browser stability.
- Improved: Error handling and resource cleanup processes for browser lifecycle management within the Docker environment.
## [v0.3.73] - 2024-11-05

### Major Features
- **New Doctor Feature**
  - Added comprehensive system diagnostics tool
  - Available through package hub and CLI
  - Provides automated troubleshooting and system health checks
  - Includes detailed reporting of configuration issues

- **Dockerized API Server**
  - Released complete Docker implementation for API server
  - Added comprehensive documentation for Docker deployment
  - Implemented container communication protocols
  - Added environment configuration guides

- **Managed Browser Integration**
  - Added support for user-controlled browser instances
  - Implemented `ManagedBrowser` class for better browser lifecycle management
  - Added ability to connect to existing Chrome DevTools Protocol (CDP) endpoints
  - Introduced user data directory support for persistent browser profiles

- **Enhanced HTML Processing**
  - Added HTML tag preservation feature during markdown conversion
  - Introduced configurable tag preservation system
  - Improved pre-tag and code block handling
  - Added support for nested preserved tags with attribute retention

### Improvements
- **Browser Handling**
  - Added flag to ignore body visibility for problematic pages
  - Improved browser process cleanup and management
  - Enhanced temporary directory handling for browser profiles
  - Added configurable browser launch arguments

- **Database Management**
  - Implemented connection pooling for better performance
  - Added retry logic for database operations
  - Improved error handling and logging
  - Enhanced cleanup procedures for database connections

- **Resource Management**
  - Added memory and CPU monitoring
  - Implemented dynamic task slot allocation based on system resources
  - Added configurable cleanup intervals

### Technical Improvements
- **Code Structure**
  - Moved version management to dedicated _version.py file
  - Improved error handling throughout the codebase
  - Enhanced logging system with better error reporting
  - Reorganized core components for better maintainability

### Bug Fixes
- Fixed issues with browser process termination
- Improved handling of connection timeouts
- Enhanced error recovery in database operations
- Fixed memory leaks in long-running processes

### Dependencies
- Updated Playwright to v1.47
- Updated core dependencies with more flexible version constraints
- Added new development dependencies for testing

### Breaking Changes
- Changed default browser handling behavior
- Modified database connection management approach
- Updated API response structure for better consistency

### Migration Guide
When upgrading to v0.3.73, be aware of the following changes:

1. Docker Deployment:
   - Review Docker documentation for new deployment options
   - Update environment configurations as needed
   - Check container communication settings

2. If using custom browser management:
   - Update browser initialization code to use new ManagedBrowser class
   - Review browser cleanup procedures

3. For database operations:
   - Check custom database queries for compatibility with new connection pooling
   - Update error handling to work with new retry logic

4. Using the Doctor:
   - Run doctor command for system diagnostics: `crawl4ai doctor`
   - Review generated reports for potential issues
   - Follow recommended fixes for any identified problems

## [v0.3.73] - 2024-11-04
This commit introduces several key enhancements, including improved error handling and robust database operations in `async_database.py`, which now features a connection pool and retry logic for better reliability. Updates to the README.md provide clearer instructions and a better user experience with links to documentation sections. The `.gitignore` file has been refined to include additional directories, while the async web crawler now utilizes a managed browser for more efficient crawling. Furthermore, multiple dependency updates and introduction of the `CustomHTML2Text` class enhance text extraction capabilities.

## [v0.3.73] - 2024-10-24

### Added
- `preserve_tags`: Added support for preserving specific HTML tags during markdown conversion.
- Smart overlay removal system in AsyncPlaywrightCrawlerStrategy:
  - Automatic removal of popups, modals, and cookie notices
  - Detection and removal of fixed/sticky position elements
@@ -84,7 +873,7 @@

## [v0.3.72] - 2024-10-20

### Fixed
-- Added support for parsing Base64 encoded images in WebScrappingStrategy
+- Added support for parsing Base64 encoded images in WebScrapingStrategy

### Added
- Forked and integrated a customized version of the html2text library for more control over Markdown generation

@@ -107,7 +896,7 @@

### Developer Notes
- The customized html2text library is now located within the crawl4ai package
- New configuration options are available in the `config.py` file for external content handling
-- The `WebScrappingStrategy` class has been updated to accommodate new external content exclusion options
+- The `WebScrapingStrategy` class has been updated to accommodate new external content exclusion options

## [v0.3.71] - 2024-10-19

@@ -121,7 +910,7 @@
- Improved `AsyncPlaywrightCrawlerStrategy.close()` method to use a shorter sleep time (0.5 seconds instead of 500), significantly reducing wait time when closing the crawler.
- Enhanced flexibility in `CosineStrategy`:
  - Now uses a more generic `load_HF_embedding_model` function, allowing for easier swapping of embedding models.
-- Updated `JsonCssExtractionStrategy` and `JsonXPATHExtractionStrategy` for better JSON-based extraction.
+- Updated `JsonCssExtractionStrategy` and `JsonXPathExtractionStrategy` for better JSON-based extraction.

### Fixed
- Addressed potential issues with the sliding window chunking strategy to ensure all text is properly chunked.

@@ -184,7 +973,7 @@ These updates aim to provide more flexibility in text processing, improve perfor

### Improvements
1. **Better Error Handling**:
-   - Enhanced error reporting in WebScrappingStrategy with detailed error messages and suggestions.
+   - Enhanced error reporting in WebScrapingStrategy with detailed error messages and suggestions.
   - Added console message and error logging for better debugging.

2. **Image Processing Enhancements**:

@@ -242,43 +1031,43 @@ These updates aim to provide more flexibility in text processing, improve perfor
   - Allows retrieval of content after a specified delay, useful for dynamically loaded content.
   - **How to use**: Access `result.get_delayed_content(delay_in_seconds)` after crawling.

-## Improvements and Optimizations
+### Improvements and Optimizations

-### 1. AsyncWebCrawler Enhancements
+#### 1. AsyncWebCrawler Enhancements
- **Flexible Initialization**: Now accepts arbitrary keyword arguments, passed directly to the crawler strategy.
  - Allows for more customized setups.

-### 2. Image Processing Optimization
-- Enhanced image handling in WebScrappingStrategy.
+#### 2. Image Processing Optimization
+- Enhanced image handling in WebScrapingStrategy.
- Added filtering for small, invisible, or irrelevant images.
- Improved image scoring system for better content relevance.
- Implemented JavaScript-based image dimension updating for more accurate representation.

-### 3. Database Schema Auto-updates
+#### 3. Database Schema Auto-updates
- Automatic database schema updates ensure compatibility with the latest version.

-### 4. Enhanced Error Handling and Logging
+#### 4. Enhanced Error Handling and Logging
- Improved error messages and logging for easier debugging.

-### 5. Content Extraction Refinements
+#### 5. Content Extraction Refinements
- Refined HTML sanitization process.
- Improved handling of base64 encoded images.
- Enhanced Markdown conversion process.
- Optimized content extraction algorithms.

-### 6. Utility Function Enhancements
+#### 6. Utility Function Enhancements
- `perform_completion_with_backoff` function now supports additional arguments for more customized API calls to LLM providers.

-## Bug Fixes
+### Bug Fixes
- Fixed an issue where image tags were being prematurely removed during content extraction.

-## Examples and Documentation
+### Examples and Documentation
- Updated `quickstart_async.py` with examples of:
  - Using custom headers in LLM extraction.
  - Different LLM provider usage (OpenAI, Hugging Face, Ollama).
  - Custom browser type usage.

-## Developer Notes
+### Developer Notes
- Refactored code for better maintainability, flexibility, and performance.
- Enhanced type hinting throughout the codebase for improved development experience.
- Expanded error handling for more robust operation.

@@ -392,6 +1181,6 @@ These changes focus on refining the existing codebase, resulting in a more stabl
- Maintaining the semantic context of inline tags (e.g., abbreviation, DEL, INS) for improved LLM-friendliness.
- Updated Dockerfile to ensure compatibility across multiple platforms (Hopefully!).

-## [0.2.4] - 2024-06-17
+## [v0.2.4] - 2024-06-17
### Fixed
- Fix issue #22: Use MD5 hash for caching HTML files to handle long URLs
CONTRIBUTORS.md
@@ -10,11 +10,21 @@ We would like to thank the following people for their contributions to Crawl4AI:

## Community Contributors

- [aadityakanjolia4](https://github.com/aadityakanjolia4) - Fix for the "`CustomHTML2Text` is not defined" error.
- [FractalMind](https://github.com/FractalMind) - Created the first official Docker Hub image and fixed Dockerfile errors
- [ketonkss4](https://github.com/ketonkss4) - Identified Selenium's new capabilities, helping reduce dependencies
- [jonymusky](https://github.com/jonymusky) - Javascript execution documentation, and wait_for
- [datehoer](https://github.com/datehoer) - Added browser proxy support

## Pull Requests

- [dvschuyl](https://github.com/dvschuyl) - AsyncPlaywrightCrawlerStrategy page-evaluate context destroyed by navigation [#304](https://github.com/unclecode/crawl4ai/pull/304)
- [nelzomal](https://github.com/nelzomal) - Enhance development installation instructions [#286](https://github.com/unclecode/crawl4ai/pull/286)
- [HamzaFarhan](https://github.com/HamzaFarhan) - Handled the cases where markdown_with_citations, references_markdown, and filtered_html might not be defined [#293](https://github.com/unclecode/crawl4ai/pull/293)
- [NanmiCoder](https://github.com/NanmiCoder) - fix: crawler strategy exception handling and fixes [#271](https://github.com/unclecode/crawl4ai/pull/271)
- [paulokuong](https://github.com/paulokuong) - fix: RAWL4_AI_BASE_DIRECTORY should be Path object instead of string [#298](https://github.com/unclecode/crawl4ai/pull/298)

## Other Contributors

- [Gokhan](https://github.com/gkhngyk)
Dockerfile (new file, 136 changes)
@@ -0,0 +1,136 @@
|
||||
# syntax=docker/dockerfile:1.4
|
||||
|
||||
ARG TARGETPLATFORM
|
||||
ARG BUILDPLATFORM
|
||||
|
||||
# Other build arguments
|
||||
ARG PYTHON_VERSION=3.10
|
||||
|
||||
# Base stage with system dependencies
|
||||
FROM python:${PYTHON_VERSION}-slim as base
|
||||
|
||||
# Declare ARG variables again within the build stage
|
||||
ARG INSTALL_TYPE=all
|
||||
ARG ENABLE_GPU=false
|
||||
|
||||
# Platform-specific labels
|
||||
LABEL maintainer="unclecode"
|
||||
LABEL description="🔥🕷️ Crawl4AI: Open-source LLM Friendly Web Crawler & scraper"
|
||||
LABEL version="1.0"
|
||||
|
||||
# Environment setup
|
||||
ENV PYTHONUNBUFFERED=1 \
|
||||
PYTHONDONTWRITEBYTECODE=1 \
|
||||
PIP_NO_CACHE_DIR=1 \
|
||||
PIP_DISABLE_PIP_VERSION_CHECK=1 \
|
||||
PIP_DEFAULT_TIMEOUT=100 \
|
||||
DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
# Install system dependencies
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
build-essential \
|
||||
curl \
|
||||
wget \
|
||||
gnupg \
|
||||
git \
|
||||
cmake \
|
||||
pkg-config \
|
||||
python3-dev \
|
||||
libjpeg-dev \
|
||||
libpng-dev \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Playwright system dependencies for Linux
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
libglib2.0-0 \
|
||||
libnss3 \
|
||||
libnspr4 \
|
||||
libatk1.0-0 \
|
||||
libatk-bridge2.0-0 \
|
||||
libcups2 \
|
||||
libdrm2 \
|
||||
libdbus-1-3 \
|
||||
libxcb1 \
|
||||
libxkbcommon0 \
|
||||
libx11-6 \
|
||||
libxcomposite1 \
|
||||
libxdamage1 \
|
||||
libxext6 \
|
||||
libxfixes3 \
|
||||
libxrandr2 \
|
||||
libgbm1 \
|
||||
libpango-1.0-0 \
|
||||
libcairo2 \
|
||||
libasound2 \
|
||||
libatspi2.0-0 \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# GPU support if enabled and architecture is supported
|
||||
RUN if [ "$ENABLE_GPU" = "true" ] && [ "$TARGETPLATFORM" = "linux/amd64" ] ; then \
|
||||
apt-get update && apt-get install -y --no-install-recommends \
|
||||
nvidia-cuda-toolkit \
|
||||
&& rm -rf /var/lib/apt/lists/* ; \
|
||||
else \
|
||||
echo "Skipping NVIDIA CUDA Toolkit installation (unsupported platform or GPU disabled)"; \
|
||||
fi
|
||||
|
||||
# Create and set working directory
|
||||
WORKDIR /app
|
||||
|
||||
# Copy the entire project
|
||||
COPY . .
|
||||
|
||||
# Install base requirements
|
||||
RUN pip install --no-cache-dir -r requirements.txt
|
||||
|
||||
# Install required library for FastAPI
|
||||
RUN pip install fastapi uvicorn psutil
|
||||
|
||||
# Install ML dependencies first for better layer caching
|
||||
RUN if [ "$INSTALL_TYPE" = "all" ] ; then \
|
||||
pip install --no-cache-dir \
|
||||
torch \
|
||||
torchvision \
|
||||
torchaudio \
|
||||
scikit-learn \
|
||||
nltk \
|
||||
transformers \
|
||||
tokenizers && \
|
||||
python -m nltk.downloader punkt stopwords ; \
|
||||
fi
|
||||
|
||||
# Install the package
|
||||
RUN if [ "$INSTALL_TYPE" = "all" ] ; then \
|
||||
pip install ".[all]" && \
|
||||
python -m crawl4ai.model_loader ; \
|
||||
elif [ "$INSTALL_TYPE" = "torch" ] ; then \
|
||||
pip install ".[torch]" ; \
|
||||
elif [ "$INSTALL_TYPE" = "transformer" ] ; then \
|
||||
pip install ".[transformer]" && \
|
||||
python -m crawl4ai.model_loader ; \
|
||||
else \
|
||||
pip install "." ; \
|
||||
fi
|
||||
|
||||
# Install MkDocs and required plugins
|
||||
RUN pip install --no-cache-dir \
|
||||
mkdocs \
|
||||
mkdocs-material \
|
||||
mkdocs-terminal \
|
||||
pymdown-extensions
|
||||
|
||||
# Build MkDocs documentation
|
||||
RUN mkdocs build
|
||||
|
||||
# Install Playwright and browsers
|
||||
RUN if [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
|
||||
playwright install chromium; \
|
||||
elif [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
|
||||
playwright install chromium; \
|
||||
fi
|
||||
|
||||
# Expose port
|
||||
EXPOSE 8000 11235 9222 8080
|
||||
|
||||
# Start the FastAPI server
|
||||
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "11235"]
@@ -1 +1,2 @@
|
||||
include requirements.txt
|
||||
include requirements.txt
|
||||
recursive-include crawl4ai/js_snippet *.js
|
||||
46
MISSION.md
Normal file
@@ -0,0 +1,46 @@
|
||||
# Mission
|
||||
|
||||

|
||||
|
||||
### 1. The Data Capitalization Opportunity
|
||||
|
||||
We live in an unprecedented era of digital wealth creation. Every day, individuals and enterprises generate massive amounts of valuable digital footprints across various platforms, social media channels, messenger apps, and cloud services. While people can interact with their data within these platforms, there's an immense untapped opportunity to transform this data into true capital assets. Just as physical property became a foundational element of wealth creation, personal and enterprise data has the potential to become a new form of capital on balance sheets.
|
||||
|
||||
For individuals, this represents an opportunity to transform their digital activities into valuable assets. For enterprises, their internal communications, team discussions, and collaborative documents contain rich insights that could be structured and valued as intellectual capital. This wealth of information represents an unprecedented opportunity for value creation in the digital age.
|
||||
|
||||
### 2. The Potential of Authentic Data
|
||||
|
||||
While synthetic data has played a crucial role in AI development, there's an enormous untapped potential in the authentic data generated by individuals and organizations. Every message, document, and interaction contains unique insights and patterns that could enhance AI development. The challenge isn't a lack of data - it's that most authentic human-generated data remains inaccessible for productive use.
|
||||
|
||||
By enabling willing participation in data sharing, we can unlock this vast reservoir of authentic human knowledge. This represents an opportunity to enhance AI development with diverse, real-world data that reflects the full spectrum of human experience and knowledge.
|
||||
|
||||
## Our Pathway to Data Democracy
|
||||
|
||||
### 1. Open-Source Foundation
|
||||
|
||||
Our first step is creating an open-source data extraction engine that empowers developers and innovators to build tools for data structuring and organization. This foundation ensures transparency, security, and community-driven development. By making these tools openly available, we enable the technical infrastructure needed for true data ownership and capitalization.
|
||||
|
||||
### 2. Data Capitalization Platform
|
||||
|
||||
Building on this open-source foundation, we're developing a platform that helps individuals and enterprises transform their digital footprints into structured, valuable assets. This platform will provide the tools and frameworks needed to organize, understand, and value personal and organizational data as true capital assets.
|
||||
|
||||
### 3. Creating a Data Marketplace
|
||||
|
||||
The final piece is establishing a marketplace where individuals and organizations can willingly share their data assets. This creates opportunities for:
|
||||
- Individuals to earn equity, revenue, or other forms of value from their data
|
||||
- Enterprises to access diverse, high-quality data for AI development
|
||||
- Researchers to work with authentic human-generated data
|
||||
- Startups to build innovative solutions using real-world data
|
||||
|
||||
## Economic Vision: A Shared Data Economy
|
||||
|
||||
We envision a future where data becomes a fundamental asset class in a thriving shared economy. This transformation will democratize AI development by enabling willing participation in data sharing, ensuring that the benefits of AI advancement flow back to data creators. Just as property rights revolutionized economic systems, establishing data as a capital asset will create new opportunities for wealth creation and economic participation.
|
||||
|
||||
This shared data economy will:
|
||||
- Enable individuals to capitalize on their digital footprints
|
||||
- Create new revenue streams for data creators
|
||||
- Provide AI developers with access to diverse, authentic data
|
||||
- Foster innovation through broader access to real-world data
|
||||
- Ensure more equitable distribution of AI's economic benefits
|
||||
|
||||
Our vision is to facilitate this transformation from the ground up - starting with open-source tools, progressing to data capitalization platforms, and ultimately creating a thriving marketplace where data becomes a true asset class in a shared economy. This approach ensures that the future of AI is built on a foundation of authentic human knowledge, with benefits flowing back to the individuals and organizations who create and share their valuable data.
|
||||
718
README.md
@@ -1,248 +1,409 @@
|
||||
# Crawl4AI (Async Version) 🕷️🤖
|
||||
# 🚀🤖 Crawl4AI: Open-source LLM Friendly Web Crawler & Scraper.
|
||||
|
||||
<div align="center">
|
||||
|
||||
<a href="https://trendshift.io/repositories/11716" target="_blank"><img src="https://trendshift.io/api/badge/repositories/11716" alt="unclecode%2Fcrawl4ai | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
|
||||
|
||||
[](https://github.com/unclecode/crawl4ai/stargazers)
|
||||
[](https://github.com/unclecode/crawl4ai/network/members)
|
||||
[](https://github.com/unclecode/crawl4ai/issues)
|
||||
[](https://github.com/unclecode/crawl4ai/pulls)
|
||||
|
||||
[](https://badge.fury.io/py/crawl4ai)
|
||||
[](https://pypi.org/project/crawl4ai/)
|
||||
[](https://pepy.tech/project/crawl4ai)
|
||||
|
||||
<!-- [](https://crawl4ai.readthedocs.io/) -->
|
||||
[](https://github.com/unclecode/crawl4ai/blob/main/LICENSE)
|
||||
[](https://github.com/psf/black)
|
||||
[](https://github.com/PyCQA/bandit)
|
||||
|
||||
Crawl4AI simplifies asynchronous web crawling and data extraction, making it accessible for large language models (LLMs) and AI applications. 🆓🌐
|
||||
</div>
|
||||
|
||||
## New in 0.3.72 ✨
|
||||
Crawl4AI is the #1 trending GitHub repository, actively maintained by a vibrant community. It delivers blazing-fast, AI-ready web crawling tailored for LLMs, AI agents, and data pipelines. Open source, flexible, and built for real-time performance, Crawl4AI empowers developers with unmatched speed, precision, and deployment ease.
|
||||
|
||||
- 📄 Fit markdown generation for extracting main article content.
|
||||
- 🪄 Magic mode for comprehensive anti-bot detection bypass.
|
||||
- 🌐 Enhanced multi-browser support with seamless switching (Chromium, Firefox, WebKit)
|
||||
- 📚 New chunking strategies (Sliding window, Overlapping window, Flexible size control)
|
||||
- 💾 Improved caching system for better performance
|
||||
- ⚡ Optimized batch processing with automatic rate limiting
|
||||
[✨ Check out latest update v0.4.3b1](#-recent-updates)
|
||||
|
||||
🎉 **Version 0.4.3b1 is out!** This release brings exciting new features like a Memory Dispatcher System, Streaming Support, LLM-Powered Markdown Generation, Schema Generation, and Robots.txt Compliance! [Read the release notes →](https://docs.crawl4ai.com/blog)
|
||||
|
||||
<details>
|
||||
<summary>🤓 <strong>My Personal Story</strong></summary>
|
||||
|
||||
My journey with computers started in childhood when my dad, a computer scientist, introduced me to an Amstrad computer. Those early days sparked a fascination with technology, leading me to pursue computer science and specialize in NLP during my postgraduate studies. It was during this time that I first delved into web crawling, building tools to help researchers organize papers and extract information from publications, a challenging yet rewarding experience that honed my skills in data extraction.
|
||||
|
||||
Fast forward to 2023, I was working on a tool for a project and needed a crawler to convert a webpage into markdown. While exploring solutions, I found one that claimed to be open-source but required creating an account and generating an API token. Worse, it turned out to be a SaaS model charging $16, and its quality didn’t meet my standards. Frustrated, I realized this was a deeper problem. That frustration turned into turbo anger mode, and I decided to build my own solution. In just a few days, I created Crawl4AI. To my surprise, it went viral, earning thousands of GitHub stars and resonating with a global community.
|
||||
|
||||
I made Crawl4AI open-source for two reasons. First, it’s my way of giving back to the open-source community that has supported me throughout my career. Second, I believe data should be accessible to everyone, not locked behind paywalls or monopolized by a few. Open access to data lays the foundation for the democratization of AI—a vision where individuals can train their own models and take ownership of their information. This library is the first step in a larger journey to create the best open-source data extraction and generation tool the world has ever seen, built collaboratively by a passionate community.
|
||||
|
||||
Thank you to everyone who has supported this project, used it, and shared feedback. Your encouragement motivates me to dream even bigger. Join us, file issues, submit PRs, or spread the word. Together, we can build a tool that truly empowers people to access their own data and reshape the future of AI.
|
||||
</details>
|
||||
|
||||
## 🧐 Why Crawl4AI?
|
||||
|
||||
1. **Built for LLMs**: Creates smart, concise Markdown optimized for RAG and fine-tuning applications.
|
||||
2. **Lightning Fast**: Delivers results 6x faster with real-time, cost-efficient performance.
|
||||
3. **Flexible Browser Control**: Offers session management, proxies, and custom hooks for seamless data access.
|
||||
4. **Heuristic Intelligence**: Uses advanced algorithms for efficient extraction, reducing reliance on costly models.
|
||||
5. **Open Source & Deployable**: Fully open-source with no API keys—ready for Docker and cloud integration.
|
||||
6. **Thriving Community**: Actively maintained by a vibrant community and the #1 trending GitHub repository.
|
||||
|
||||
## 🚀 Quick Start
|
||||
|
||||
1. Install Crawl4AI:
|
||||
```bash
|
||||
# Install the package
|
||||
pip install -U crawl4ai
|
||||
|
||||
# Run post-installation setup
|
||||
crawl4ai-setup
|
||||
|
||||
# Verify your installation
|
||||
crawl4ai-doctor
|
||||
```
|
||||
|
||||
If you encounter any browser-related issues, you can install them manually:
|
||||
```bash
|
||||
python -m playwright install --with-deps chromium
|
||||
```
|
||||
|
||||
2. Run a simple web crawl:
|
||||
```python
|
||||
import asyncio
|
||||
from crawl4ai import *
|
||||
|
||||
async def main():
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://www.nbcnews.com/business",
|
||||
)
|
||||
print(result.markdown)
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
```
|
||||
|
||||
## ✨ Features
|
||||
|
||||
<details>
|
||||
<summary>📝 <strong>Markdown Generation</strong></summary>
|
||||
|
||||
- 🧹 **Clean Markdown**: Generates clean, structured Markdown with accurate formatting.
|
||||
- 🎯 **Fit Markdown**: Heuristic-based filtering to remove noise and irrelevant parts for AI-friendly processing (see the sketch after this section).
|
||||
- 🔗 **Citations and References**: Converts page links into a numbered reference list with clean citations.
|
||||
- 🛠️ **Custom Strategies**: Users can create their own Markdown generation strategies tailored to specific needs.
|
||||
- 📚 **BM25 Algorithm**: Employs BM25-based filtering for extracting core information and removing irrelevant content.
|
||||
</details>
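As a rough illustration of the Fit Markdown and BM25 bullets above, the sketch below wires a `PruningContentFilter` into `DefaultMarkdownGenerator`. It reuses the import paths and the `markdown_v2.fit_markdown` result attribute shown elsewhere in this README; attribute names may vary between versions.

```python
import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, CacheMode
from crawl4ai.content_filter_strategy import PruningContentFilter
from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator

async def main():
    # Prune low-signal blocks before markdown generation; the threshold is a tuning knob.
    md_generator = DefaultMarkdownGenerator(
        content_filter=PruningContentFilter(threshold=0.48, threshold_type="fixed", min_word_threshold=0)
    )
    run_config = CrawlerRunConfig(
        cache_mode=CacheMode.BYPASS,
        markdown_generator=md_generator,
    )
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(url="https://www.nbcnews.com/business", config=run_config)
        # Raw markdown vs. the filtered "fit" markdown (attribute per this README's examples).
        print(len(result.markdown), len(result.markdown_v2.fit_markdown))

if __name__ == "__main__":
    asyncio.run(main())
```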
<details>
|
||||
<summary>📊 <strong>Structured Data Extraction</strong></summary>
|
||||
|
||||
- 🤖 **LLM-Driven Extraction**: Supports all LLMs (open-source and proprietary) for structured data extraction.
|
||||
- 🧱 **Chunking Strategies**: Implements chunking (topic-based, regex, sentence-level) for targeted content processing.
|
||||
- 🌌 **Cosine Similarity**: Find relevant content chunks based on user queries for semantic extraction.
|
||||
- 🔎 **CSS-Based Extraction**: Fast schema-based data extraction using XPath and CSS selectors.
|
||||
- 🔧 **Schema Definition**: Define custom schemas for extracting structured JSON from repetitive patterns (a minimal schema sketch follows this section).
|
||||
|
||||
</details>
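To make the schema-definition bullet concrete, here is a minimal sketch of a CSS-based schema fed to `JsonCssExtractionStrategy`. The selectors, field names, and target URL are hypothetical; the schema shape and import path follow the full example later in this README.

```python
import asyncio
import json
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, CacheMode
from crawl4ai.extraction_strategy import JsonCssExtractionStrategy

# Hypothetical schema: each ".product-card" element on the page yields one JSON object.
schema = {
    "name": "Products",
    "baseSelector": ".product-card",
    "fields": [
        {"name": "title", "selector": "h2", "type": "text"},
        {"name": "price", "selector": ".price", "type": "text"},
        {"name": "link", "selector": "a", "type": "attribute", "attribute": "href"},
    ],
}

async def main():
    run_config = CrawlerRunConfig(
        extraction_strategy=JsonCssExtractionStrategy(schema),
        cache_mode=CacheMode.BYPASS,
    )
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(url="https://example.com/catalog", config=run_config)
        print(json.loads(result.extracted_content))

if __name__ == "__main__":
    asyncio.run(main())
```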
<details>
|
||||
<summary>🌐 <strong>Browser Integration</strong></summary>
|
||||
|
||||
- 🖥️ **Managed Browser**: Use user-owned browsers with full control, avoiding bot detection.
|
||||
- 🔄 **Remote Browser Control**: Connect to Chrome Developer Tools Protocol for remote, large-scale data extraction.
|
||||
- 🔒 **Session Management**: Preserve browser states and reuse them for multi-step crawling.
|
||||
- 🧩 **Proxy Support**: Seamlessly connect to proxies with authentication for secure access.
|
||||
- ⚙️ **Full Browser Control**: Modify headers, cookies, user agents, and more for tailored crawling setups.
|
||||
- 🌍 **Multi-Browser Support**: Compatible with Chromium, Firefox, and WebKit (see the configuration sketch after this section).
|
||||
- 📐 **Dynamic Viewport Adjustment**: Automatically adjusts the browser viewport to match page content, ensuring complete rendering and capturing of all elements.
|
||||
|
||||
</details>
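A small sketch of the browser-control knobs listed above, using the `BrowserConfig` options that appear in this README (`headless`, `verbose`); the `browser_type` and `user_agent` parameter names are assumptions based on the multi-browser and user-agent bullets and may differ by version.

```python
import asyncio
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode

async def main():
    browser_config = BrowserConfig(
        browser_type="firefox",  # assumed: "chromium" / "firefox" / "webkit" per the multi-browser bullet
        headless=True,
        verbose=True,
        user_agent="Mozilla/5.0 (compatible; MyCrawler/0.1)",  # assumed parameter name
    )
    run_config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS)

    async with AsyncWebCrawler(config=browser_config) as crawler:
        result = await crawler.arun(url="https://example.com", config=run_config)
        print(result.markdown[:300])

if __name__ == "__main__":
    asyncio.run(main())
```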
<details>
|
||||
<summary>🔎 <strong>Crawling & Scraping</strong></summary>
|
||||
|
||||
- 🖼️ **Media Support**: Extract images, audio, videos, and responsive image formats like `srcset` and `picture`.
|
||||
- 🚀 **Dynamic Crawling**: Execute JavaScript and wait for asynchronous or synchronous conditions before extracting dynamic content.
|
||||
- 📸 **Screenshots**: Capture page screenshots during crawling for debugging or analysis.
|
||||
- 📂 **Raw Data Crawling**: Directly process raw HTML (`raw:`) or local files (`file://`), as shown in the sketch after this section.
|
||||
- 🔗 **Comprehensive Link Extraction**: Extracts internal, external links, and embedded iframe content.
|
||||
- 🛠️ **Customizable Hooks**: Define hooks at every step to customize crawling behavior.
|
||||
- 💾 **Caching**: Cache data for improved speed and to avoid redundant fetches.
|
||||
- 📄 **Metadata Extraction**: Retrieve structured metadata from web pages.
|
||||
- 📡 **IFrame Content Extraction**: Seamless extraction from embedded iframe content.
|
||||
- 🕵️ **Lazy Load Handling**: Waits for images to fully load, ensuring no content is missed due to lazy loading.
|
||||
- 🔄 **Full-Page Scanning**: Simulates scrolling to load and capture all dynamic content, perfect for infinite scroll pages.
|
||||
|
||||
</details>
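The `raw:` and `file://` prefixes from the Raw Data Crawling bullet can be exercised like this; a minimal sketch, assuming the prefix syntax shown in the bullet above, with the local file path as a placeholder.

```python
import asyncio
from crawl4ai import AsyncWebCrawler

async def main():
    # Crawl an inline HTML string, no network needed.
    raw_html = "raw:<html><body><h1>Hello</h1><p>Inline HTML content.</p></body></html>"
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(url=raw_html)
        print(result.markdown)

        # Crawl a local file (path is a placeholder).
        result = await crawler.arun(url="file:///tmp/page.html")
        print(result.markdown)

if __name__ == "__main__":
    asyncio.run(main())
```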
<details>
|
||||
<summary>🚀 <strong>Deployment</strong></summary>
|
||||
|
||||
- 🐳 **Dockerized Setup**: Optimized Docker image with API server for easy deployment.
|
||||
- 🔄 **API Gateway**: One-click deployment with secure token authentication for API-based workflows.
|
||||
- 🌐 **Scalable Architecture**: Designed for mass-scale production and optimized server performance.
|
||||
- ⚙️ **DigitalOcean Deployment**: Ready-to-deploy configurations for DigitalOcean and similar platforms.
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>🎯 <strong>Additional Features</strong></summary>
|
||||
|
||||
- 🕶️ **Stealth Mode**: Avoid bot detection by mimicking real users.
|
||||
- 🏷️ **Tag-Based Content Extraction**: Refine crawling based on custom tags, headers, or metadata.
|
||||
- 🔗 **Link Analysis**: Extract and analyze all links for detailed data exploration.
|
||||
- 🛡️ **Error Handling**: Robust error management for seamless execution.
|
||||
- 🔐 **CORS & Static Serving**: Supports filesystem-based caching and cross-origin requests.
|
||||
- 📖 **Clear Documentation**: Simplified and updated guides for onboarding and advanced usage.
|
||||
- 🙌 **Community Recognition**: Acknowledges contributors and pull requests for transparency.
|
||||
|
||||
</details>
|
||||
|
||||
## Try it Now!
|
||||
|
||||
✨ Play around with this [](https://colab.research.google.com/drive/1REChY6fXQf-EaVYLv0eHEWvzlYxGm0pd?usp=sharing)
|
||||
✨ Play around with this [](https://colab.research.google.com/drive/1SgRPrByQLzjRfwoRNq1wSGE9nYY_EE8C?usp=sharing)
|
||||
|
||||
✨ Visit our [Documentation Website](https://crawl4ai.com/mkdocs/)
|
||||
|
||||
## Features ✨
|
||||
|
||||
- 🆓 Completely free and open-source
|
||||
- 🚀 Blazing fast performance, outperforming many paid services
|
||||
- 🤖 LLM-friendly output formats (JSON, cleaned HTML, markdown)
|
||||
- 🌐 Multi-browser support (Chromium, Firefox, WebKit)
|
||||
- 🌍 Supports crawling multiple URLs simultaneously
|
||||
- 🎨 Extracts and returns all media tags (Images, Audio, and Video)
|
||||
- 🔗 Extracts all external and internal links
|
||||
- 📚 Extracts metadata from the page
|
||||
- 🔄 Custom hooks for authentication, headers, and page modifications
|
||||
- 🕵️ User-agent customization
|
||||
- 🖼️ Takes screenshots of pages with enhanced error handling
|
||||
- 📜 Executes multiple custom JavaScripts before crawling
|
||||
- 📊 Generates structured output without LLM using JsonCssExtractionStrategy
|
||||
- 📚 Various chunking strategies: topic-based, regex, sentence, and more
|
||||
- 🧠 Advanced extraction strategies: cosine clustering, LLM, and more
|
||||
- 🎯 CSS selector support for precise data extraction
|
||||
- 📝 Passes instructions/keywords to refine extraction
|
||||
- 🔒 Proxy support with authentication for enhanced access
|
||||
- 🔄 Session management for complex multi-page crawling
|
||||
- 🌐 Asynchronous architecture for improved performance
|
||||
- 🖼️ Improved image processing with lazy-loading detection
|
||||
- 🕰️ Enhanced handling of delayed content loading
|
||||
- 🔑 Custom headers support for LLM interactions
|
||||
- 🖼️ iframe content extraction for comprehensive analysis
|
||||
- ⏱️ Flexible timeout and delayed content retrieval options
|
||||
✨ Visit our [Documentation Website](https://docs.crawl4ai.com/)
|
||||
|
||||
## Installation 🛠️
|
||||
|
||||
Crawl4AI offers flexible installation options to suit various use cases. You can install it as a Python package or use Docker.
|
||||
|
||||
### Using pip 🐍
|
||||
<details>
|
||||
<summary>🐍 <strong>Using pip</strong></summary>
|
||||
|
||||
Choose the installation option that best fits your needs:
|
||||
|
||||
#### Basic Installation
|
||||
### Basic Installation
|
||||
|
||||
For basic web crawling and scraping tasks:
|
||||
|
||||
```bash
|
||||
pip install crawl4ai
|
||||
crawl4ai-setup # Setup the browser
|
||||
```
|
||||
|
||||
By default, this will install the asynchronous version of Crawl4AI, using Playwright for web crawling.
|
||||
|
||||
👉 Note: When you install Crawl4AI, the setup script should automatically install and set up Playwright. However, if you encounter any Playwright-related errors, you can manually install it using one of these methods:
|
||||
👉 **Note**: When you install Crawl4AI, the `crawl4ai-setup` should automatically install and set up Playwright. However, if you encounter any Playwright-related errors, you can manually install it using one of these methods:
|
||||
|
||||
1. Through the command line:
|
||||
|
||||
```bash
|
||||
playwright install
|
||||
```
|
||||
|
||||
2. If the above doesn't work, try this more specific command:
|
||||
|
||||
```bash
|
||||
python -m playwright install chromium
|
||||
```
|
||||
|
||||
This second method has proven to be more reliable in some cases.
|
||||
|
||||
#### Installation with Synchronous Version
|
||||
---
|
||||
|
||||
If you need the synchronous version using Selenium:
|
||||
### Installation with Synchronous Version
|
||||
|
||||
The sync version is deprecated and will be removed in future versions. If you need the synchronous version using Selenium:
|
||||
|
||||
```bash
|
||||
pip install crawl4ai[sync]
|
||||
```
|
||||
|
||||
#### Development Installation
|
||||
---
|
||||
|
||||
### Development Installation
|
||||
|
||||
For contributors who plan to modify the source code:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/unclecode/crawl4ai.git
|
||||
cd crawl4ai
|
||||
pip install -e .
|
||||
pip install -e . # Basic installation in editable mode
|
||||
```
|
||||
|
||||
### Using Docker 🐳
|
||||
Install optional features:
|
||||
|
||||
We're in the process of creating Docker images and pushing them to Docker Hub. This will provide an easy way to run Crawl4AI in a containerized environment. Stay tuned for updates!
|
||||
```bash
|
||||
pip install -e ".[torch]" # With PyTorch features
|
||||
pip install -e ".[transformer]" # With Transformer features
|
||||
pip install -e ".[cosine]" # With cosine similarity features
|
||||
pip install -e ".[sync]" # With synchronous crawling (Selenium)
|
||||
pip install -e ".[all]" # Install all optional features
|
||||
```
|
||||
|
||||
For more detailed installation instructions and options, please refer to our [Installation Guide](https://crawl4ai.com/mkdocs/installation).
|
||||
</details>
|
||||
|
||||
## Quick Start 🚀
|
||||
<details>
|
||||
<summary>🐳 <strong>Docker Deployment</strong></summary>
|
||||
|
||||
> 🚀 **Major Changes Coming!** We're developing a completely new Docker implementation that will make deployment even more efficient and seamless. The current Docker setup is being deprecated in favor of this new solution.
|
||||
|
||||
### Current Docker Support
|
||||
|
||||
The existing Docker implementation is being deprecated and will be replaced soon. If you still need to use Docker with the current version:
|
||||
|
||||
- 📚 [Deprecated Docker Setup](./docs/deprecated/docker-deployment.md) - Instructions for the current Docker implementation
|
||||
- ⚠️ Note: This setup will be replaced in the next major release
|
||||
|
||||
### What's Coming Next?
|
||||
|
||||
Our new Docker implementation will bring:
|
||||
- Improved performance and resource efficiency
|
||||
- Streamlined deployment process
|
||||
- Better integration with Crawl4AI features
|
||||
- Enhanced scalability options
|
||||
|
||||
Stay connected with our [GitHub repository](https://github.com/unclecode/crawl4ai) for updates!
|
||||
|
||||
</details>
|
||||
|
||||
---
|
||||
|
||||
### Quick Test
|
||||
|
||||
Run a quick test (works for both Docker options):
|
||||
|
||||
```python
import time

import requests

# Submit a crawl job
response = requests.post(
    "http://localhost:11235/crawl",
    json={"urls": "https://example.com", "priority": 10}
)
task_id = response.json()["task_id"]

# Poll until the task is complete (status == "completed").
# Field names follow the response format described above; adjust if your server differs.
while True:
    result = requests.get(f"http://localhost:11235/task/{task_id}").json()
    if result["status"] == "completed":
        break
    time.sleep(1)

print(result)
```
|
||||
|
||||
For more examples, see our [Docker Examples](https://github.com/unclecode/crawl4ai/blob/main/docs/examples/docker_example.py). For advanced configuration, environment variables, and usage examples, see our [Docker Deployment Guide](https://docs.crawl4ai.com/basic/docker-deployment/).
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
## 🔬 Advanced Usage Examples 🔬
|
||||
|
||||
You can check the project structure in the [docs/examples](https://github.com/unclecode/crawl4ai/tree/main/docs/examples) directory, where you can find a variety of examples; some popular ones are shared here.
|
||||
|
||||
<details>
|
||||
<summary>📝 <strong>Heuristic Markdown Generation with Clean and Fit Markdown</strong></summary>
|
||||
|
||||
```python
|
||||
import asyncio
|
||||
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
|
||||
from crawl4ai.content_filter_strategy import PruningContentFilter, BM25ContentFilter
|
||||
from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator
|
||||
|
||||
async def main():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
result = await crawler.arun(url="https://www.nbcnews.com/business")
|
||||
print(result.markdown)
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
```
|
||||
|
||||
## Advanced Usage 🔬
|
||||
|
||||
### Executing JavaScript and Using CSS Selectors
|
||||
|
||||
```python
|
||||
import asyncio
|
||||
from crawl4ai import AsyncWebCrawler
|
||||
|
||||
async def main():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
js_code = ["const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More')); loadMoreButton && loadMoreButton.click();"]
|
||||
browser_config = BrowserConfig(
|
||||
headless=True,
|
||||
verbose=True,
|
||||
)
|
||||
run_config = CrawlerRunConfig(
|
||||
cache_mode=CacheMode.ENABLED,
|
||||
markdown_generator=DefaultMarkdownGenerator(
|
||||
content_filter=PruningContentFilter(threshold=0.48, threshold_type="fixed", min_word_threshold=0)
|
||||
),
|
||||
# markdown_generator=DefaultMarkdownGenerator(
|
||||
# content_filter=BM25ContentFilter(user_query="WHEN_WE_FOCUS_BASED_ON_A_USER_QUERY", bm25_threshold=1.0)
|
||||
# ),
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://www.nbcnews.com/business",
|
||||
js_code=js_code,
|
||||
css_selector=".wide-tease-item__description",
|
||||
bypass_cache=True
|
||||
url="https://docs.micronaut.io/4.7.6/guide/",
|
||||
config=run_config
|
||||
)
|
||||
print(result.extracted_content)
|
||||
print(len(result.markdown))
|
||||
print(len(result.fit_markdown))
|
||||
print(len(result.markdown_v2.fit_markdown))
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
```
|
||||
|
||||
### Using a Proxy
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>🖥️ <strong>Executing JavaScript & Extract Structured Data without LLMs</strong></summary>
|
||||
|
||||
```python
|
||||
import asyncio
|
||||
from crawl4ai import AsyncWebCrawler
|
||||
|
||||
async def main():
|
||||
async with AsyncWebCrawler(verbose=True, proxy="http://127.0.0.1:7890") as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://www.nbcnews.com/business",
|
||||
bypass_cache=True
|
||||
)
|
||||
print(result.markdown)
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
```
|
||||
|
||||
### Extracting Structured Data without LLM
|
||||
|
||||
The `JsonCssExtractionStrategy` allows for precise extraction of structured data from web pages using CSS selectors.
|
||||
|
||||
```python
|
||||
import asyncio
|
||||
import json
|
||||
from crawl4ai import AsyncWebCrawler
|
||||
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
|
||||
from crawl4ai.extraction_strategy import JsonCssExtractionStrategy
|
||||
import json
|
||||
|
||||
async def extract_news_teasers():
|
||||
async def main():
|
||||
schema = {
|
||||
"name": "News Teaser Extractor",
|
||||
"baseSelector": ".wide-tease-item__wrapper",
|
||||
"fields": [
|
||||
{
|
||||
"name": "category",
|
||||
"selector": ".unibrow span[data-testid='unibrow-text']",
|
||||
"type": "text",
|
||||
},
|
||||
{
|
||||
"name": "headline",
|
||||
"selector": ".wide-tease-item__headline",
|
||||
"type": "text",
|
||||
},
|
||||
{
|
||||
"name": "summary",
|
||||
"selector": ".wide-tease-item__description",
|
||||
"type": "text",
|
||||
},
|
||||
{
|
||||
"name": "time",
|
||||
"selector": "[data-testid='wide-tease-date']",
|
||||
"type": "text",
|
||||
},
|
||||
{
|
||||
"name": "image",
|
||||
"type": "nested",
|
||||
"selector": "picture.teasePicture img",
|
||||
"fields": [
|
||||
{"name": "src", "type": "attribute", "attribute": "src"},
|
||||
{"name": "alt", "type": "attribute", "attribute": "alt"},
|
||||
],
|
||||
},
|
||||
{
|
||||
"name": "link",
|
||||
"selector": "a[href]",
|
||||
"type": "attribute",
|
||||
"attribute": "href",
|
||||
},
|
||||
],
|
||||
"name": "KidoCode Courses",
|
||||
"baseSelector": "section.charge-methodology .w-tab-content > div",
|
||||
"fields": [
|
||||
{
|
||||
"name": "section_title",
|
||||
"selector": "h3.heading-50",
|
||||
"type": "text",
|
||||
},
|
||||
{
|
||||
"name": "section_description",
|
||||
"selector": ".charge-content",
|
||||
"type": "text",
|
||||
},
|
||||
{
|
||||
"name": "course_name",
|
||||
"selector": ".text-block-93",
|
||||
"type": "text",
|
||||
},
|
||||
{
|
||||
"name": "course_description",
|
||||
"selector": ".course-content-text",
|
||||
"type": "text",
|
||||
},
|
||||
{
|
||||
"name": "course_icon",
|
||||
"selector": ".image-92",
|
||||
"type": "attribute",
|
||||
"attribute": "src"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
extraction_strategy = JsonCssExtractionStrategy(schema, verbose=True)
|
||||
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
browser_config = BrowserConfig(
|
||||
headless=False,
|
||||
verbose=True
|
||||
)
|
||||
run_config = CrawlerRunConfig(
|
||||
extraction_strategy=extraction_strategy,
|
||||
js_code=["""(async () => {const tabs = document.querySelectorAll("section.charge-methodology .tabs-menu-3 > div");for(let tab of tabs) {tab.scrollIntoView();tab.click();await new Promise(r => setTimeout(r, 500));}})();"""],
|
||||
cache_mode=CacheMode.BYPASS
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
|
||||
result = await crawler.arun(
|
||||
url="https://www.nbcnews.com/business",
|
||||
extraction_strategy=extraction_strategy,
|
||||
bypass_cache=True,
|
||||
url="https://www.kidocode.com/degrees/technology",
|
||||
config=run_config
|
||||
)
|
||||
|
||||
assert result.success, "Failed to crawl the page"
|
||||
companies = json.loads(result.extracted_content)
|
||||
print(f"Successfully extracted {len(companies)} companies")
|
||||
print(json.dumps(companies[0], indent=2))
|
||||
|
||||
news_teasers = json.loads(result.extracted_content)
|
||||
print(f"Successfully extracted {len(news_teasers)} news teasers")
|
||||
print(json.dumps(news_teasers[0], indent=2))
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(extract_news_teasers())
|
||||
asyncio.run(main())
|
||||
```
|
||||
|
||||
For more advanced usage examples, check out our [Examples](https://crawl4ai.com/mkdocs/full_details/advanced_jsoncss_extraction.md) section in the documentation.
|
||||
</details>
|
||||
|
||||
### Extracting Structured Data with OpenAI
|
||||
<details>
|
||||
<summary>📚 <strong>Extracting Structured Data with LLMs</strong></summary>
|
||||
|
||||
```python
|
||||
import os
|
||||
import asyncio
|
||||
from crawl4ai import AsyncWebCrawler
|
||||
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
|
||||
from crawl4ai.extraction_strategy import LLMExtractionStrategy
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
@@ -252,19 +413,26 @@ class OpenAIModelFee(BaseModel):
|
||||
output_fee: str = Field(..., description="Fee for output token for the OpenAI model.")
|
||||
|
||||
async def main():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
browser_config = BrowserConfig(verbose=True)
|
||||
run_config = CrawlerRunConfig(
|
||||
word_count_threshold=1,
|
||||
extraction_strategy=LLMExtractionStrategy(
|
||||
# Here you can use any provider that Litellm library supports, for instance: ollama/qwen2
|
||||
# provider="ollama/qwen2", api_token="no-token",
|
||||
provider="openai/gpt-4o", api_token=os.getenv('OPENAI_API_KEY'),
|
||||
schema=OpenAIModelFee.schema(),
|
||||
extraction_type="schema",
|
||||
instruction="""From the crawled content, extract all mentioned model names along with their fees for input and output tokens.
|
||||
Do not miss any models in the entire content. One extracted model JSON format should look like this:
|
||||
{"model_name": "GPT-4", "input_fee": "US$10.00 / 1M tokens", "output_fee": "US$30.00 / 1M tokens"}."""
|
||||
),
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
result = await crawler.arun(
|
||||
url='https://openai.com/api/pricing/',
|
||||
word_count_threshold=1,
|
||||
extraction_strategy=LLMExtractionStrategy(
|
||||
provider="openai/gpt-4o", api_token=os.getenv('OPENAI_API_KEY'),
|
||||
schema=OpenAIModelFee.schema(),
|
||||
extraction_type="schema",
|
||||
instruction="""From the crawled content, extract all mentioned model names along with their fees for input and output tokens.
|
||||
Do not miss any models in the entire content. One extracted model JSON format should look like this:
|
||||
{"model_name": "GPT-4", "input_fee": "US$10.00 / 1M tokens", "output_fee": "US$30.00 / 1M tokens"}."""
|
||||
),
|
||||
bypass_cache=True,
|
||||
config=run_config
|
||||
)
|
||||
print(result.extracted_content)
|
||||
|
||||
@@ -272,117 +440,144 @@ if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
```
|
||||
|
||||
### Session Management and Dynamic Content Crawling
|
||||
</details>
|
||||
|
||||
Crawl4AI excels at handling complex scenarios, such as crawling multiple pages with dynamic content loaded via JavaScript. Here's an example of crawling GitHub commits across multiple pages:
|
||||
<details>
|
||||
<summary>🤖 <strong>Using Your Own Browser with a Custom User Profile</strong></summary>
|
||||
|
||||
```python
|
||||
import asyncio
|
||||
import re
|
||||
from bs4 import BeautifulSoup
|
||||
from crawl4ai import AsyncWebCrawler
|
||||
import os, sys
|
||||
from pathlib import Path
|
||||
import asyncio, time
|
||||
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
|
||||
|
||||
async def crawl_typescript_commits():
|
||||
first_commit = ""
|
||||
async def on_execution_started(page):
|
||||
nonlocal first_commit
|
||||
try:
|
||||
while True:
|
||||
await page.wait_for_selector('li.Box-sc-g0xbh4-0 h4')
|
||||
commit = await page.query_selector('li.Box-sc-g0xbh4-0 h4')
|
||||
commit = await commit.evaluate('(element) => element.textContent')
|
||||
commit = re.sub(r'\s+', '', commit)
|
||||
if commit and commit != first_commit:
|
||||
first_commit = commit
|
||||
break
|
||||
await asyncio.sleep(0.5)
|
||||
except Exception as e:
|
||||
print(f"Warning: New content didn't appear after JavaScript execution: {e}")
|
||||
async def test_news_crawl():
|
||||
# Create a persistent user data directory
|
||||
user_data_dir = os.path.join(Path.home(), ".crawl4ai", "browser_profile")
|
||||
os.makedirs(user_data_dir, exist_ok=True)
|
||||
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
crawler.crawler_strategy.set_hook('on_execution_started', on_execution_started)
|
||||
|
||||
url = "https://github.com/microsoft/TypeScript/commits/main"
|
||||
session_id = "typescript_commits_session"
|
||||
all_commits = []
|
||||
|
||||
js_next_page = """
|
||||
const button = document.querySelector('a[data-testid="pagination-next-button"]');
|
||||
if (button) button.click();
|
||||
"""
|
||||
|
||||
for page in range(3): # Crawl 3 pages
|
||||
result = await crawler.arun(
|
||||
url=url,
|
||||
session_id=session_id,
|
||||
css_selector="li.Box-sc-g0xbh4-0",
|
||||
js=js_next_page if page > 0 else None,
|
||||
bypass_cache=True,
|
||||
js_only=page > 0
|
||||
)
|
||||
|
||||
assert result.success, f"Failed to crawl page {page + 1}"
|
||||
|
||||
soup = BeautifulSoup(result.cleaned_html, 'html.parser')
|
||||
commits = soup.select("li")
|
||||
all_commits.extend(commits)
|
||||
|
||||
print(f"Page {page + 1}: Found {len(commits)} commits")
|
||||
|
||||
await crawler.crawler_strategy.kill_session(session_id)
|
||||
print(f"Successfully crawled {len(all_commits)} commits across 3 pages")
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(crawl_typescript_commits())
|
||||
browser_config = BrowserConfig(
|
||||
verbose=True,
|
||||
headless=True,
|
||||
user_data_dir=user_data_dir,
|
||||
use_persistent_context=True,
|
||||
)
|
||||
run_config = CrawlerRunConfig(
|
||||
cache_mode=CacheMode.BYPASS
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
url = "ADDRESS_OF_A_CHALLENGING_WEBSITE"
|
||||
|
||||
result = await crawler.arun(
|
||||
url,
|
||||
config=run_config,
|
||||
magic=True,
|
||||
)
|
||||
|
||||
print(f"Successfully crawled {url}")
|
||||
print(f"Content length: {len(result.markdown)}")
|
||||
```
|
||||
|
||||
This example demonstrates Crawl4AI's ability to handle complex scenarios where content is loaded asynchronously. It crawls multiple pages of GitHub commits, executing JavaScript to load new content and using custom hooks to ensure data is loaded before proceeding.
|
||||
</details>
|
||||
|
||||
For more advanced usage examples, check out our [Examples](https://crawl4ai.com/mkdocs/full_details/session_based_crawling.md) section in the documentation.
|
||||
## ✨ Recent Updates
|
||||
|
||||
- **🚀 New Dispatcher System**: Scale to thousands of URLs with intelligent **memory monitoring**, **concurrency control**, and optional **rate limiting**. (See `MemoryAdaptiveDispatcher`, `SemaphoreDispatcher`, `RateLimiter`, `CrawlerMonitor`)
|
||||
- **⚡ Streaming Mode**: Process results **as they arrive** instead of waiting for an entire batch to complete. (Set `stream=True` in `CrawlerRunConfig`; see the sketch after this list.)
|
||||
- **🤖 Enhanced LLM Integration**:
|
||||
- **Automatic schema generation**: Create extraction rules from HTML using OpenAI or Ollama, no manual CSS/XPath needed.
|
||||
- **LLM-powered Markdown filtering**: Refine your markdown output with a new `LLMContentFilter` that understands content relevance.
|
||||
- **Ollama Support**: Use open-source or self-hosted models for private or cost-effective extraction.
|
||||
- **🏎️ Faster Scraping Option**: New `LXMLWebScrapingStrategy` offers **10-20x speedup** for large, complex pages (experimental).
|
||||
- **🤖 robots.txt Compliance**: Respect website rules with `check_robots_txt=True` and efficient local caching.
|
||||
- **🔄 Proxy Rotation**: Built-in support for dynamic proxy switching and IP verification, with support for authenticated proxies and session persistence.
|
||||
- **➡️ URL Redirection Tracking**: The `redirected_url` field now captures the final destination after any redirects.
|
||||
- **🪞 Improved Mirroring**: The `LXMLWebScrapingStrategy` now has much greater fidelity, allowing for almost pixel-perfect mirroring of websites.
|
||||
- **📈 Enhanced Monitoring**: Track memory, CPU, and individual crawler status with `CrawlerMonitor`.
|
||||
- **📝 Improved Documentation**: More examples, clearer explanations, and updated tutorials.
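A rough sketch tying several of the bullets above together: streaming results from `arun_many`, robots.txt checking, and a memory-adaptive dispatcher. The `MemoryAdaptiveDispatcher` import path, its `memory_threshold_percent` parameter, and the exact streaming signature of `arun_many` are assumptions based on these release notes; consult the changelog for the definitive API.

```python
import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, CacheMode
from crawl4ai.async_dispatcher import MemoryAdaptiveDispatcher  # assumed import path

async def main():
    urls = ["https://example.com", "https://www.nbcnews.com/business"]
    run_config = CrawlerRunConfig(
        cache_mode=CacheMode.BYPASS,
        stream=True,            # process results as they arrive
        check_robots_txt=True,  # respect robots.txt, with local caching
    )
    dispatcher = MemoryAdaptiveDispatcher(memory_threshold_percent=80.0)  # assumed parameter

    async with AsyncWebCrawler() as crawler:
        async for result in await crawler.arun_many(urls, config=run_config, dispatcher=dispatcher):
            # redirected_url captures the final destination after any redirects
            print(result.url, "->", result.redirected_url, "ok:", result.success)

if __name__ == "__main__":
    asyncio.run(main())
```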
Read the full details in our [0.4.248 Release Notes](https://github.com/unclecode/crawl4ai/blob/main/CHANGELOG.md).
|
||||
|
||||
|
||||
|
||||
|
||||
## Speed Comparison 🚀
|
||||
## Version Numbering in Crawl4AI
|
||||
|
||||
Crawl4AI is designed with speed as a primary focus. Our goal is to provide the fastest possible response with high-quality data extraction, minimizing abstractions between the data and the user.
|
||||
Crawl4AI follows standard Python version numbering conventions (PEP 440) to help users understand the stability and features of each release.
|
||||
|
||||
We've conducted a speed comparison between Crawl4AI and Firecrawl, a paid service. The results demonstrate Crawl4AI's superior performance:
|
||||
### Version Numbers Explained
|
||||
|
||||
```
|
||||
Firecrawl:
|
||||
Time taken: 7.02 seconds
|
||||
Content length: 42074 characters
|
||||
Images found: 49
|
||||
Our version numbers follow this pattern: `MAJOR.MINOR.PATCH` (e.g., 0.4.3)
|
||||
|
||||
Crawl4AI (simple crawl):
|
||||
Time taken: 1.60 seconds
|
||||
Content length: 18238 characters
|
||||
Images found: 49
|
||||
#### Pre-release Versions
|
||||
We use different suffixes to indicate development stages:
|
||||
|
||||
Crawl4AI (with JavaScript execution):
|
||||
Time taken: 4.64 seconds
|
||||
Content length: 40869 characters
|
||||
Images found: 89
|
||||
```
|
||||
- `dev` (0.4.3dev1): Development versions, unstable
|
||||
- `a` (0.4.3a1): Alpha releases, experimental features
|
||||
- `b` (0.4.3b1): Beta releases, feature complete but needs testing
|
||||
- `rc` (0.4.3rc1): Release candidates, potential final version
|
||||
|
||||
As you can see, Crawl4AI outperforms Firecrawl significantly:
|
||||
- Simple crawl: Crawl4AI is over 4 times faster than Firecrawl.
|
||||
- With JavaScript execution: Even when executing JavaScript to load more content (doubling the number of images found), Crawl4AI is still faster than Firecrawl's simple crawl.
|
||||
#### Installation
|
||||
- Regular installation (stable version):
|
||||
```bash
|
||||
pip install -U crawl4ai
|
||||
```
|
||||
|
||||
You can find the full comparison code in our repository at `docs/examples/crawl4ai_vs_firecrawl.py`.
|
||||
- Install pre-release versions:
|
||||
```bash
|
||||
pip install crawl4ai --pre
|
||||
```
|
||||
|
||||
## Documentation 📚
|
||||
- Install specific version:
|
||||
```bash
|
||||
pip install crawl4ai==0.4.3b1
|
||||
```
|
||||
|
||||
For detailed documentation, including installation instructions, advanced features, and API reference, visit our [Documentation Website](https://crawl4ai.com/mkdocs/).
|
||||
#### Why Pre-releases?
|
||||
We use pre-releases to:
|
||||
- Test new features in real-world scenarios
|
||||
- Gather feedback before final releases
|
||||
- Ensure stability for production users
|
||||
- Allow early adopters to try new features
|
||||
|
||||
## Contributing 🤝
|
||||
For production environments, we recommend using the stable version. For testing new features, you can opt-in to pre-releases using the `--pre` flag.
|
||||
|
||||
## 📖 Documentation & Roadmap
|
||||
|
||||
> 🚨 **Documentation Update Alert**: We're undertaking a major documentation overhaul next week to reflect recent updates and improvements. Stay tuned for a more comprehensive and up-to-date guide!
|
||||
|
||||
For current documentation, including installation instructions, advanced features, and API reference, visit our [Documentation Website](https://docs.crawl4ai.com/).
|
||||
|
||||
To check our development plans and upcoming features, visit our [Roadmap](https://github.com/unclecode/crawl4ai/blob/main/ROADMAP.md).
|
||||
|
||||
<details>
|
||||
<summary>📈 <strong>Development TODOs</strong></summary>
|
||||
|
||||
- [x] 0. Graph Crawler: Smart website traversal using graph search algorithms for comprehensive nested page extraction
|
||||
- [ ] 1. Question-Based Crawler: Natural language driven web discovery and content extraction
|
||||
- [ ] 2. Knowledge-Optimal Crawler: Smart crawling that maximizes knowledge while minimizing data extraction
|
||||
- [ ] 3. Agentic Crawler: Autonomous system for complex multi-step crawling operations
|
||||
- [ ] 4. Automated Schema Generator: Convert natural language to extraction schemas
|
||||
- [ ] 5. Domain-Specific Scrapers: Pre-configured extractors for common platforms (academic, e-commerce)
|
||||
- [ ] 6. Web Embedding Index: Semantic search infrastructure for crawled content
|
||||
- [ ] 7. Interactive Playground: Web UI for testing, comparing strategies with AI assistance
|
||||
- [ ] 8. Performance Monitor: Real-time insights into crawler operations
|
||||
- [ ] 9. Cloud Integration: One-click deployment solutions across cloud providers
|
||||
- [ ] 10. Sponsorship Program: Structured support system with tiered benefits
|
||||
- [ ] 11. Educational Content: "How to Crawl" video series and interactive tutorials
|
||||
|
||||
</details>
|
||||
|
||||
## 🤝 Contributing
|
||||
|
||||
We welcome contributions from the open-source community. Check out our [contribution guidelines](https://github.com/unclecode/crawl4ai/blob/main/CONTRIBUTING.md) for more information.
|
||||
|
||||
## License 📄
|
||||
## 📄 License
|
||||
|
||||
Crawl4AI is released under the [Apache 2.0 License](https://github.com/unclecode/crawl4ai/blob/main/LICENSE).
|
||||
|
||||
## Contact 📧
|
||||
## 📧 Contact
|
||||
|
||||
For questions, suggestions, or feedback, feel free to reach out:
|
||||
|
||||
@@ -392,6 +587,31 @@ For questions, suggestions, or feedback, feel free to reach out:
|
||||
|
||||
Happy Crawling! 🕸️🚀
|
||||
|
||||
## 🗾 Mission
|
||||
|
||||
Our mission is to unlock the value of personal and enterprise data by transforming digital footprints into structured, tradeable assets. Crawl4AI empowers individuals and organizations with open-source tools to extract and structure data, fostering a shared data economy.
|
||||
|
||||
We envision a future where AI is powered by real human knowledge, ensuring data creators directly benefit from their contributions. By democratizing data and enabling ethical sharing, we are laying the foundation for authentic AI advancement.
|
||||
|
||||
<details>
|
||||
<summary>🔑 <strong>Key Opportunities</strong></summary>
|
||||
|
||||
- **Data Capitalization**: Transform digital footprints into measurable, valuable assets.
|
||||
- **Authentic AI Data**: Provide AI systems with real human insights.
|
||||
- **Shared Economy**: Create a fair data marketplace that benefits data creators.
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>🚀 <strong>Development Pathway</strong></summary>
|
||||
|
||||
1. **Open-Source Tools**: Community-driven platforms for transparent data extraction.
|
||||
2. **Digital Asset Structuring**: Tools to organize and value digital knowledge.
|
||||
3. **Ethical Data Marketplace**: A secure, fair platform for exchanging structured data.
|
||||
|
||||
For more details, see our [full mission statement](./MISSION.md).
|
||||
</details>
|
||||
|
||||
## Star History
|
||||
|
||||
[](https://star-history.com/#unclecode/crawl4ai&Date)
|
||||
[](https://star-history.com/#unclecode/crawl4ai&Date)
|
||||
|
||||
244
README.sync.md
@@ -1,244 +0,0 @@
|
||||
# Crawl4AI v0.2.77 🕷️🤖
|
||||
|
||||
[](https://github.com/unclecode/crawl4ai/stargazers)
|
||||
[](https://github.com/unclecode/crawl4ai/network/members)
|
||||
[](https://github.com/unclecode/crawl4ai/issues)
|
||||
[](https://github.com/unclecode/crawl4ai/pulls)
|
||||
[](https://github.com/unclecode/crawl4ai/blob/main/LICENSE)
|
||||
|
||||
Crawl4AI simplifies web crawling and data extraction, making it accessible for large language models (LLMs) and AI applications. 🆓🌐
|
||||
|
||||
#### [v0.2.77] - 2024-08-02
|
||||
|
||||
Major improvements in functionality, performance, and cross-platform compatibility! 🚀
|
||||
|
||||
- 🐳 **Docker enhancements**:
|
||||
- Significantly improved Dockerfile for easy installation on Linux, Mac, and Windows.
|
||||
- 🌐 **Official Docker Hub image**:
|
||||
- Launched our first official image on Docker Hub for streamlined deployment (unclecode/crawl4ai).
|
||||
- 🔧 **Selenium upgrade**:
|
||||
- Removed dependency on ChromeDriver, now using Selenium's built-in capabilities for better compatibility.
|
||||
- 🖼️ **Image description**:
|
||||
- Implemented ability to generate textual descriptions for extracted images from web pages.
|
||||
- ⚡ **Performance boost**:
|
||||
- Various improvements to enhance overall speed and performance.
|
||||
|
||||
## Try it Now!
|
||||
|
||||
✨ Play around with this [](https://colab.research.google.com/drive/1sJPAmeLj5PMrg2VgOwMJ2ubGIcK0cJeX?usp=sharing)
|
||||
|
||||
✨ visit our [Documentation Website](https://crawl4ai.com/mkdocs/)
|
||||
|
||||
✨ Check [Demo](https://crawl4ai.com/mkdocs/demo)
|
||||
|
||||
## Features ✨
|
||||
|
||||
- 🆓 Completely free and open-source
|
||||
- 🤖 LLM-friendly output formats (JSON, cleaned HTML, markdown)
|
||||
- 🌍 Supports crawling multiple URLs simultaneously
|
||||
- 🎨 Extracts and returns all media tags (Images, Audio, and Video)
|
||||
- 🔗 Extracts all external and internal links
|
||||
- 📚 Extracts metadata from the page
|
||||
- 🔄 Custom hooks for authentication, headers, and page modifications before crawling
|
||||
- 🕵️ User-agent customization
|
||||
- 🖼️ Takes screenshots of the page
|
||||
- 📜 Executes multiple custom JavaScripts before crawling
|
||||
- 📚 Various chunking strategies: topic-based, regex, sentence, and more
|
||||
- 🧠 Advanced extraction strategies: cosine clustering, LLM, and more
|
||||
- 🎯 CSS selector support
|
||||
- 📝 Passes instructions/keywords to refine extraction
|
||||
|
||||
# Crawl4AI
|
||||
|
||||
## 🌟 Shoutout to Contributors of v0.2.77!
|
||||
|
||||
A big thank you to the amazing contributors who've made this release possible:
|
||||
|
||||
- [@aravindkarnam](https://github.com/aravindkarnam) for the new image description feature
|
||||
- [@FractalMind](https://github.com/FractalMind) for our official Docker Hub image
|
||||
- [@ketonkss4](https://github.com/ketonkss4) for helping streamline our Selenium setup
|
||||
|
||||
Your contributions are driving Crawl4AI forward! 🚀
|
||||
|
||||
## Cool Examples 🚀
|
||||
|
||||
### Quick Start
|
||||
|
||||
```python
|
||||
from crawl4ai import WebCrawler
|
||||
|
||||
# Create an instance of WebCrawler
|
||||
crawler = WebCrawler()
|
||||
|
||||
# Warm up the crawler (load necessary models)
|
||||
crawler.warmup()
|
||||
|
||||
# Run the crawler on a URL
|
||||
result = crawler.run(url="https://www.nbcnews.com/business")
|
||||
|
||||
# Print the extracted content
|
||||
print(result.markdown)
|
||||
```
|
||||
|
||||
## How to install 🛠
|
||||
|
||||
### Using pip 🐍
|
||||
```bash
|
||||
virtualenv venv
|
||||
source venv/bin/activate
|
||||
pip install "crawl4ai @ git+https://github.com/unclecode/crawl4ai.git"
|
||||
```
|
||||
|
||||
### Using Docker 🐳
|
||||
|
||||
```bash
|
||||
# For Mac users (M1/M2)
|
||||
# docker build --platform linux/amd64 -t crawl4ai .
|
||||
docker build -t crawl4ai .
|
||||
docker run -d -p 8000:80 crawl4ai
|
||||
```
|
||||
|
||||
### Using Docker Hub 🐳
|
||||
|
||||
```bash
|
||||
docker pull unclecode/crawl4ai:latest
|
||||
docker run -d -p 8000:80 unclecode/crawl4ai:latest
|
||||
```
|
||||
|
||||
|
||||
## Speed-First Design 🚀
|
||||
|
||||
Perhaps the most important design principle for this library is speed. We need to ensure it can handle many links and resources in parallel as quickly as possible. By combining this speed with fast LLMs like Groq, the results will be truly amazing.
|
||||
|
||||
```python
|
||||
import time
|
||||
from crawl4ai.web_crawler import WebCrawler
|
||||
crawler = WebCrawler()
|
||||
crawler.warmup()
|
||||
|
||||
start = time.time()
|
||||
url = r"https://www.nbcnews.com/business"
|
||||
result = crawler.run( url, word_count_threshold=10, bypass_cache=True)
|
||||
end = time.time()
|
||||
print(f"Time taken: {end - start}")
|
||||
```
|
||||
|
||||
Let's take a look at the time measured for the above code snippet:
|
||||
|
||||
```bash
|
||||
[LOG] 🚀 Crawling done, success: True, time taken: 1.3623387813568115 seconds
|
||||
[LOG] 🚀 Content extracted, success: True, time taken: 0.05715131759643555 seconds
|
||||
[LOG] 🚀 Extraction, time taken: 0.05750393867492676 seconds.
|
||||
Time taken: 1.439958095550537
|
||||
```
|
||||
Fetching the content from the page took 1.3623 seconds, and extracting the content took 0.0575 seconds. 🚀
|
||||
|
||||
### Extract Structured Data from Web Pages 📊
|
||||
|
||||
Crawl all OpenAI models and their fees from the official page.
|
||||
|
||||
```python
|
||||
import os
|
||||
from crawl4ai import WebCrawler
|
||||
from crawl4ai.extraction_strategy import LLMExtractionStrategy
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
class OpenAIModelFee(BaseModel):
|
||||
model_name: str = Field(..., description="Name of the OpenAI model.")
|
||||
input_fee: str = Field(..., description="Fee for input token for the OpenAI model.")
|
||||
output_fee: str = Field(..., description="Fee for output token ßfor the OpenAI model.")
|
||||
|
||||
url = 'https://openai.com/api/pricing/'
|
||||
crawler = WebCrawler()
|
||||
crawler.warmup()
|
||||
|
||||
result = crawler.run(
|
||||
url=url,
|
||||
word_count_threshold=1,
|
||||
extraction_strategy= LLMExtractionStrategy(
|
||||
provider= "openai/gpt-4o", api_token = os.getenv('OPENAI_API_KEY'),
|
||||
schema=OpenAIModelFee.schema(),
|
||||
extraction_type="schema",
|
||||
instruction="""From the crawled content, extract all mentioned model names along with their fees for input and output tokens.
|
||||
Do not miss any models in the entire content. One extracted model JSON format should look like this:
|
||||
{"model_name": "GPT-4", "input_fee": "US$10.00 / 1M tokens", "output_fee": "US$30.00 / 1M tokens"}."""
|
||||
),
|
||||
bypass_cache=True,
|
||||
)
|
||||
|
||||
print(result.extracted_content)
|
||||
```
|
||||
|
||||
### Execute JS, Filter Data with CSS Selector, and Clustering
|
||||
|
||||
```python
|
||||
from crawl4ai import WebCrawler
|
||||
from crawl4ai.chunking_strategy import CosineStrategy
|
||||
|
||||
js_code = ["const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More')); loadMoreButton && loadMoreButton.click();"]
|
||||
|
||||
crawler = WebCrawler()
|
||||
crawler.warmup()
|
||||
|
||||
result = crawler.run(
|
||||
url="https://www.nbcnews.com/business",
|
||||
js=js_code,
|
||||
css_selector="p",
|
||||
extraction_strategy=CosineStrategy(semantic_filter="technology")
|
||||
)
|
||||
|
||||
print(result.extracted_content)
|
||||
```
|
||||
|
||||
### Extract Structured Data from Web Pages With Proxy and BaseUrl
|
||||
|
||||
```python
|
||||
from crawl4ai import WebCrawler
|
||||
from crawl4ai.extraction_strategy import LLMExtractionStrategy
|
||||
|
||||
def create_crawler():
|
||||
crawler = WebCrawler(verbose=True, proxy="http://127.0.0.1:7890")
|
||||
crawler.warmup()
|
||||
return crawler
|
||||
|
||||
crawler = create_crawler()
|
||||
|
||||
crawler.warmup()
|
||||
|
||||
result = crawler.run(
|
||||
url="https://www.nbcnews.com/business",
|
||||
extraction_strategy=LLMExtractionStrategy(
|
||||
provider="openai/gpt-4o",
|
||||
api_token="sk-",
|
||||
base_url="https://api.openai.com/v1"
|
||||
)
|
||||
)
|
||||
|
||||
print(result.markdown)
|
||||
```
|
||||
|
||||
## Documentation 📚
|
||||
|
||||
For detailed documentation, including installation instructions, advanced features, and API reference, visit our [Documentation Website](https://crawl4ai.com/mkdocs/).
|
||||
|
||||
## Contributing 🤝
|
||||
|
||||
We welcome contributions from the open-source community. Check out our [contribution guidelines](https://github.com/unclecode/crawl4ai/blob/main/CONTRIBUTING.md) for more information.
|
||||
|
||||
## License 📄
|
||||
|
||||
Crawl4AI is released under the [Apache 2.0 License](https://github.com/unclecode/crawl4ai/blob/main/LICENSE).
|
||||
|
||||
## Contact 📧
|
||||
|
||||
For questions, suggestions, or feedback, feel free to reach out:
|
||||
|
||||
- GitHub: [unclecode](https://github.com/unclecode)
|
||||
- Twitter: [@unclecode](https://twitter.com/unclecode)
|
||||
- Website: [crawl4ai.com](https://crawl4ai.com)
|
||||
|
||||
Happy Crawling! 🕸️🚀
|
||||
|
||||
## Star History
|
||||
|
||||
[](https://star-history.com/#unclecode/crawl4ai&Date)
|
||||
503
ROADMAP.md
Normal file
@@ -0,0 +1,503 @@
|
||||
# Crawl4AI Strategic Roadmap
|
||||
|
||||
```mermaid
|
||||
%%{init: {'themeVariables': { 'fontSize': '14px'}}}%%
|
||||
graph TD
|
||||
subgraph A1[Advanced Crawling Systems 🔧]
|
||||
A["`
|
||||
• Graph Crawler ✓
|
||||
• Question-Based Crawler
|
||||
• Knowledge-Optimal Crawler
|
||||
• Agentic Crawler
|
||||
`"]
|
||||
end
|
||||
|
||||
subgraph A2[Specialized Features 🛠️]
|
||||
B["`
|
||||
• Automated Schema Generator
|
||||
• Domain-Specific Scrapers
|
||||
•
|
||||
•
|
||||
`"]
|
||||
end
|
||||
|
||||
subgraph A3[Development Tools 🔨]
|
||||
C["`
|
||||
• Interactive Playground
|
||||
• Performance Monitor
|
||||
• Cloud Integration
|
||||
•
|
||||
`"]
|
||||
end
|
||||
|
||||
subgraph A4[Community & Growth 🌱]
|
||||
D["`
|
||||
• Sponsorship Program
|
||||
• Educational Content
|
||||
•
|
||||
•
|
||||
`"]
|
||||
end
|
||||
|
||||
classDef default fill:#f9f9f9,stroke:#333,stroke-width:2px
|
||||
classDef section fill:#f0f0f0,stroke:#333,stroke-width:4px,rx:10
|
||||
class A1,A2,A3,A4 section
|
||||
|
||||
%% Layout hints
|
||||
A1 --> A2[" "]
|
||||
A3 --> A4[" "]
|
||||
linkStyle 0,1 stroke:none
|
||||
```
|
||||
|
||||
Crawl4AI is evolving to provide more intelligent, efficient, and versatile web crawling capabilities. This roadmap outlines the key developments and features planned for the project, organized into strategic sections that build upon our current foundation.
|
||||
|
||||
## 1. Advanced Crawling Systems 🔧
|
||||
|
||||
This section introduces three powerful crawling systems that extend Crawl4AI's capabilities from basic web crawling to intelligent, purpose-driven data extraction.
|
||||
|
||||
### 1.1 Question-Based Crawler
|
||||
The Question-Based Crawler enhances our core engine by enabling automatic discovery and extraction of relevant web content based on natural language questions.
|
||||
|
||||
Key Features:
|
||||
- SerpAPI integration for intelligent web search
|
||||
- Relevancy scoring for search results
|
||||
- Automatic URL discovery and prioritization
|
||||
- Cross-source validation
|
||||
|
||||
```python
|
||||
from crawl4ai import AsyncWebCrawler
|
||||
from crawl4ai.discovery import QuestionBasedDiscovery
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
discovery = QuestionBasedDiscovery(crawler)
|
||||
results = await discovery.arun(
|
||||
question="What are the system requirements for major cloud providers' GPU instances?",
|
||||
max_urls=5,
|
||||
relevance_threshold=0.7
|
||||
)
|
||||
|
||||
for result in results:
|
||||
print(f"Source: {result.url} (Relevance: {result.relevance_score})")
|
||||
print(f"Content: {result.markdown}\n")
|
||||
```
|
||||
|
||||
### 1.2 Knowledge-Optimal Crawler
|
||||
An intelligent crawling system that solves the optimization problem of minimizing data extraction while maximizing knowledge acquisition for specific objectives.
|
||||
|
||||
Key Features:
|
||||
- Smart content prioritization
|
||||
- Minimal data extraction for maximum knowledge
|
||||
- Probabilistic relevance assessment
|
||||
- Objective-driven crawling paths
|
||||
|
||||
```python
|
||||
from crawl4ai import AsyncWebCrawler
|
||||
from crawl4ai.optimization import KnowledgeOptimizer
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
optimizer = KnowledgeOptimizer(
|
||||
objective="Understand GPU instance pricing and limitations across cloud providers",
|
||||
required_knowledge=[
|
||||
"pricing structure",
|
||||
"GPU specifications",
|
||||
"usage limits",
|
||||
"availability zones"
|
||||
],
|
||||
confidence_threshold=0.85
|
||||
)
|
||||
|
||||
result = await crawler.arun(
|
||||
urls=[
|
||||
"https://aws.amazon.com/ec2/pricing/",
|
||||
"https://cloud.google.com/gpu",
|
||||
"https://azure.microsoft.com/pricing/"
|
||||
],
|
||||
optimizer=optimizer,
|
||||
optimization_mode="minimal_extraction"
|
||||
)
|
||||
|
||||
print(f"Knowledge Coverage: {result.knowledge_coverage}")
|
||||
print(f"Data Efficiency: {result.efficiency_ratio}")
|
||||
print(f"Extracted Content: {result.optimal_content}")
|
||||
```
|
||||
|
||||
### 1.3 Agentic Crawler
|
||||
An autonomous system capable of understanding complex goals and automatically planning and executing multi-step crawling operations.
|
||||
|
||||
Key Features:
|
||||
- Autonomous goal interpretation
|
||||
- Dynamic step planning
|
||||
- Interactive navigation capabilities
|
||||
- Visual recognition and interaction
|
||||
- Automatic error recovery
|
||||
|
||||
```python
|
||||
from crawl4ai import AsyncWebCrawler
|
||||
from crawl4ai.agents import CrawlerAgent
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
agent = CrawlerAgent(crawler)
|
||||
|
||||
# Automatic planning and execution
|
||||
result = await agent.arun(
|
||||
goal="Find research papers about quantum computing published in 2023 with more than 50 citations",
|
||||
auto_retry=True
|
||||
)
|
||||
print("Generated Plan:", result.executed_steps)
|
||||
print("Extracted Data:", result.data)
|
||||
|
||||
# Using custom steps with automatic execution
|
||||
result = await agent.arun(
|
||||
goal="Extract conference deadlines from ML conferences",
|
||||
custom_plan=[
|
||||
"Navigate to conference page",
|
||||
"Find important dates section",
|
||||
"Extract submission deadlines",
|
||||
"Verify dates are for 2024"
|
||||
]
|
||||
)
|
||||
|
||||
# Monitoring execution
|
||||
print("Step Completion:", result.step_status)
|
||||
print("Execution Time:", result.execution_time)
|
||||
print("Success Rate:", result.success_rate)
|
||||
```
|
||||
|
||||
## 2. Specialized Features 🛠️
|
||||
|
||||
This section introduces specialized tools and features that enhance Crawl4AI's capabilities for specific use cases and data extraction needs.
|
||||
|
||||
### 2.1 Automated Schema Generator
|
||||
A system that automatically generates JsonCssExtractionStrategy schemas from natural language descriptions, making structured data extraction accessible to all users.
|
||||
|
||||
Key Features:
|
||||
- Natural language schema generation
|
||||
- Automatic pattern detection
|
||||
- Predefined schema templates
|
||||
- Chrome extension for visual schema building
|
||||
|
||||
```python
|
||||
from crawl4ai import AsyncWebCrawler
|
||||
from crawl4ai.schema import SchemaGenerator
|
||||
|
||||
# Generate schema from natural language description
|
||||
generator = SchemaGenerator()
|
||||
schema = await generator.generate(
|
||||
url="https://news-website.com",
|
||||
description="For each news article on the page, I need the headline, publication date, and main image"
|
||||
)
|
||||
|
||||
# Use generated schema with crawler
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://news-website.com",
|
||||
extraction_strategy=schema
|
||||
)
|
||||
|
||||
# Example of generated schema:
|
||||
"""
|
||||
{
|
||||
"name": "News Article Extractor",
|
||||
"baseSelector": "article.news-item",
|
||||
"fields": [
|
||||
{
|
||||
"name": "headline",
|
||||
"selector": "h2.article-title",
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"name": "date",
|
||||
"selector": "span.publish-date",
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"name": "image",
|
||||
"selector": "img.article-image",
|
||||
"type": "attribute",
|
||||
"attribute": "src"
|
||||
}
|
||||
]
|
||||
}
|
||||
"""
|
||||
```
|
||||
|
||||
### 2.2 Domain-Specific Scrapers
|
||||
Specialized extraction strategies optimized for common website types and platforms, providing consistent and reliable data extraction without additional configuration.
|
||||
|
||||
Key Features:
|
||||
- Pre-configured extractors for popular platforms
|
||||
- Academic site specialization (arXiv, NCBI)
|
||||
- E-commerce standardization
|
||||
- Documentation site handling
|
||||
|
||||
```python
|
||||
from crawl4ai import AsyncWebCrawler
|
||||
from crawl4ai.extractors import AcademicExtractor, EcommerceExtractor
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
# Academic paper extraction
|
||||
papers = await crawler.arun(
|
||||
url="https://arxiv.org/list/cs.AI/recent",
|
||||
extractor="academic", # Built-in extractor type
|
||||
site_type="arxiv", # Specific site optimization
|
||||
extract_fields=[
|
||||
"title",
|
||||
"authors",
|
||||
"abstract",
|
||||
"citations"
|
||||
]
|
||||
)
|
||||
|
||||
# E-commerce product data
|
||||
products = await crawler.arun(
|
||||
url="https://store.example.com/products",
|
||||
extractor="ecommerce",
|
||||
extract_fields=[
|
||||
"name",
|
||||
"price",
|
||||
"availability",
|
||||
"reviews"
|
||||
]
|
||||
)
|
||||
```
|
||||
|
||||
### 2.3 Web Embedding Index
|
||||
Creates and maintains a semantic search infrastructure for crawled content, enabling efficient retrieval and querying of web content through vector embeddings.
|
||||
|
||||
Key Features:
|
||||
- Automatic embedding generation
|
||||
- Intelligent content chunking
|
||||
- Efficient vector storage and indexing
|
||||
- Semantic search capabilities
|
||||
|
||||
```python
|
||||
from crawl4ai import AsyncWebCrawler
|
||||
from crawl4ai.indexing import WebIndex
|
||||
|
||||
# Initialize and build index
|
||||
index = WebIndex(model="efficient-mini")
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
# Crawl and index content
|
||||
await index.build(
|
||||
urls=["https://docs.example.com"],
|
||||
crawler=crawler,
|
||||
options={
|
||||
"chunk_method": "semantic",
|
||||
"update_policy": "incremental",
|
||||
"embedding_batch_size": 100
|
||||
}
|
||||
)
|
||||
|
||||
# Search through indexed content
|
||||
results = await index.search(
|
||||
query="How to implement OAuth authentication?",
|
||||
filters={
|
||||
"content_type": "technical",
|
||||
"recency": "6months"
|
||||
},
|
||||
top_k=5
|
||||
)
|
||||
|
||||
# Get similar content
|
||||
similar = await index.find_similar(
|
||||
url="https://docs.example.com/auth/oauth",
|
||||
threshold=0.85
|
||||
)
|
||||
```
|
||||
|
||||
Each of these specialized features builds upon Crawl4AI's core functionality while providing targeted solutions for specific use cases. They can be used independently or combined for more complex data extraction and processing needs.
|
||||
|
||||
## 3. Development Tools 🔧
|
||||
|
||||
This section covers tools designed to enhance the development experience, monitoring, and deployment of Crawl4AI applications.
|
||||
|
||||
### 3.1 Crawl4AI Playground 🎮
|
||||
|
||||
The Crawl4AI Playground is an interactive web-based development environment that simplifies web scraping experimentation, development, and deployment. With its intuitive interface and AI-powered assistance, users can quickly prototype, test, and deploy web scraping solutions.
|
||||
|
||||
#### Key Features 🌟
|
||||
|
||||
##### Visual Strategy Builder
|
||||
- Interactive point-and-click interface for building extraction strategies
|
||||
- Real-time preview of selected elements
|
||||
- Side-by-side comparison of different extraction approaches
|
||||
- Visual validation of CSS selectors and XPath queries
|
||||
|
||||
##### AI Assistant Integration
|
||||
- Strategy recommendations based on target website analysis
|
||||
- Parameter optimization suggestions
|
||||
- Best practices guidance for specific use cases
|
||||
- Automated error detection and resolution
|
||||
- Performance optimization tips
|
||||
|
||||
##### Real-Time Testing & Validation
|
||||
- Live preview of extraction results
|
||||
- Side-by-side comparison of multiple strategies
|
||||
- Performance metrics visualization
|
||||
- Automatic validation of extracted data
|
||||
- Error detection and debugging tools
|
||||
|
||||
##### Project Management
|
||||
- Save and organize multiple scraping projects
|
||||
- Version control for configurations
|
||||
- Export/import project settings
|
||||
- Share configurations with team members
|
||||
- Project templates for common use cases
|
||||
|
||||
##### Deployment Pipeline
|
||||
- One-click deployment to various environments
|
||||
- Docker container generation
|
||||
- Cloud deployment templates (AWS, GCP, Azure)
|
||||
- Scaling configuration management
|
||||
- Monitoring setup automation
|
||||
|
||||
|
||||
### 3.2 Performance Monitoring System
|
||||
A comprehensive monitoring solution providing real-time insights into crawler operations, resource usage, and system health through both CLI and GUI interfaces.
|
||||
|
||||
Key Features:
|
||||
- Real-time resource tracking
|
||||
- Active crawl monitoring
|
||||
- Performance statistics
|
||||
- Customizable alerting system
|
||||
|
||||
```python
|
||||
from crawl4ai import AsyncWebCrawler
|
||||
from crawl4ai.monitor import CrawlMonitor
|
||||
|
||||
# Initialize monitoring
|
||||
monitor = CrawlMonitor()
|
||||
|
||||
# Start monitoring with CLI interface
|
||||
await monitor.start(
|
||||
mode="cli", # or "gui"
|
||||
refresh_rate="1s",
|
||||
metrics={
|
||||
"resources": ["cpu", "memory", "network"],
|
||||
"crawls": ["active", "queued", "completed"],
|
||||
"performance": ["success_rate", "response_times"]
|
||||
}
|
||||
)
|
||||
|
||||
# Example CLI output:
|
||||
"""
|
||||
Crawl4AI Monitor (Live) - Press Q to exit
|
||||
────────────────────────────────────────
|
||||
System Usage:
|
||||
├─ CPU: ███████░░░ 70%
|
||||
└─ Memory: ████░░░░░ 2.1GB/8GB
|
||||
|
||||
Active Crawls:
|
||||
ID URL Status Progress
|
||||
001 docs.example.com 🟢 Active 75%
|
||||
002 api.service.com 🟡 Queue -
|
||||
|
||||
Metrics (Last 5min):
|
||||
├─ Success Rate: 98%
|
||||
├─ Avg Response: 0.6s
|
||||
└─ Pages/sec: 8.5
|
||||
"""
|
||||
```
|
||||
|
||||
### 3.3 Cloud Integration
|
||||
Streamlined deployment tools for setting up Crawl4AI in various cloud environments, with support for scaling and monitoring.
|
||||
|
||||
Key Features:
|
||||
- One-click deployment solutions
|
||||
- Auto-scaling configuration
|
||||
- Load balancing setup
|
||||
- Cloud-specific optimizations
|
||||
- Monitoring integration
|
||||
|
||||
```python
|
||||
from crawl4ai import AsyncWebCrawler
|
||||
from crawl4ai.deploy import CloudDeployer
|
||||
|
||||
# Initialize deployer
|
||||
deployer = CloudDeployer()
|
||||
|
||||
# Deploy crawler service
|
||||
deployment = await deployer.deploy(
|
||||
service_name="crawler-cluster",
|
||||
platform="aws", # or "gcp", "azure"
|
||||
config={
|
||||
"instance_type": "compute-optimized",
|
||||
"auto_scaling": {
|
||||
"min_instances": 2,
|
||||
"max_instances": 10,
|
||||
"scale_based_on": "cpu_usage"
|
||||
},
|
||||
"region": "us-east-1",
|
||||
"monitoring": True
|
||||
}
|
||||
)
|
||||
|
||||
# Get deployment status and endpoints
|
||||
print(f"Service Status: {deployment.status}")
|
||||
print(f"API Endpoint: {deployment.endpoint}")
|
||||
print(f"Monitor URL: {deployment.monitor_url}")
|
||||
```
|
||||
|
||||
These development tools work together to provide a comprehensive environment for developing, testing, monitoring, and deploying Crawl4AI applications. The Playground helps users experiment and generate optimal configurations, the Performance Monitor ensures smooth operation, and the Cloud Integration tools simplify deployment and scaling.
|
||||
|
||||
## 4. Community & Growth 🌱
|
||||
|
||||
This section outlines initiatives designed to build and support the Crawl4AI community, provide educational resources, and ensure sustainable project growth.
|
||||
|
||||
### 4.1 Sponsorship Program
|
||||
A structured program to support ongoing development and maintenance of Crawl4AI while providing valuable benefits to sponsors.
|
||||
|
||||
Key Features:
|
||||
- Multiple sponsorship tiers
|
||||
- Sponsor recognition system
|
||||
- Priority support for sponsors
|
||||
- Early access to new features
|
||||
- Custom feature development opportunities
|
||||
|
||||
Program Structure (not yet finalized):
|
||||
```
|
||||
Sponsorship Tiers:
|
||||
|
||||
🥉 Bronze Supporter
|
||||
- GitHub Sponsor badge
|
||||
- Priority issue response
|
||||
- Community Discord role
|
||||
|
||||
🥈 Silver Supporter
|
||||
- All Bronze benefits
|
||||
- Technical support channel
|
||||
- Vote on roadmap priorities
|
||||
- Early access to beta features
|
||||
|
||||
🥇 Gold Supporter
|
||||
- All Silver benefits
|
||||
- Custom feature requests
|
||||
- Direct developer access
|
||||
- Private support sessions
|
||||
|
||||
💎 Diamond Partner
|
||||
- All Gold benefits
|
||||
- Custom development
|
||||
- On-demand consulting
|
||||
- Integration support
|
||||
```
|
||||
|
||||
### 4.2 "How to Crawl" Video Series
|
||||
A comprehensive educational resource teaching users how to effectively use Crawl4AI for various web scraping and data extraction scenarios.
|
||||
|
||||
Key Features:
|
||||
- Step-by-step tutorials
|
||||
- Real-world use cases
|
||||
- Best practices
|
||||
- Integration guides
|
||||
- Advanced feature deep-dives
|
||||
|
||||
These community initiatives are designed to:
|
||||
- Provide comprehensive learning resources
|
||||
- Foster a supportive user community
|
||||
- Ensure sustainable project development
|
||||
- Share knowledge and best practices
|
||||
- Create opportunities for collaboration
|
||||
|
||||
The combination of structured support through sponsorship, educational content through video series, and interactive learning through the playground creates a robust ecosystem for both new and experienced users of Crawl4AI.
|
||||
crawl4ai/__init__.py
@@ -1,30 +1,88 @@
|
||||
# __init__.py
|
||||
|
||||
from .async_webcrawler import AsyncWebCrawler
|
||||
from .models import CrawlResult
|
||||
|
||||
__version__ = "0.3.72"
|
||||
from .async_webcrawler import AsyncWebCrawler, CacheMode
|
||||
from .async_configs import BrowserConfig, CrawlerRunConfig
|
||||
from .content_scraping_strategy import (
|
||||
ContentScrapingStrategy,
|
||||
WebScrapingStrategy,
|
||||
LXMLWebScrapingStrategy,
|
||||
)
|
||||
from .extraction_strategy import (
|
||||
ExtractionStrategy,
|
||||
LLMExtractionStrategy,
|
||||
CosineStrategy,
|
||||
JsonCssExtractionStrategy,
|
||||
JsonXPathExtractionStrategy
|
||||
)
|
||||
from .chunking_strategy import ChunkingStrategy, RegexChunking
|
||||
from .markdown_generation_strategy import DefaultMarkdownGenerator
|
||||
from .content_filter_strategy import PruningContentFilter, BM25ContentFilter, LLMContentFilter
|
||||
from .models import CrawlResult, MarkdownGenerationResult
|
||||
from .async_dispatcher import (
|
||||
MemoryAdaptiveDispatcher,
|
||||
SemaphoreDispatcher,
|
||||
RateLimiter,
|
||||
CrawlerMonitor,
|
||||
DisplayMode,
|
||||
BaseDispatcher
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"AsyncWebCrawler",
|
||||
"CrawlResult",
|
||||
"CacheMode",
|
||||
"ContentScrapingStrategy",
|
||||
"WebScrapingStrategy",
|
||||
"LXMLWebScrapingStrategy",
|
||||
"BrowserConfig",
|
||||
"CrawlerRunConfig",
|
||||
"ExtractionStrategy",
|
||||
"LLMExtractionStrategy",
|
||||
"CosineStrategy",
|
||||
"JsonCssExtractionStrategy",
|
||||
"JsonXPathExtractionStrategy",
|
||||
"ChunkingStrategy",
|
||||
"RegexChunking",
|
||||
"DefaultMarkdownGenerator",
|
||||
"PruningContentFilter",
|
||||
"BM25ContentFilter",
|
||||
"LLMContentFilter",
|
||||
"BaseDispatcher",
|
||||
"MemoryAdaptiveDispatcher",
|
||||
"SemaphoreDispatcher",
|
||||
"RateLimiter",
|
||||
"CrawlerMonitor",
|
||||
"DisplayMode",
|
||||
"MarkdownGenerationResult",
|
||||
]
|
||||
|
||||
|
||||
def is_sync_version_installed():
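    """Check whether the optional synchronous-crawler dependency (selenium) is importable."""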
|
||||
try:
|
||||
import selenium
|
||||
|
||||
return True
|
||||
except ImportError:
|
||||
return False
|
||||
|
||||
|
||||
if is_sync_version_installed():
|
||||
try:
|
||||
from .web_crawler import WebCrawler
|
||||
|
||||
__all__.append("WebCrawler")
|
||||
except ImportError:
|
||||
import warnings
|
||||
print("Warning: Failed to import WebCrawler even though selenium is installed. This might be due to other missing dependencies.")
|
||||
print(
|
||||
"Warning: Failed to import WebCrawler even though selenium is installed. This might be due to other missing dependencies."
|
||||
)
|
||||
else:
|
||||
WebCrawler = None
|
||||
import warnings
|
||||
print("Warning: Synchronous WebCrawler is not available. Install crawl4ai[sync] for synchronous support. However, please note that the synchronous version will be deprecated soon.")
|
||||
# import warnings
|
||||
# print("Warning: Synchronous WebCrawler is not available. Install crawl4ai[sync] for synchronous support. However, please note that the synchronous version will be deprecated soon.")
|
||||
|
||||
import warnings
|
||||
from pydantic import warnings as pydantic_warnings
|
||||
|
||||
# Disable all Pydantic warnings
|
||||
warnings.filterwarnings("ignore", module="pydantic")
|
||||
# pydantic_warnings.filter_warnings()
|
||||
2
crawl4ai/__version__.py
Normal file
@@ -0,0 +1,2 @@
|
||||
# crawl4ai/__version__.py
|
||||
__version__ = "0.4.3b2"
|
||||
715
crawl4ai/async_configs.py
Normal file
@@ -0,0 +1,715 @@
|
||||
from .config import (
|
||||
MIN_WORD_THRESHOLD,
|
||||
IMAGE_DESCRIPTION_MIN_WORD_THRESHOLD,
|
||||
SCREENSHOT_HEIGHT_TRESHOLD,
|
||||
PAGE_TIMEOUT,
|
||||
IMAGE_SCORE_THRESHOLD,
|
||||
SOCIAL_MEDIA_DOMAINS,
|
||||
)
|
||||
from .user_agent_generator import UserAgentGenerator
|
||||
from .extraction_strategy import ExtractionStrategy
|
||||
from .chunking_strategy import ChunkingStrategy, RegexChunking
|
||||
from .markdown_generation_strategy import MarkdownGenerationStrategy
|
||||
from .content_scraping_strategy import ContentScrapingStrategy, WebScrapingStrategy
|
||||
from typing import Optional, Union, List
|
||||
|
||||
|
||||
class BrowserConfig:
|
||||
"""
|
||||
Configuration class for setting up a browser instance and its context in AsyncPlaywrightCrawlerStrategy.
|
||||
|
||||
This class centralizes all parameters that affect browser and context creation. Instead of passing
|
||||
scattered keyword arguments, users can instantiate and modify this configuration object. The crawler
|
||||
code will then reference these settings to initialize the browser in a consistent, documented manner.
|
||||
|
||||
Attributes:
|
||||
browser_type (str): The type of browser to launch. Supported values: "chromium", "firefox", "webkit".
|
||||
Default: "chromium".
|
||||
headless (bool): Whether to run the browser in headless mode (no visible GUI).
|
||||
Default: True.
|
||||
use_managed_browser (bool): Launch the browser using a managed approach (e.g., via CDP), allowing
|
||||
advanced manipulation. Default: False.
|
||||
debugging_port (int): Port for the browser debugging protocol. Default: 9222.
|
||||
use_persistent_context (bool): Use a persistent browser context (like a persistent profile).
|
||||
Automatically sets use_managed_browser=True. Default: False.
|
||||
user_data_dir (str or None): Path to a user data directory for persistent sessions. If None, a
|
||||
temporary directory may be used. Default: None.
|
||||
chrome_channel (str): The Chrome channel to launch (e.g., "chrome", "msedge"). Only applies if browser_type
|
||||
is "chromium". Default: "chromium".
|
||||
channel (str): The channel to launch (e.g., "chromium", "chrome", "msedge"). Only applies if browser_type
|
||||
is "chromium". Default: "chromium".
|
||||
proxy (Optional[str]): Proxy server URL (e.g., "http://username:password@proxy:port"). If None, no proxy is used.
|
||||
Default: None.
|
||||
proxy_config (dict or None): Detailed proxy configuration, e.g. {"server": "...", "username": "..."}.
|
||||
If None, no additional proxy config. Default: None.
|
||||
viewport_width (int): Default viewport width for pages. Default: 1080.
|
||||
viewport_height (int): Default viewport height for pages. Default: 600.
|
||||
verbose (bool): Enable verbose logging.
|
||||
Default: True.
|
||||
accept_downloads (bool): Whether to allow file downloads. If True, requires a downloads_path.
|
||||
Default: False.
|
||||
downloads_path (str or None): Directory to store downloaded files. If None and accept_downloads is True,
|
||||
a default path will be created. Default: None.
|
||||
storage_state (str or dict or None): Path or object describing storage state (cookies, localStorage).
|
||||
Default: None.
|
||||
ignore_https_errors (bool): Ignore HTTPS certificate errors. Default: True.
|
||||
java_script_enabled (bool): Enable JavaScript execution in pages. Default: True.
|
||||
cookies (list): List of cookies to add to the browser context. Each cookie is a dict with fields like
|
||||
{"name": "...", "value": "...", "url": "..."}.
|
||||
Default: [].
|
||||
headers (dict): Extra HTTP headers to apply to all requests in this context.
|
||||
Default: {}.
|
||||
user_agent (str): Custom User-Agent string to use. Default: "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) "
|
||||
"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36".
|
||||
user_agent_mode (str or None): Mode for generating the user agent (e.g., "random"). If None, use the provided
|
||||
user_agent as-is. Default: None.
|
||||
user_agent_generator_config (dict or None): Configuration for user agent generation if user_agent_mode is set.
|
||||
Default: None.
|
||||
text_mode (bool): If True, disables images and other rich content for potentially faster load times.
|
||||
Default: False.
|
||||
light_mode (bool): Disables certain background features for performance gains. Default: False.
|
||||
extra_args (list): Additional command-line arguments passed to the browser.
|
||||
Default: [].
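
    Example:
        ```python
        # Minimal illustrative sketch: a headless Chromium behind a local proxy.
        # The proxy URL and viewport sizes below are placeholders, not recommendations.
        config = BrowserConfig(
            browser_type="chromium",
            headless=True,
            proxy="http://127.0.0.1:7890",
            viewport_width=1080,
            viewport_height=600,
        )
        ```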
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
browser_type: str = "chromium",
|
||||
headless: bool = True,
|
||||
use_managed_browser: bool = False,
|
||||
use_persistent_context: bool = False,
|
||||
user_data_dir: str = None,
|
||||
chrome_channel: str = "chromium",
|
||||
channel: str = "chromium",
|
||||
proxy: Optional[str] = None,
|
||||
proxy_config: dict = None,
|
||||
viewport_width: int = 1080,
|
||||
viewport_height: int = 600,
|
||||
accept_downloads: bool = False,
|
||||
downloads_path: str = None,
|
||||
storage_state=None,
|
||||
ignore_https_errors: bool = True,
|
||||
java_script_enabled: bool = True,
|
||||
sleep_on_close: bool = False,
|
||||
verbose: bool = True,
|
||||
cookies: list = None,
|
||||
headers: dict = None,
|
||||
user_agent: str = (
|
||||
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:109.0) AppleWebKit/537.36 "
|
||||
"(KHTML, like Gecko) Chrome/116.0.5845.187 Safari/604.1 Edg/117.0.2045.47"
|
||||
),
|
||||
user_agent_mode: str = None,
|
||||
user_agent_generator_config: dict = None,
|
||||
text_mode: bool = False,
|
||||
light_mode: bool = False,
|
||||
extra_args: list = None,
|
||||
debugging_port: int = 9222,
|
||||
):
|
||||
self.browser_type = browser_type
|
||||
self.headless = headless
|
||||
self.use_managed_browser = use_managed_browser
|
||||
self.use_persistent_context = use_persistent_context
|
||||
self.user_data_dir = user_data_dir
|
||||
self.chrome_channel = chrome_channel or self.browser_type or "chromium"
|
||||
self.channel = channel or self.browser_type or "chromium"
|
||||
if self.browser_type in ["firefox", "webkit"]:
|
||||
self.channel = ""
|
||||
self.chrome_channel = ""
|
||||
self.proxy = proxy
|
||||
self.proxy_config = proxy_config
|
||||
self.viewport_width = viewport_width
|
||||
self.viewport_height = viewport_height
|
||||
self.accept_downloads = accept_downloads
|
||||
self.downloads_path = downloads_path
|
||||
self.storage_state = storage_state
|
||||
self.ignore_https_errors = ignore_https_errors
|
||||
self.java_script_enabled = java_script_enabled
|
||||
self.cookies = cookies if cookies is not None else []
|
||||
self.headers = headers if headers is not None else {}
|
||||
self.user_agent = user_agent
|
||||
self.user_agent_mode = user_agent_mode
|
||||
self.user_agent_generator_config = user_agent_generator_config
|
||||
self.text_mode = text_mode
|
||||
self.light_mode = light_mode
|
||||
self.extra_args = extra_args if extra_args is not None else []
|
||||
self.sleep_on_close = sleep_on_close
|
||||
self.verbose = verbose
|
||||
self.debugging_port = debugging_port
|
||||
|
||||
user_agent_generator = UserAgentGenerator()
if self.user_agent_mode != "random" and self.user_agent_generator_config:
    self.user_agent = user_agent_generator.generate(
        **(self.user_agent_generator_config or {})
    )
elif self.user_agent_mode == "random":
    self.user_agent = user_agent_generator.generate()
else:
    pass

self.browser_hint = user_agent_generator.generate_client_hints(self.user_agent)
|
||||
self.headers.setdefault("sec-ch-ua", self.browser_hint)
|
||||
|
||||
# If persistent context is requested, ensure managed browser is enabled
|
||||
if self.use_persistent_context:
|
||||
self.use_managed_browser = True
|
||||
|
||||
@staticmethod
|
||||
def from_kwargs(kwargs: dict) -> "BrowserConfig":
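    """Build a BrowserConfig from a plain kwargs dict, applying the documented defaults for missing keys."""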
|
||||
return BrowserConfig(
|
||||
browser_type=kwargs.get("browser_type", "chromium"),
|
||||
headless=kwargs.get("headless", True),
|
||||
use_managed_browser=kwargs.get("use_managed_browser", False),
|
||||
use_persistent_context=kwargs.get("use_persistent_context", False),
|
||||
user_data_dir=kwargs.get("user_data_dir"),
|
||||
chrome_channel=kwargs.get("chrome_channel", "chromium"),
|
||||
channel=kwargs.get("channel", "chromium"),
|
||||
proxy=kwargs.get("proxy"),
|
||||
proxy_config=kwargs.get("proxy_config"),
|
||||
viewport_width=kwargs.get("viewport_width", 1080),
|
||||
viewport_height=kwargs.get("viewport_height", 600),
|
||||
accept_downloads=kwargs.get("accept_downloads", False),
|
||||
downloads_path=kwargs.get("downloads_path"),
|
||||
storage_state=kwargs.get("storage_state"),
|
||||
ignore_https_errors=kwargs.get("ignore_https_errors", True),
|
||||
java_script_enabled=kwargs.get("java_script_enabled", True),
|
||||
cookies=kwargs.get("cookies", []),
|
||||
headers=kwargs.get("headers", {}),
|
||||
user_agent=kwargs.get(
|
||||
"user_agent",
|
||||
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) "
|
||||
"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
|
||||
),
|
||||
user_agent_mode=kwargs.get("user_agent_mode"),
|
||||
user_agent_generator_config=kwargs.get("user_agent_generator_config"),
|
||||
text_mode=kwargs.get("text_mode", False),
|
||||
light_mode=kwargs.get("light_mode", False),
|
||||
extra_args=kwargs.get("extra_args", []),
|
||||
)
|
||||
|
||||
def to_dict(self):
|
||||
return {
|
||||
"browser_type": self.browser_type,
|
||||
"headless": self.headless,
|
||||
"use_managed_browser": self.use_managed_browser,
|
||||
"use_persistent_context": self.use_persistent_context,
|
||||
"user_data_dir": self.user_data_dir,
|
||||
"chrome_channel": self.chrome_channel,
|
||||
"channel": self.channel,
|
||||
"proxy": self.proxy,
|
||||
"proxy_config": self.proxy_config,
|
||||
"viewport_width": self.viewport_width,
|
||||
"viewport_height": self.viewport_height,
|
||||
"accept_downloads": self.accept_downloads,
|
||||
"downloads_path": self.downloads_path,
|
||||
"storage_state": self.storage_state,
|
||||
"ignore_https_errors": self.ignore_https_errors,
|
||||
"java_script_enabled": self.java_script_enabled,
|
||||
"cookies": self.cookies,
|
||||
"headers": self.headers,
|
||||
"user_agent": self.user_agent,
|
||||
"user_agent_mode": self.user_agent_mode,
|
||||
"user_agent_generator_config": self.user_agent_generator_config,
|
||||
"text_mode": self.text_mode,
|
||||
"light_mode": self.light_mode,
|
||||
"extra_args": self.extra_args,
|
||||
"sleep_on_close": self.sleep_on_close,
|
||||
"verbose": self.verbose,
|
||||
"debugging_port": self.debugging_port,
|
||||
}
|
||||
|
||||
def clone(self, **kwargs):
|
||||
"""Create a copy of this configuration with updated values.
|
||||
|
||||
Args:
|
||||
**kwargs: Key-value pairs of configuration options to update
|
||||
|
||||
Returns:
|
||||
BrowserConfig: A new instance with the specified updates
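
        Example:
            ```python
            # e.g., derive a visible (non-headless) variant for debugging; values are illustrative
            debug_config = browser_config.clone(headless=False, verbose=True)
            ```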
|
||||
"""
|
||||
config_dict = self.to_dict()
|
||||
config_dict.update(kwargs)
|
||||
return BrowserConfig.from_kwargs(config_dict)
|
||||
|
||||
|
||||
class CrawlerRunConfig:
|
||||
"""
|
||||
Configuration class for controlling how the crawler runs each crawl operation.
|
||||
This includes parameters for content extraction, page manipulation, waiting conditions,
|
||||
caching, and other runtime behaviors.
|
||||
|
||||
This centralizes parameters that were previously scattered as kwargs to `arun()` and related methods.
|
||||
By using this class, you have a single place to understand and adjust the crawling options.
|
||||
|
||||
Attributes:
|
||||
# Content Processing Parameters
|
||||
word_count_threshold (int): Minimum word count threshold before processing content.
|
||||
Default: MIN_WORD_THRESHOLD (typically 200).
|
||||
extraction_strategy (ExtractionStrategy or None): Strategy to extract structured data from crawled pages.
|
||||
Default: None (NoExtractionStrategy is used if None).
|
||||
chunking_strategy (ChunkingStrategy): Strategy to chunk content before extraction.
|
||||
Default: RegexChunking().
|
||||
markdown_generator (MarkdownGenerationStrategy): Strategy for generating markdown.
|
||||
Default: None.
|
||||
content_filter (RelevantContentFilter or None): Optional filter to prune irrelevant content.
|
||||
Default: None.
|
||||
only_text (bool): If True, attempt to extract text-only content where applicable.
|
||||
Default: False.
|
||||
css_selector (str or None): CSS selector to extract a specific portion of the page.
|
||||
Default: None.
|
||||
excluded_tags (list of str or None): List of HTML tags to exclude from processing.
|
||||
Default: None.
|
||||
excluded_selector (str or None): CSS selector to exclude from processing.
|
||||
Default: None.
|
||||
keep_data_attributes (bool): If True, retain `data-*` attributes while removing unwanted attributes.
|
||||
Default: False.
|
||||
remove_forms (bool): If True, remove all `<form>` elements from the HTML.
|
||||
Default: False.
|
||||
prettiify (bool): If True, apply `fast_format_html` to produce prettified HTML output.
|
||||
Default: False.
|
||||
parser_type (str): Type of parser to use for HTML parsing.
|
||||
Default: "lxml".
|
||||
scraping_strategy (ContentScrapingStrategy): Scraping strategy to use.
|
||||
Default: WebScrapingStrategy.
|
||||
proxy_config (dict or None): Detailed proxy configuration, e.g. {"server": "...", "username": "..."}.
|
||||
If None, no additional proxy config. Default: None.
|
||||
|
||||
# Caching Parameters
|
||||
cache_mode (CacheMode or None): Defines how caching is handled.
|
||||
If None, defaults to CacheMode.ENABLED internally.
|
||||
Default: None.
|
||||
session_id (str or None): Optional session ID to persist the browser context and the created
|
||||
page instance. If the ID already exists, the crawler does not
|
||||
create a new page and uses the current page to preserve the state.
|
||||
bypass_cache (bool): Legacy parameter, if True acts like CacheMode.BYPASS.
|
||||
Default: False.
|
||||
disable_cache (bool): Legacy parameter, if True acts like CacheMode.DISABLED.
|
||||
Default: False.
|
||||
no_cache_read (bool): Legacy parameter, if True acts like CacheMode.WRITE_ONLY.
|
||||
Default: False.
|
||||
no_cache_write (bool): Legacy parameter, if True acts like CacheMode.READ_ONLY.
|
||||
Default: False.
|
||||
shared_data (dict or None): Shared data to be passed between hooks.
|
||||
Default: None.
|
||||
|
||||
# Page Navigation and Timing Parameters
|
||||
wait_until (str): The condition to wait for when navigating, e.g. "domcontentloaded".
|
||||
Default: "domcontentloaded".
|
||||
page_timeout (int): Timeout in ms for page operations like navigation.
|
||||
Default: 60000 (60 seconds).
|
||||
wait_for (str or None): A CSS selector or JS condition to wait for before extracting content.
|
||||
Default: None.
|
||||
wait_for_images (bool): If True, wait for images to load before extracting content.
|
||||
Default: False.
|
||||
delay_before_return_html (float): Delay in seconds before retrieving final HTML.
|
||||
Default: 0.1.
|
||||
mean_delay (float): Mean base delay between requests when calling arun_many.
|
||||
Default: 0.1.
|
||||
max_range (float): Max random additional delay range for requests in arun_many.
|
||||
Default: 0.3.
|
||||
semaphore_count (int): Number of concurrent operations allowed.
|
||||
Default: 5.
|
||||
|
||||
# Page Interaction Parameters
|
||||
js_code (str or list of str or None): JavaScript code/snippets to run on the page.
|
||||
Default: None.
|
||||
js_only (bool): If True, indicates subsequent calls are JS-driven updates, not full page loads.
|
||||
Default: False.
|
||||
ignore_body_visibility (bool): If True, ignore whether the body is visible before proceeding.
|
||||
Default: True.
|
||||
scan_full_page (bool): If True, scroll through the entire page to load all content.
|
||||
Default: False.
|
||||
scroll_delay (float): Delay in seconds between scroll steps if scan_full_page is True.
|
||||
Default: 0.2.
|
||||
process_iframes (bool): If True, attempts to process and inline iframe content.
|
||||
Default: False.
|
||||
remove_overlay_elements (bool): If True, remove overlays/popups before extracting HTML.
|
||||
Default: False.
|
||||
simulate_user (bool): If True, simulate user interactions (mouse moves, clicks) for anti-bot measures.
|
||||
Default: False.
|
||||
override_navigator (bool): If True, overrides navigator properties for more human-like behavior.
|
||||
Default: False.
|
||||
magic (bool): If True, attempts automatic handling of overlays/popups.
|
||||
Default: False.
|
||||
adjust_viewport_to_content (bool): If True, adjust viewport according to the page content dimensions.
|
||||
Default: False.
|
||||
|
||||
# Media Handling Parameters
|
||||
screenshot (bool): Whether to take a screenshot after crawling.
|
||||
Default: False.
|
||||
screenshot_wait_for (float or None): Additional wait time before taking a screenshot.
|
||||
Default: None.
|
||||
screenshot_height_threshold (int): Threshold for page height to decide screenshot strategy.
|
||||
Default: SCREENSHOT_HEIGHT_TRESHOLD (from config, e.g. 20000).
|
||||
pdf (bool): Whether to generate a PDF of the page.
|
||||
Default: False.
|
||||
image_description_min_word_threshold (int): Minimum words for image description extraction.
|
||||
Default: IMAGE_DESCRIPTION_MIN_WORD_THRESHOLD (e.g., 50).
|
||||
image_score_threshold (int): Minimum score threshold for processing an image.
|
||||
Default: IMAGE_SCORE_THRESHOLD (e.g., 3).
|
||||
exclude_external_images (bool): If True, exclude all external images from processing.
|
||||
Default: False.
|
||||
|
||||
# Link and Domain Handling Parameters
|
||||
exclude_social_media_domains (list of str): List of domains to exclude for social media links.
|
||||
Default: SOCIAL_MEDIA_DOMAINS (from config).
|
||||
exclude_external_links (bool): If True, exclude all external links from the results.
|
||||
Default: False.
|
||||
exclude_social_media_links (bool): If True, exclude links pointing to social media domains.
|
||||
Default: False.
|
||||
exclude_domains (list of str): List of specific domains to exclude from results.
|
||||
Default: [].
|
||||
|
||||
# Debugging and Logging Parameters
|
||||
verbose (bool): Enable verbose logging.
|
||||
Default: True.
|
||||
log_console (bool): If True, log console messages from the page.
|
||||
Default: False.
|
||||
|
||||
# Streaming Parameters
|
||||
stream (bool): If True, enables streaming of crawled URLs as they are processed when used with arun_many.
|
||||
Default: False.
|
||||
|
||||
# Optional Parameters
url (str or None): The URL to crawl. Optional here, since it can also be passed directly to `arun()`.
    Default: None.
check_robots_txt (bool): Whether to check robots.txt rules before crawling.
    Default: False.
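
    Example:
        ```python
        # Minimal illustrative sketch: bypass the cache, run a small JS snippet,
        # and narrow extraction to a CSS selector (values are placeholders).
        run_config = CrawlerRunConfig(
            cache_mode=CacheMode.BYPASS,
            js_code="window.scrollTo(0, document.body.scrollHeight);",
            css_selector="article",
            screenshot=True,
        )
        ```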
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
# Content Processing Parameters
|
||||
word_count_threshold: int = MIN_WORD_THRESHOLD,
|
||||
extraction_strategy: ExtractionStrategy = None,
|
||||
chunking_strategy: ChunkingStrategy = RegexChunking(),
|
||||
markdown_generator: MarkdownGenerationStrategy = None,
|
||||
content_filter=None,
|
||||
only_text: bool = False,
|
||||
css_selector: str = None,
|
||||
excluded_tags: list = None,
|
||||
excluded_selector: str = None,
|
||||
keep_data_attributes: bool = False,
|
||||
remove_forms: bool = False,
|
||||
prettiify: bool = False,
|
||||
parser_type: str = "lxml",
|
||||
scraping_strategy: ContentScrapingStrategy = None,
|
||||
proxy_config: dict = None,
|
||||
# SSL Parameters
|
||||
fetch_ssl_certificate: bool = False,
|
||||
# Caching Parameters
|
||||
cache_mode=None,
|
||||
session_id: str = None,
|
||||
bypass_cache: bool = False,
|
||||
disable_cache: bool = False,
|
||||
no_cache_read: bool = False,
|
||||
no_cache_write: bool = False,
|
||||
shared_data: dict = None,
|
||||
# Page Navigation and Timing Parameters
|
||||
wait_until: str = "domcontentloaded",
|
||||
page_timeout: int = PAGE_TIMEOUT,
|
||||
wait_for: str = None,
|
||||
wait_for_images: bool = False,
|
||||
delay_before_return_html: float = 0.1,
|
||||
mean_delay: float = 0.1,
|
||||
max_range: float = 0.3,
|
||||
semaphore_count: int = 5,
|
||||
# Page Interaction Parameters
|
||||
js_code: Union[str, List[str]] = None,
|
||||
js_only: bool = False,
|
||||
ignore_body_visibility: bool = True,
|
||||
scan_full_page: bool = False,
|
||||
scroll_delay: float = 0.2,
|
||||
process_iframes: bool = False,
|
||||
remove_overlay_elements: bool = False,
|
||||
simulate_user: bool = False,
|
||||
override_navigator: bool = False,
|
||||
magic: bool = False,
|
||||
adjust_viewport_to_content: bool = False,
|
||||
# Media Handling Parameters
|
||||
screenshot: bool = False,
|
||||
screenshot_wait_for: float = None,
|
||||
screenshot_height_threshold: int = SCREENSHOT_HEIGHT_TRESHOLD,
|
||||
pdf: bool = False,
|
||||
image_description_min_word_threshold: int = IMAGE_DESCRIPTION_MIN_WORD_THRESHOLD,
|
||||
image_score_threshold: int = IMAGE_SCORE_THRESHOLD,
|
||||
exclude_external_images: bool = False,
|
||||
# Link and Domain Handling Parameters
|
||||
exclude_social_media_domains: list = None,
|
||||
exclude_external_links: bool = False,
|
||||
exclude_social_media_links: bool = False,
|
||||
exclude_domains: list = None,
|
||||
# Debugging and Logging Parameters
|
||||
verbose: bool = True,
|
||||
log_console: bool = False,
|
||||
# Streaming Parameters
|
||||
stream: bool = False,
|
||||
url: str = None,
|
||||
check_robots_txt: bool = False,
|
||||
):
|
||||
self.url = url
|
||||
|
||||
# Content Processing Parameters
|
||||
self.word_count_threshold = word_count_threshold
|
||||
self.extraction_strategy = extraction_strategy
|
||||
self.chunking_strategy = chunking_strategy
|
||||
self.markdown_generator = markdown_generator
|
||||
self.content_filter = content_filter
|
||||
self.only_text = only_text
|
||||
self.css_selector = css_selector
|
||||
self.excluded_tags = excluded_tags or []
|
||||
self.excluded_selector = excluded_selector or ""
|
||||
self.keep_data_attributes = keep_data_attributes
|
||||
self.remove_forms = remove_forms
|
||||
self.prettiify = prettiify
|
||||
self.parser_type = parser_type
|
||||
self.scraping_strategy = scraping_strategy or WebScrapingStrategy()
|
||||
self.proxy_config = proxy_config
|
||||
|
||||
# SSL Parameters
|
||||
self.fetch_ssl_certificate = fetch_ssl_certificate
|
||||
|
||||
# Caching Parameters
|
||||
self.cache_mode = cache_mode
|
||||
self.session_id = session_id
|
||||
self.bypass_cache = bypass_cache
|
||||
self.disable_cache = disable_cache
|
||||
self.no_cache_read = no_cache_read
|
||||
self.no_cache_write = no_cache_write
|
||||
self.shared_data = shared_data
|
||||
|
||||
# Page Navigation and Timing Parameters
|
||||
self.wait_until = wait_until
|
||||
self.page_timeout = page_timeout
|
||||
self.wait_for = wait_for
|
||||
self.wait_for_images = wait_for_images
|
||||
self.delay_before_return_html = delay_before_return_html
|
||||
self.mean_delay = mean_delay
|
||||
self.max_range = max_range
|
||||
self.semaphore_count = semaphore_count
|
||||
|
||||
# Page Interaction Parameters
|
||||
self.js_code = js_code
|
||||
self.js_only = js_only
|
||||
self.ignore_body_visibility = ignore_body_visibility
|
||||
self.scan_full_page = scan_full_page
|
||||
self.scroll_delay = scroll_delay
|
||||
self.process_iframes = process_iframes
|
||||
self.remove_overlay_elements = remove_overlay_elements
|
||||
self.simulate_user = simulate_user
|
||||
self.override_navigator = override_navigator
|
||||
self.magic = magic
|
||||
self.adjust_viewport_to_content = adjust_viewport_to_content
|
||||
|
||||
# Media Handling Parameters
|
||||
self.screenshot = screenshot
|
||||
self.screenshot_wait_for = screenshot_wait_for
|
||||
self.screenshot_height_threshold = screenshot_height_threshold
|
||||
self.pdf = pdf
|
||||
self.image_description_min_word_threshold = image_description_min_word_threshold
|
||||
self.image_score_threshold = image_score_threshold
|
||||
self.exclude_external_images = exclude_external_images
|
||||
|
||||
# Link and Domain Handling Parameters
|
||||
self.exclude_social_media_domains = (
|
||||
exclude_social_media_domains or SOCIAL_MEDIA_DOMAINS
|
||||
)
|
||||
self.exclude_external_links = exclude_external_links
|
||||
self.exclude_social_media_links = exclude_social_media_links
|
||||
self.exclude_domains = exclude_domains or []
|
||||
|
||||
# Debugging and Logging Parameters
|
||||
self.verbose = verbose
|
||||
self.log_console = log_console
|
||||
|
||||
# Streaming Parameters
|
||||
self.stream = stream
|
||||
|
||||
# Robots.txt Handling Parameters
|
||||
self.check_robots_txt = check_robots_txt
|
||||
|
||||
# Validate type of extraction strategy and chunking strategy if they are provided
|
||||
if self.extraction_strategy is not None and not isinstance(
|
||||
self.extraction_strategy, ExtractionStrategy
|
||||
):
|
||||
raise ValueError(
|
||||
"extraction_strategy must be an instance of ExtractionStrategy"
|
||||
)
|
||||
if self.chunking_strategy is not None and not isinstance(
|
||||
self.chunking_strategy, ChunkingStrategy
|
||||
):
|
||||
raise ValueError(
|
||||
"chunking_strategy must be an instance of ChunkingStrategy"
|
||||
)
|
||||
|
||||
# Set default chunking strategy if None
|
||||
if self.chunking_strategy is None:
|
||||
self.chunking_strategy = RegexChunking()
|
||||
|
||||
@staticmethod
|
||||
def from_kwargs(kwargs: dict) -> "CrawlerRunConfig":
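    """Build a CrawlerRunConfig from a plain kwargs dict, falling back to the documented defaults for missing keys."""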
|
||||
return CrawlerRunConfig(
|
||||
# Content Processing Parameters
|
||||
word_count_threshold=kwargs.get("word_count_threshold", 200),
|
||||
extraction_strategy=kwargs.get("extraction_strategy"),
|
||||
chunking_strategy=kwargs.get("chunking_strategy", RegexChunking()),
|
||||
markdown_generator=kwargs.get("markdown_generator"),
|
||||
content_filter=kwargs.get("content_filter"),
|
||||
only_text=kwargs.get("only_text", False),
|
||||
css_selector=kwargs.get("css_selector"),
|
||||
excluded_tags=kwargs.get("excluded_tags", []),
|
||||
excluded_selector=kwargs.get("excluded_selector", ""),
|
||||
keep_data_attributes=kwargs.get("keep_data_attributes", False),
|
||||
remove_forms=kwargs.get("remove_forms", False),
|
||||
prettiify=kwargs.get("prettiify", False),
|
||||
parser_type=kwargs.get("parser_type", "lxml"),
|
||||
scraping_strategy=kwargs.get("scraping_strategy"),
|
||||
proxy_config=kwargs.get("proxy_config"),
|
||||
# SSL Parameters
|
||||
fetch_ssl_certificate=kwargs.get("fetch_ssl_certificate", False),
|
||||
# Caching Parameters
|
||||
cache_mode=kwargs.get("cache_mode"),
|
||||
session_id=kwargs.get("session_id"),
|
||||
bypass_cache=kwargs.get("bypass_cache", False),
|
||||
disable_cache=kwargs.get("disable_cache", False),
|
||||
no_cache_read=kwargs.get("no_cache_read", False),
|
||||
no_cache_write=kwargs.get("no_cache_write", False),
|
||||
shared_data=kwargs.get("shared_data", None),
|
||||
# Page Navigation and Timing Parameters
|
||||
wait_until=kwargs.get("wait_until", "domcontentloaded"),
|
||||
page_timeout=kwargs.get("page_timeout", 60000),
|
||||
wait_for=kwargs.get("wait_for"),
|
||||
wait_for_images=kwargs.get("wait_for_images", False),
|
||||
delay_before_return_html=kwargs.get("delay_before_return_html", 0.1),
|
||||
mean_delay=kwargs.get("mean_delay", 0.1),
|
||||
max_range=kwargs.get("max_range", 0.3),
|
||||
semaphore_count=kwargs.get("semaphore_count", 5),
|
||||
# Page Interaction Parameters
|
||||
js_code=kwargs.get("js_code"),
|
||||
js_only=kwargs.get("js_only", False),
|
||||
ignore_body_visibility=kwargs.get("ignore_body_visibility", True),
|
||||
scan_full_page=kwargs.get("scan_full_page", False),
|
||||
scroll_delay=kwargs.get("scroll_delay", 0.2),
|
||||
process_iframes=kwargs.get("process_iframes", False),
|
||||
remove_overlay_elements=kwargs.get("remove_overlay_elements", False),
|
||||
simulate_user=kwargs.get("simulate_user", False),
|
||||
override_navigator=kwargs.get("override_navigator", False),
|
||||
magic=kwargs.get("magic", False),
|
||||
adjust_viewport_to_content=kwargs.get("adjust_viewport_to_content", False),
|
||||
# Media Handling Parameters
|
||||
screenshot=kwargs.get("screenshot", False),
|
||||
screenshot_wait_for=kwargs.get("screenshot_wait_for"),
|
||||
screenshot_height_threshold=kwargs.get(
|
||||
"screenshot_height_threshold", SCREENSHOT_HEIGHT_TRESHOLD
|
||||
),
|
||||
pdf=kwargs.get("pdf", False),
|
||||
image_description_min_word_threshold=kwargs.get(
|
||||
"image_description_min_word_threshold",
|
||||
IMAGE_DESCRIPTION_MIN_WORD_THRESHOLD,
|
||||
),
|
||||
image_score_threshold=kwargs.get(
|
||||
"image_score_threshold", IMAGE_SCORE_THRESHOLD
|
||||
),
|
||||
exclude_external_images=kwargs.get("exclude_external_images", False),
|
||||
# Link and Domain Handling Parameters
|
||||
exclude_social_media_domains=kwargs.get(
|
||||
"exclude_social_media_domains", SOCIAL_MEDIA_DOMAINS
|
||||
),
|
||||
exclude_external_links=kwargs.get("exclude_external_links", False),
|
||||
exclude_social_media_links=kwargs.get("exclude_social_media_links", False),
|
||||
exclude_domains=kwargs.get("exclude_domains", []),
|
||||
# Debugging and Logging Parameters
|
||||
verbose=kwargs.get("verbose", True),
|
||||
log_console=kwargs.get("log_console", False),
|
||||
# Streaming Parameters
|
||||
stream=kwargs.get("stream", False),
|
||||
url=kwargs.get("url"),
|
||||
check_robots_txt=kwargs.get("check_robots_txt", False),
|
||||
)
|
||||
|
||||
# Return a dictionary representation of this configuration
|
||||
def to_dict(self):
|
||||
return {
|
||||
"word_count_threshold": self.word_count_threshold,
|
||||
"extraction_strategy": self.extraction_strategy,
|
||||
"chunking_strategy": self.chunking_strategy,
|
||||
"markdown_generator": self.markdown_generator,
|
||||
"content_filter": self.content_filter,
|
||||
"only_text": self.only_text,
|
||||
"css_selector": self.css_selector,
|
||||
"excluded_tags": self.excluded_tags,
|
||||
"excluded_selector": self.excluded_selector,
|
||||
"keep_data_attributes": self.keep_data_attributes,
|
||||
"remove_forms": self.remove_forms,
|
||||
"prettiify": self.prettiify,
|
||||
"parser_type": self.parser_type,
|
||||
"scraping_strategy": self.scraping_strategy,
|
||||
"proxy_config": self.proxy_config,
|
||||
"fetch_ssl_certificate": self.fetch_ssl_certificate,
|
||||
"cache_mode": self.cache_mode,
|
||||
"session_id": self.session_id,
|
||||
"bypass_cache": self.bypass_cache,
|
||||
"disable_cache": self.disable_cache,
|
||||
"no_cache_read": self.no_cache_read,
|
||||
"no_cache_write": self.no_cache_write,
|
||||
"shared_data": self.shared_data,
|
||||
"wait_until": self.wait_until,
|
||||
"page_timeout": self.page_timeout,
|
||||
"wait_for": self.wait_for,
|
||||
"wait_for_images": self.wait_for_images,
|
||||
"delay_before_return_html": self.delay_before_return_html,
|
||||
"mean_delay": self.mean_delay,
|
||||
"max_range": self.max_range,
|
||||
"semaphore_count": self.semaphore_count,
|
||||
"js_code": self.js_code,
|
||||
"js_only": self.js_only,
|
||||
"ignore_body_visibility": self.ignore_body_visibility,
|
||||
"scan_full_page": self.scan_full_page,
|
||||
"scroll_delay": self.scroll_delay,
|
||||
"process_iframes": self.process_iframes,
|
||||
"remove_overlay_elements": self.remove_overlay_elements,
|
||||
"simulate_user": self.simulate_user,
|
||||
"override_navigator": self.override_navigator,
|
||||
"magic": self.magic,
|
||||
"adjust_viewport_to_content": self.adjust_viewport_to_content,
|
||||
"screenshot": self.screenshot,
|
||||
"screenshot_wait_for": self.screenshot_wait_for,
|
||||
"screenshot_height_threshold": self.screenshot_height_threshold,
|
||||
"pdf": self.pdf,
|
||||
"image_description_min_word_threshold": self.image_description_min_word_threshold,
|
||||
"image_score_threshold": self.image_score_threshold,
|
||||
"exclude_external_images": self.exclude_external_images,
|
||||
"exclude_social_media_domains": self.exclude_social_media_domains,
|
||||
"exclude_external_links": self.exclude_external_links,
|
||||
"exclude_social_media_links": self.exclude_social_media_links,
|
||||
"exclude_domains": self.exclude_domains,
|
||||
"verbose": self.verbose,
|
||||
"log_console": self.log_console,
|
||||
"stream": self.stream,
|
||||
"url": self.url,
|
||||
"check_robots_txt": self.check_robots_txt,
|
||||
}
|
||||
|
||||
def clone(self, **kwargs):
|
||||
"""Create a copy of this configuration with updated values.
|
||||
|
||||
Args:
|
||||
**kwargs: Key-value pairs of configuration options to update
|
||||
|
||||
Returns:
|
||||
CrawlerRunConfig: A new instance with the specified updates
|
||||
|
||||
Example:
|
||||
```python
|
||||
# Create a new config with streaming enabled
|
||||
stream_config = config.clone(stream=True)
|
||||
|
||||
# Create a new config with multiple updates
|
||||
new_config = config.clone(
|
||||
stream=True,
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
verbose=True
|
||||
)
|
||||
```
|
||||
"""
|
||||
config_dict = self.to_dict()
|
||||
config_dict.update(kwargs)
|
||||
return CrawlerRunConfig.from_kwargs(config_dict)
|
||||
@@ -1,558 +0,0 @@
|
||||
import asyncio
|
||||
import base64
|
||||
import time
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Callable, Dict, Any, List, Optional, Awaitable
|
||||
import os
|
||||
from playwright.async_api import async_playwright, Page, Browser, Error
|
||||
from io import BytesIO
|
||||
from PIL import Image, ImageDraw, ImageFont
|
||||
from pathlib import Path
|
||||
from playwright.async_api import ProxySettings
|
||||
from pydantic import BaseModel
|
||||
import hashlib
|
||||
import json
|
||||
import uuid
|
||||
from playwright_stealth import stealth_async
|
||||
|
||||
class AsyncCrawlResponse(BaseModel):
|
||||
html: str
|
||||
response_headers: Dict[str, str]
|
||||
status_code: int
|
||||
screenshot: Optional[str] = None
|
||||
get_delayed_content: Optional[Callable[[Optional[float]], Awaitable[str]]] = None
|
||||
|
||||
class Config:
|
||||
arbitrary_types_allowed = True
|
||||
|
||||
class AsyncCrawlerStrategy(ABC):
|
||||
@abstractmethod
|
||||
async def crawl(self, url: str, **kwargs) -> AsyncCrawlResponse:
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
async def crawl_many(self, urls: List[str], **kwargs) -> List[AsyncCrawlResponse]:
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
async def take_screenshot(self, url: str) -> str:
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def update_user_agent(self, user_agent: str):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def set_hook(self, hook_type: str, hook: Callable):
|
||||
pass
|
||||
|
||||
class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
def __init__(self, use_cached_html=False, js_code=None, **kwargs):
|
||||
self.use_cached_html = use_cached_html
|
||||
self.user_agent = kwargs.get(
|
||||
"user_agent",
|
||||
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
|
||||
"(KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
|
||||
)
|
||||
self.proxy = kwargs.get("proxy")
|
||||
self.headless = kwargs.get("headless", True)
|
||||
self.browser_type = kwargs.get("browser_type", "chromium")
|
||||
self.headers = kwargs.get("headers", {})
|
||||
self.sessions = {}
|
||||
self.session_ttl = 1800
|
||||
self.js_code = js_code
|
||||
self.verbose = kwargs.get("verbose", False)
|
||||
self.playwright = None
|
||||
self.browser = None
|
||||
self.hooks = {
|
||||
'on_browser_created': None,
|
||||
'on_user_agent_updated': None,
|
||||
'on_execution_started': None,
|
||||
'before_goto': None,
|
||||
'after_goto': None,
|
||||
'before_return_html': None,
|
||||
'before_retrieve_html': None
|
||||
}
|
||||
|
||||
async def __aenter__(self):
|
||||
await self.start()
|
||||
return self
|
||||
|
||||
async def __aexit__(self, exc_type, exc_val, exc_tb):
|
||||
await self.close()
|
||||
|
||||
async def start(self):
|
||||
if self.playwright is None:
|
||||
self.playwright = await async_playwright().start()
|
||||
if self.browser is None:
|
||||
browser_args = {
|
||||
"headless": self.headless,
|
||||
"args": [
|
||||
"--disable-gpu",
|
||||
"--no-sandbox",
|
||||
"--disable-dev-shm-usage",
|
||||
"--disable-blink-features=AutomationControlled",
|
||||
"--disable-infobars",
|
||||
"--window-position=0,0",
|
||||
"--ignore-certificate-errors",
|
||||
"--ignore-certificate-errors-spki-list",
|
||||
# "--headless=new", # Use the new headless mode
|
||||
]
|
||||
}
|
||||
|
||||
# Add proxy settings if a proxy is specified
|
||||
if self.proxy:
|
||||
proxy_settings = ProxySettings(server=self.proxy)
|
||||
browser_args["proxy"] = proxy_settings
|
||||
|
||||
# Select the appropriate browser based on the browser_type
|
||||
if self.browser_type == "firefox":
|
||||
self.browser = await self.playwright.firefox.launch(**browser_args)
|
||||
elif self.browser_type == "webkit":
|
||||
self.browser = await self.playwright.webkit.launch(**browser_args)
|
||||
else:
|
||||
self.browser = await self.playwright.chromium.launch(**browser_args)
|
||||
|
||||
await self.execute_hook('on_browser_created', self.browser)
|
||||
|
||||
async def close(self):
|
||||
if self.browser:
|
||||
await self.browser.close()
|
||||
self.browser = None
|
||||
if self.playwright:
|
||||
await self.playwright.stop()
|
||||
self.playwright = None
|
||||
|
||||
def __del__(self):
|
||||
if self.browser or self.playwright:
|
||||
asyncio.get_event_loop().run_until_complete(self.close())
|
||||
|
||||
def set_hook(self, hook_type: str, hook: Callable):
|
||||
if hook_type in self.hooks:
|
||||
self.hooks[hook_type] = hook
|
||||
else:
|
||||
raise ValueError(f"Invalid hook type: {hook_type}")
|
||||
|
||||
async def execute_hook(self, hook_type: str, *args):
|
||||
hook = self.hooks.get(hook_type)
|
||||
if hook:
|
||||
if asyncio.iscoroutinefunction(hook):
|
||||
return await hook(*args)
|
||||
else:
|
||||
return hook(*args)
|
||||
return args[0] if args else None
|
||||
|
||||
def update_user_agent(self, user_agent: str):
|
||||
self.user_agent = user_agent
|
||||
|
||||
def set_custom_headers(self, headers: Dict[str, str]):
|
||||
self.headers = headers
|
||||
|
||||
async def kill_session(self, session_id: str):
|
||||
if session_id in self.sessions:
|
||||
context, page, _ = self.sessions[session_id]
|
||||
await page.close()
|
||||
await context.close()
|
||||
del self.sessions[session_id]
|
||||
|
||||
def _cleanup_expired_sessions(self):
|
||||
current_time = time.time()
|
||||
expired_sessions = [
|
||||
sid for sid, (_, _, last_used) in self.sessions.items()
|
||||
if current_time - last_used > self.session_ttl
|
||||
]
|
||||
for sid in expired_sessions:
|
||||
asyncio.create_task(self.kill_session(sid))
|
||||
|
||||
async def smart_wait(self, page: Page, wait_for: str, timeout: float = 30000):
|
||||
wait_for = wait_for.strip()
|
||||
|
||||
if wait_for.startswith('js:'):
|
||||
# Explicitly specified JavaScript
|
||||
js_code = wait_for[3:].strip()
|
||||
return await self.csp_compliant_wait(page, js_code, timeout)
|
||||
elif wait_for.startswith('css:'):
|
||||
# Explicitly specified CSS selector
|
||||
css_selector = wait_for[4:].strip()
|
||||
try:
|
||||
await page.wait_for_selector(css_selector, timeout=timeout)
|
||||
except Error as e:
|
||||
if 'Timeout' in str(e):
|
||||
raise TimeoutError(f"Timeout after {timeout}ms waiting for selector '{css_selector}'")
|
||||
else:
|
||||
raise ValueError(f"Invalid CSS selector: '{css_selector}'")
|
||||
else:
|
||||
# Auto-detect based on content
|
||||
if wait_for.startswith('()') or wait_for.startswith('function'):
|
||||
# It's likely a JavaScript function
|
||||
return await self.csp_compliant_wait(page, wait_for, timeout)
|
||||
else:
|
||||
# Assume it's a CSS selector first
|
||||
try:
|
||||
await page.wait_for_selector(wait_for, timeout=timeout)
|
||||
except Error as e:
|
||||
if 'Timeout' in str(e):
|
||||
raise TimeoutError(f"Timeout after {timeout}ms waiting for selector '{wait_for}'")
|
||||
else:
|
||||
# If it's not a timeout error, it might be an invalid selector
|
||||
# Let's try to evaluate it as a JavaScript function as a fallback
|
||||
try:
|
||||
return await self.csp_compliant_wait(page, f"() => {{{wait_for}}}", timeout)
|
||||
except Error:
|
||||
raise ValueError(f"Invalid wait_for parameter: '{wait_for}'. "
|
||||
"It should be either a valid CSS selector, a JavaScript function, "
|
||||
"or explicitly prefixed with 'js:' or 'css:'.")
|
||||
|
||||
async def csp_compliant_wait(self, page: Page, user_wait_function: str, timeout: float = 30000):
|
||||
wrapper_js = f"""
|
||||
async () => {{
|
||||
const userFunction = {user_wait_function};
|
||||
const startTime = Date.now();
|
||||
while (true) {{
|
||||
if (await userFunction()) {{
|
||||
return true;
|
||||
}}
|
||||
if (Date.now() - startTime > {timeout}) {{
|
||||
throw new Error('Timeout waiting for condition');
|
||||
}}
|
||||
await new Promise(resolve => setTimeout(resolve, 100));
|
||||
}}
|
||||
}}
|
||||
"""
|
||||
|
||||
try:
|
||||
await page.evaluate(wrapper_js)
|
||||
except TimeoutError:
|
||||
raise TimeoutError(f"Timeout after {timeout}ms waiting for condition")
|
||||
except Exception as e:
|
||||
raise RuntimeError(f"Error in wait condition: {str(e)}")
|
||||
|
||||
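    # Illustrative wait_for values handled by smart_wait above (the selector and
    # predicate here are examples, not part of the module):
    #   wait_for="css:.article-body"       -> page.wait_for_selector('.article-body')
    #   wait_for="js:() => window.loaded"  -> polled every 100 ms by csp_compliant_wait
    #   wait_for=".article-body"           -> tried as a CSS selector first, then as JS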
async def process_iframes(self, page):
|
||||
# Find all iframes
|
||||
iframes = await page.query_selector_all('iframe')
|
||||
|
||||
for i, iframe in enumerate(iframes):
|
||||
try:
|
||||
# Add a unique identifier to the iframe
|
||||
await iframe.evaluate(f'(element) => element.id = "iframe-{i}"')
|
||||
|
||||
# Get the frame associated with this iframe
|
||||
frame = await iframe.content_frame()
|
||||
|
||||
if frame:
|
||||
# Wait for the frame to load
|
||||
await frame.wait_for_load_state('load', timeout=30000) # 30 seconds timeout
|
||||
|
||||
# Extract the content of the iframe's body
|
||||
iframe_content = await frame.evaluate('() => document.body.innerHTML')
|
||||
|
||||
# Generate a unique class name for this iframe
|
||||
class_name = f'extracted-iframe-content-{i}'
|
||||
|
||||
# Replace the iframe with a div containing the extracted content
|
||||
_iframe = iframe_content.replace('`', '\\`')
|
||||
await page.evaluate(f"""
|
||||
() => {{
|
||||
const iframe = document.getElementById('iframe-{i}');
|
||||
const div = document.createElement('div');
|
||||
div.innerHTML = `{_iframe}`;
|
||||
div.className = '{class_name}';
|
||||
iframe.replaceWith(div);
|
||||
}}
|
||||
""")
|
||||
else:
|
||||
print(f"Warning: Could not access content frame for iframe {i}")
|
||||
except Exception as e:
|
||||
print(f"Error processing iframe {i}: {str(e)}")
|
||||
|
||||
# Return the page object
|
||||
return page
|
||||
|
||||
async def crawl(self, url: str, **kwargs) -> AsyncCrawlResponse:
|
||||
response_headers = {}
|
||||
status_code = None
|
||||
|
||||
self._cleanup_expired_sessions()
|
||||
session_id = kwargs.get("session_id")
|
||||
if session_id:
|
||||
context, page, _ = self.sessions.get(session_id, (None, None, None))
|
||||
if not context:
|
||||
context = await self.browser.new_context(
|
||||
user_agent=self.user_agent,
|
||||
viewport={"width": 1920, "height": 1080},
|
||||
proxy={"server": self.proxy} if self.proxy else None
|
||||
)
|
||||
await context.set_extra_http_headers(self.headers)
|
||||
page = await context.new_page()
|
||||
self.sessions[session_id] = (context, page, time.time())
|
||||
else:
|
||||
context = await self.browser.new_context(
|
||||
user_agent=self.user_agent,
|
||||
viewport={"width": 1920, "height": 1080},
|
||||
proxy={"server": self.proxy} if self.proxy else None
|
||||
)
|
||||
await context.set_extra_http_headers(self.headers)
|
||||
|
||||
if kwargs.get("override_navigator", False):
|
||||
# Inject scripts to override navigator properties
|
||||
await context.add_init_script("""
|
||||
// Pass the Permissions Test.
|
||||
const originalQuery = window.navigator.permissions.query;
|
||||
window.navigator.permissions.query = (parameters) => (
|
||||
parameters.name === 'notifications' ?
|
||||
Promise.resolve({ state: Notification.permission }) :
|
||||
originalQuery(parameters)
|
||||
);
|
||||
Object.defineProperty(navigator, 'webdriver', {
|
||||
get: () => undefined
|
||||
});
|
||||
window.navigator.chrome = {
|
||||
runtime: {},
|
||||
// Add other properties if necessary
|
||||
};
|
||||
Object.defineProperty(navigator, 'plugins', {
|
||||
get: () => [1, 2, 3, 4, 5],
|
||||
});
|
||||
Object.defineProperty(navigator, 'languages', {
|
||||
get: () => ['en-US', 'en'],
|
||||
});
|
||||
Object.defineProperty(document, 'hidden', {
|
||||
get: () => false
|
||||
});
|
||||
Object.defineProperty(document, 'visibilityState', {
|
||||
get: () => 'visible'
|
||||
});
|
||||
""")
|
||||
|
||||
page = await context.new_page()
|
||||
|
||||
try:
|
||||
if self.verbose:
|
||||
print(f"[LOG] 🕸️ Crawling {url} using AsyncPlaywrightCrawlerStrategy...")
|
||||
|
||||
if self.use_cached_html:
|
||||
cache_file_path = os.path.join(
|
||||
Path.home(), ".crawl4ai", "cache", hashlib.md5(url.encode()).hexdigest()
|
||||
)
|
||||
if os.path.exists(cache_file_path):
|
||||
html = ""
|
||||
with open(cache_file_path, "r") as f:
|
||||
html = f.read()
|
||||
# retrieve response headers and status code from cache
|
||||
with open(cache_file_path + ".meta", "r") as f:
|
||||
meta = json.load(f)
|
||||
response_headers = meta.get("response_headers", {})
|
||||
status_code = meta.get("status_code")
|
||||
response = AsyncCrawlResponse(
|
||||
html=html, response_headers=response_headers, status_code=status_code
|
||||
)
|
||||
return response
|
||||
|
||||
if not kwargs.get("js_only", False):
|
||||
await self.execute_hook('before_goto', page)
|
||||
|
||||
response = await page.goto("about:blank")
|
||||
await stealth_async(page)
|
||||
response = await page.goto(
|
||||
url, wait_until="domcontentloaded", timeout=kwargs.get("page_timeout", 60000)
|
||||
)
|
||||
|
||||
# await stealth_async(page)
|
||||
# response = await page.goto("about:blank")
|
||||
# await stealth_async(page)
|
||||
# await page.evaluate(f"window.location.href = '{url}'")
|
||||
|
||||
await self.execute_hook('after_goto', page)
|
||||
|
||||
# Get status code and headers
|
||||
status_code = response.status
|
||||
response_headers = response.headers
|
||||
else:
|
||||
status_code = 200
|
||||
response_headers = {}
|
||||
|
||||
await page.wait_for_selector('body')
|
||||
await page.evaluate("window.scrollTo(0, document.body.scrollHeight)")
|
||||
|
||||
js_code = kwargs.get("js_code", kwargs.get("js", self.js_code))
|
||||
if js_code:
|
||||
if isinstance(js_code, str):
|
||||
await page.evaluate(js_code)
|
||||
elif isinstance(js_code, list):
|
||||
for js in js_code:
|
||||
await page.evaluate(js)
|
||||
|
||||
await page.wait_for_load_state('networkidle')
|
||||
# Check for on execution event
|
||||
await self.execute_hook('on_execution_started', page)
|
||||
|
||||
if kwargs.get("simulate_user", False):
|
||||
# Simulate user interactions
|
||||
await page.mouse.move(100, 100)
|
||||
await page.mouse.down()
|
||||
await page.mouse.up()
|
||||
await page.keyboard.press('ArrowDown')
|
||||
|
||||
# Handle the wait_for parameter
|
||||
wait_for = kwargs.get("wait_for")
|
||||
if wait_for:
|
||||
try:
|
||||
await self.smart_wait(page, wait_for, timeout=kwargs.get("page_timeout", 60000))
|
||||
except Exception as e:
|
||||
raise RuntimeError(f"Wait condition failed: {str(e)}")
|
||||
|
||||
|
||||
|
||||
# Update image dimensions
|
||||
update_image_dimensions_js = """
|
||||
() => {
|
||||
return new Promise((resolve) => {
|
||||
const filterImage = (img) => {
|
||||
// Filter out images that are too small
|
||||
if (img.width < 100 && img.height < 100) return false;
|
||||
|
||||
// Filter out images that are not visible
|
||||
const rect = img.getBoundingClientRect();
|
||||
if (rect.width === 0 || rect.height === 0) return false;
|
||||
|
||||
// Filter out images with certain class names (e.g., icons, thumbnails)
|
||||
if (img.classList.contains('icon') || img.classList.contains('thumbnail')) return false;
|
||||
|
||||
// Filter out images with certain patterns in their src (e.g., placeholder images)
|
||||
if (img.src.includes('placeholder') || img.src.includes('icon')) return false;
|
||||
|
||||
return true;
|
||||
};
|
||||
|
||||
const images = Array.from(document.querySelectorAll('img')).filter(filterImage);
|
||||
let imagesLeft = images.length;
|
||||
|
||||
if (imagesLeft === 0) {
|
||||
resolve();
|
||||
return;
|
||||
}
|
||||
|
||||
const checkImage = (img) => {
|
||||
if (img.complete && img.naturalWidth !== 0) {
|
||||
img.setAttribute('width', img.naturalWidth);
|
||||
img.setAttribute('height', img.naturalHeight);
|
||||
imagesLeft--;
|
||||
if (imagesLeft === 0) resolve();
|
||||
}
|
||||
};
|
||||
|
||||
images.forEach(img => {
|
||||
checkImage(img);
|
||||
if (!img.complete) {
|
||||
img.onload = () => {
|
||||
checkImage(img);
|
||||
};
|
||||
img.onerror = () => {
|
||||
imagesLeft--;
|
||||
if (imagesLeft === 0) resolve();
|
||||
};
|
||||
}
|
||||
});
|
||||
|
||||
// Fallback timeout of 5 seconds
|
||||
setTimeout(() => resolve(), 5000);
|
||||
});
|
||||
}
|
||||
"""
|
||||
await page.evaluate(update_image_dimensions_js)
|
||||
|
||||
# Wait a bit for any onload events to complete
|
||||
await page.wait_for_timeout(100)
|
||||
|
||||
# Process iframes
|
||||
if kwargs.get("process_iframes", False):
|
||||
page = await self.process_iframes(page)
|
||||
|
||||
await self.execute_hook('before_retrieve_html', page)
|
||||
# Check if delay_before_return_html is set then wait for that time
|
||||
delay_before_return_html = kwargs.get("delay_before_return_html")
|
||||
if delay_before_return_html:
|
||||
await asyncio.sleep(delay_before_return_html)
|
||||
|
||||
html = await page.content()
|
||||
await self.execute_hook('before_return_html', page, html)
|
||||
|
||||
# Check if kwargs has screenshot=True then take screenshot
|
||||
screenshot_data = None
|
||||
if kwargs.get("screenshot"):
|
||||
screenshot_data = await self.take_screenshot(url)
|
||||
|
||||
if self.verbose:
|
||||
print(f"[LOG] ✅ Crawled {url} successfully!")
|
||||
|
||||
if self.use_cached_html:
|
||||
cache_file_path = os.path.join(
|
||||
Path.home(), ".crawl4ai", "cache", hashlib.md5(url.encode()).hexdigest()
|
||||
)
|
||||
with open(cache_file_path, "w", encoding="utf-8") as f:
|
||||
f.write(html)
|
||||
# store response headers and status code in cache
|
||||
with open(cache_file_path + ".meta", "w", encoding="utf-8") as f:
|
||||
json.dump({
|
||||
"response_headers": response_headers,
|
||||
"status_code": status_code
|
||||
}, f)
|
||||
|
||||
async def get_delayed_content(delay: float = 5.0) -> str:
|
||||
if self.verbose:
|
||||
print(f"[LOG] Waiting for {delay} seconds before retrieving content for {url}")
|
||||
await asyncio.sleep(delay)
|
||||
return await page.content()
|
||||
|
||||
response = AsyncCrawlResponse(
|
||||
html=html,
|
||||
response_headers=response_headers,
|
||||
status_code=status_code,
|
||||
screenshot=screenshot_data,
|
||||
get_delayed_content=get_delayed_content
|
||||
)
|
||||
return response
|
||||
except Error as e:
|
||||
raise Error(f"Failed to crawl {url}: {str(e)}")
|
||||
finally:
|
||||
if not session_id:
|
||||
await page.close()
|
||||
await context.close()
|
||||
|
||||
async def crawl_many(self, urls: List[str], **kwargs) -> List[AsyncCrawlResponse]:
|
||||
semaphore_count = kwargs.get('semaphore_count', 5) # Adjust as needed
|
||||
semaphore = asyncio.Semaphore(semaphore_count)
|
||||
|
||||
async def crawl_with_semaphore(url):
|
||||
async with semaphore:
|
||||
return await self.crawl(url, **kwargs)
|
||||
|
||||
tasks = [crawl_with_semaphore(url) for url in urls]
|
||||
results = await asyncio.gather(*tasks, return_exceptions=True)
|
||||
return [result if not isinstance(result, Exception) else str(result) for result in results]
|
||||
|
||||
async def take_screenshot(self, url: str, wait_time=1000) -> str:
|
||||
async with await self.browser.new_context(user_agent=self.user_agent) as context:
|
||||
page = await context.new_page()
|
||||
try:
|
||||
await page.goto(url, wait_until="domcontentloaded", timeout=30000)
|
||||
# Wait for a specified time (default is 1 second)
|
||||
await page.wait_for_timeout(wait_time)
|
||||
screenshot = await page.screenshot(full_page=True)
|
||||
return base64.b64encode(screenshot).decode('utf-8')
|
||||
except Exception as e:
|
||||
error_message = f"Failed to take screenshot: {str(e)}"
|
||||
print(error_message)
|
||||
|
||||
# Generate an error image
|
||||
img = Image.new('RGB', (800, 600), color='black')
|
||||
draw = ImageDraw.Draw(img)
|
||||
font = ImageFont.load_default()
|
||||
draw.text((10, 10), error_message, fill=(255, 255, 255), font=font)
|
||||
|
||||
buffered = BytesIO()
|
||||
img.save(buffered, format="JPEG")
|
||||
return base64.b64encode(buffered.getvalue()).decode('utf-8')
|
||||
finally:
|
||||
await page.close()
|
||||
|
||||
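A minimal usage sketch for the strategy above; the URL and the hook body are illustrative and not part of the module:

import asyncio

async def main():
    def on_browser_created(browser):
        # receives the Browser instance passed by execute_hook('on_browser_created', ...)
        print("browser ready:", browser.is_connected())

    strategy = AsyncPlaywrightCrawlerStrategy(headless=True, verbose=True)
    strategy.set_hook("on_browser_created", on_browser_created)
    async with strategy:
        response = await strategy.crawl("https://example.com", screenshot=False)
        print(response.status_code, len(response.html))

asyncio.run(main())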
@@ -2,19 +2,232 @@ import os
|
||||
from pathlib import Path
|
||||
import aiosqlite
|
||||
import asyncio
|
||||
from typing import Optional, Tuple
|
||||
from typing import Optional, Dict
|
||||
from contextlib import asynccontextmanager
|
||||
import logging
|
||||
import json # Added for serialization/deserialization
|
||||
from .utils import ensure_content_dirs, generate_content_hash
|
||||
from .models import CrawlResult, MarkdownGenerationResult
|
||||
import aiofiles
|
||||
from .version_manager import VersionManager
|
||||
from .async_logger import AsyncLogger
|
||||
from .utils import get_error_context, create_box_message
|
||||
|
||||
DB_PATH = os.path.join(Path.home(), ".crawl4ai")
|
||||
# Set up logging
|
||||
# logging.basicConfig(level=logging.INFO)
|
||||
# logger = logging.getLogger(__name__)
|
||||
# logger.setLevel(logging.INFO)
|
||||
|
||||
base_directory = DB_PATH = os.path.join(
|
||||
os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".crawl4ai"
|
||||
)
|
||||
os.makedirs(DB_PATH, exist_ok=True)
|
||||
DB_PATH = os.path.join(DB_PATH, "crawl4ai.db")
|
||||
DB_PATH = os.path.join(base_directory, "crawl4ai.db")
|
||||
|
||||
|
||||
class AsyncDatabaseManager:
|
||||
def __init__(self):
|
||||
def __init__(self, pool_size: int = 10, max_retries: int = 3):
|
||||
self.db_path = DB_PATH
|
||||
self.content_paths = ensure_content_dirs(os.path.dirname(DB_PATH))
|
||||
self.pool_size = pool_size
|
||||
self.max_retries = max_retries
|
||||
self.connection_pool: Dict[int, aiosqlite.Connection] = {}
|
||||
self.pool_lock = asyncio.Lock()
|
||||
self.init_lock = asyncio.Lock()
|
||||
self.connection_semaphore = asyncio.Semaphore(pool_size)
|
||||
self._initialized = False
|
||||
self.version_manager = VersionManager()
|
||||
self.logger = AsyncLogger(
|
||||
log_file=os.path.join(base_directory, ".crawl4ai", "crawler_db.log"),
|
||||
verbose=False,
|
||||
tag_width=10,
|
||||
)
|
||||
|
||||
async def initialize(self):
|
||||
"""Initialize the database and connection pool"""
|
||||
try:
|
||||
self.logger.info("Initializing database", tag="INIT")
|
||||
# Ensure the database file exists
|
||||
os.makedirs(os.path.dirname(self.db_path), exist_ok=True)
|
||||
|
||||
# Check if version update is needed
|
||||
needs_update = self.version_manager.needs_update()
|
||||
|
||||
# Always ensure base table exists
|
||||
await self.ainit_db()
|
||||
|
||||
# Verify the table exists
|
||||
async with aiosqlite.connect(self.db_path, timeout=30.0) as db:
|
||||
async with db.execute(
|
||||
"SELECT name FROM sqlite_master WHERE type='table' AND name='crawled_data'"
|
||||
) as cursor:
|
||||
result = await cursor.fetchone()
|
||||
if not result:
|
||||
raise Exception("crawled_data table was not created")
|
||||
|
||||
# If version changed or fresh install, run updates
|
||||
if needs_update:
|
||||
self.logger.info("New version detected, running updates", tag="INIT")
|
||||
await self.update_db_schema()
|
||||
from .migrations import (
|
||||
run_migration,
|
||||
) # Import here to avoid circular imports
|
||||
|
||||
await run_migration()
|
||||
self.version_manager.update_version() # Update stored version after successful migration
|
||||
self.logger.success(
|
||||
"Version update completed successfully", tag="COMPLETE"
|
||||
)
|
||||
else:
|
||||
self.logger.success(
|
||||
"Database initialization completed successfully", tag="COMPLETE"
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(
|
||||
message="Database initialization error: {error}",
|
||||
tag="ERROR",
|
||||
params={"error": str(e)},
|
||||
)
|
||||
self.logger.info(
|
||||
message="Database will be initialized on first use", tag="INIT"
|
||||
)
|
||||
|
||||
raise
|
||||
|
||||
async def cleanup(self):
|
||||
"""Cleanup connections when shutting down"""
|
||||
async with self.pool_lock:
|
||||
for conn in self.connection_pool.values():
|
||||
await conn.close()
|
||||
self.connection_pool.clear()
|
||||
|
||||
@asynccontextmanager
|
||||
async def get_connection(self):
|
||||
"""Connection pool manager with enhanced error handling"""
|
||||
if not self._initialized:
|
||||
async with self.init_lock:
|
||||
if not self._initialized:
|
||||
try:
|
||||
await self.initialize()
|
||||
self._initialized = True
|
||||
except Exception as e:
|
||||
import sys
|
||||
|
||||
error_context = get_error_context(sys.exc_info())
|
||||
self.logger.error(
|
||||
message="Database initialization failed:\n{error}\n\nContext:\n{context}\n\nTraceback:\n{traceback}",
|
||||
tag="ERROR",
|
||||
force_verbose=True,
|
||||
params={
|
||||
"error": str(e),
|
||||
"context": error_context["code_context"],
|
||||
"traceback": error_context["full_traceback"],
|
||||
},
|
||||
)
|
||||
raise
|
||||
|
||||
await self.connection_semaphore.acquire()
|
||||
task_id = id(asyncio.current_task())
|
||||
|
||||
try:
|
||||
async with self.pool_lock:
|
||||
if task_id not in self.connection_pool:
|
||||
try:
|
||||
conn = await aiosqlite.connect(self.db_path, timeout=30.0)
|
||||
await conn.execute("PRAGMA journal_mode = WAL")
|
||||
await conn.execute("PRAGMA busy_timeout = 5000")
|
||||
|
||||
# Verify database structure
|
||||
async with conn.execute(
|
||||
"PRAGMA table_info(crawled_data)"
|
||||
) as cursor:
|
||||
columns = await cursor.fetchall()
|
||||
column_names = [col[1] for col in columns]
|
||||
expected_columns = {
|
||||
"url",
|
||||
"html",
|
||||
"cleaned_html",
|
||||
"markdown",
|
||||
"extracted_content",
|
||||
"success",
|
||||
"media",
|
||||
"links",
|
||||
"metadata",
|
||||
"screenshot",
|
||||
"response_headers",
|
||||
"downloaded_files",
|
||||
}
|
||||
missing_columns = expected_columns - set(column_names)
|
||||
if missing_columns:
|
||||
raise ValueError(
|
||||
f"Database missing columns: {missing_columns}"
|
||||
)
|
||||
|
||||
self.connection_pool[task_id] = conn
|
||||
except Exception as e:
|
||||
import sys
|
||||
|
||||
error_context = get_error_context(sys.exc_info())
|
||||
error_message = (
|
||||
f"Unexpected error in db get_connection at line {error_context['line_no']} "
|
||||
f"in {error_context['function']} ({error_context['filename']}):\n"
|
||||
f"Error: {str(e)}\n\n"
|
||||
f"Code context:\n{error_context['code_context']}"
|
||||
)
|
||||
self.logger.error(
|
||||
message=create_box_message(error_message, type="error"),
|
||||
)
|
||||
|
||||
raise
|
||||
|
||||
yield self.connection_pool[task_id]
|
||||
|
||||
except Exception as e:
|
||||
import sys
|
||||
|
||||
error_context = get_error_context(sys.exc_info())
|
||||
error_message = (
|
||||
f"Unexpected error in db get_connection at line {error_context['line_no']} "
|
||||
f"in {error_context['function']} ({error_context['filename']}):\n"
|
||||
f"Error: {str(e)}\n\n"
|
||||
f"Code context:\n{error_context['code_context']}"
|
||||
)
|
||||
self.logger.error(
|
||||
message=create_box_message(error_message, type="error"),
|
||||
)
|
||||
raise
|
||||
finally:
|
||||
async with self.pool_lock:
|
||||
if task_id in self.connection_pool:
|
||||
await self.connection_pool[task_id].close()
|
||||
del self.connection_pool[task_id]
|
||||
self.connection_semaphore.release()
|
||||
|
||||
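    # The pool above hands each asyncio task its own aiosqlite connection, keyed by
    # id(asyncio.current_task()); WAL journaling and a 5-second busy_timeout (set via
    # PRAGMA when the connection is created) let concurrent readers coexist with a writer.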
async def execute_with_retry(self, operation, *args):
|
||||
"""Execute database operations with retry logic"""
|
||||
for attempt in range(self.max_retries):
|
||||
try:
|
||||
async with self.get_connection() as db:
|
||||
result = await operation(db, *args)
|
||||
await db.commit()
|
||||
return result
|
||||
except Exception as e:
|
||||
if attempt == self.max_retries - 1:
|
||||
self.logger.error(
|
||||
message="Operation failed after {retries} attempts: {error}",
|
||||
tag="ERROR",
|
||||
force_verbose=True,
|
||||
params={"retries": self.max_retries, "error": str(e)},
|
||||
)
|
||||
raise
|
||||
                await asyncio.sleep(1 * (attempt + 1))  # Linear backoff: 1s, 2s, 3s between retries
|
||||
|
||||
async def ainit_db(self):
|
||||
async with aiosqlite.connect(self.db_path) as db:
|
||||
await db.execute('''
|
||||
"""Initialize database schema"""
|
||||
async with aiosqlite.connect(self.db_path, timeout=30.0) as db:
|
||||
await db.execute(
|
||||
"""
|
||||
CREATE TABLE IF NOT EXISTS crawled_data (
|
||||
url TEXT PRIMARY KEY,
|
||||
html TEXT,
|
||||
@@ -25,90 +238,321 @@ class AsyncDatabaseManager:
|
||||
media TEXT DEFAULT "{}",
|
||||
links TEXT DEFAULT "{}",
|
||||
metadata TEXT DEFAULT "{}",
|
||||
screenshot TEXT DEFAULT ""
|
||||
screenshot TEXT DEFAULT "",
|
||||
response_headers TEXT DEFAULT "{}",
|
||||
downloaded_files TEXT DEFAULT "{}" -- New column added
|
||||
)
|
||||
''')
|
||||
"""
|
||||
)
|
||||
await db.commit()
|
||||
await self.update_db_schema()
|
||||
|
||||
async def update_db_schema(self):
|
||||
async with aiosqlite.connect(self.db_path) as db:
|
||||
# Check if the 'media' column exists
|
||||
"""Update database schema if needed"""
|
||||
async with aiosqlite.connect(self.db_path, timeout=30.0) as db:
|
||||
cursor = await db.execute("PRAGMA table_info(crawled_data)")
|
||||
columns = await cursor.fetchall()
|
||||
column_names = [column[1] for column in columns]
|
||||
|
||||
if 'media' not in column_names:
|
||||
await self.aalter_db_add_column('media')
|
||||
|
||||
# Check for other missing columns and add them if necessary
|
||||
for column in ['links', 'metadata', 'screenshot']:
|
||||
|
||||
# List of new columns to add
|
||||
new_columns = [
|
||||
"media",
|
||||
"links",
|
||||
"metadata",
|
||||
"screenshot",
|
||||
"response_headers",
|
||||
"downloaded_files",
|
||||
]
|
||||
|
||||
for column in new_columns:
|
||||
if column not in column_names:
|
||||
await self.aalter_db_add_column(column)
|
||||
await self.aalter_db_add_column(column, db)
|
||||
await db.commit()
|
||||
|
||||
async def aalter_db_add_column(self, new_column: str):
|
||||
try:
|
||||
async with aiosqlite.connect(self.db_path) as db:
|
||||
await db.execute(f'ALTER TABLE crawled_data ADD COLUMN {new_column} TEXT DEFAULT ""')
|
||||
await db.commit()
|
||||
print(f"Added column '{new_column}' to the database.")
|
||||
except Exception as e:
|
||||
print(f"Error altering database to add {new_column} column: {e}")
|
||||
async def aalter_db_add_column(self, new_column: str, db):
|
||||
"""Add new column to the database"""
|
||||
if new_column == "response_headers":
|
||||
await db.execute(
|
||||
f'ALTER TABLE crawled_data ADD COLUMN {new_column} TEXT DEFAULT "{{}}"'
|
||||
)
|
||||
else:
|
||||
await db.execute(
|
||||
f'ALTER TABLE crawled_data ADD COLUMN {new_column} TEXT DEFAULT ""'
|
||||
)
|
||||
self.logger.info(
|
||||
message="Added column '{column}' to the database",
|
||||
tag="INIT",
|
||||
params={"column": new_column},
|
||||
)
|
||||
|
||||
async def aget_cached_url(self, url: str) -> Optional[CrawlResult]:
|
||||
"""Retrieve cached URL data as CrawlResult"""
|
||||
|
||||
async def _get(db):
|
||||
async with db.execute(
|
||||
"SELECT * FROM crawled_data WHERE url = ?", (url,)
|
||||
) as cursor:
|
||||
row = await cursor.fetchone()
|
||||
if not row:
|
||||
return None
|
||||
|
||||
# Get column names
|
||||
columns = [description[0] for description in cursor.description]
|
||||
# Create dict from row data
|
||||
row_dict = dict(zip(columns, row))
|
||||
|
||||
# Load content from files using stored hashes
|
||||
content_fields = {
|
||||
"html": row_dict["html"],
|
||||
"cleaned_html": row_dict["cleaned_html"],
|
||||
"markdown": row_dict["markdown"],
|
||||
"extracted_content": row_dict["extracted_content"],
|
||||
"screenshot": row_dict["screenshot"],
|
||||
"screenshots": row_dict["screenshot"],
|
||||
}
|
||||
|
||||
for field, hash_value in content_fields.items():
|
||||
if hash_value:
|
||||
content = await self._load_content(
|
||||
hash_value,
|
||||
field.split("_")[0], # Get content type from field name
|
||||
)
|
||||
row_dict[field] = content or ""
|
||||
else:
|
||||
row_dict[field] = ""
|
||||
|
||||
# Parse JSON fields
|
||||
json_fields = [
|
||||
"media",
|
||||
"links",
|
||||
"metadata",
|
||||
"response_headers",
|
||||
"markdown",
|
||||
]
|
||||
for field in json_fields:
|
||||
try:
|
||||
row_dict[field] = (
|
||||
json.loads(row_dict[field]) if row_dict[field] else {}
|
||||
)
|
||||
except json.JSONDecodeError:
|
||||
# Very UGLY, never mention it to me please
|
||||
if field == "markdown" and isinstance(row_dict[field], str):
|
||||
row_dict[field] = row_dict[field]
|
||||
else:
|
||||
row_dict[field] = {}
|
||||
|
||||
if isinstance(row_dict["markdown"], Dict):
|
||||
row_dict["markdown_v2"] = row_dict["markdown"]
|
||||
if row_dict["markdown"].get("raw_markdown"):
|
||||
row_dict["markdown"] = row_dict["markdown"]["raw_markdown"]
|
||||
|
||||
# Parse downloaded_files
|
||||
try:
|
||||
row_dict["downloaded_files"] = (
|
||||
json.loads(row_dict["downloaded_files"])
|
||||
if row_dict["downloaded_files"]
|
||||
else []
|
||||
)
|
||||
except json.JSONDecodeError:
|
||||
row_dict["downloaded_files"] = []
|
||||
|
||||
# Remove any fields not in CrawlResult model
|
||||
valid_fields = CrawlResult.__annotations__.keys()
|
||||
filtered_dict = {k: v for k, v in row_dict.items() if k in valid_fields}
|
||||
|
||||
return CrawlResult(**filtered_dict)
|
||||
|
||||
async def aget_cached_url(self, url: str) -> Optional[Tuple[str, str, str, str, str, str, str, bool, str]]:
|
||||
try:
|
||||
async with aiosqlite.connect(self.db_path) as db:
|
||||
async with db.execute('SELECT url, html, cleaned_html, markdown, extracted_content, success, media, links, metadata, screenshot FROM crawled_data WHERE url = ?', (url,)) as cursor:
|
||||
return await cursor.fetchone()
|
||||
return await self.execute_with_retry(_get)
|
||||
except Exception as e:
|
||||
print(f"Error retrieving cached URL: {e}")
|
||||
self.logger.error(
|
||||
message="Error retrieving cached URL: {error}",
|
||||
tag="ERROR",
|
||||
force_verbose=True,
|
||||
params={"error": str(e)},
|
||||
)
|
||||
return None
|
||||
|
||||
async def acache_url(self, url: str, html: str, cleaned_html: str, markdown: str, extracted_content: str, success: bool, media: str = "{}", links: str = "{}", metadata: str = "{}", screenshot: str = ""):
|
||||
async def acache_url(self, result: CrawlResult):
|
||||
"""Cache CrawlResult data"""
|
||||
# Store content files and get hashes
|
||||
content_map = {
|
||||
"html": (result.html, "html"),
|
||||
"cleaned_html": (result.cleaned_html or "", "cleaned"),
|
||||
"markdown": None,
|
||||
"extracted_content": (result.extracted_content or "", "extracted"),
|
||||
"screenshot": (result.screenshot or "", "screenshots"),
|
||||
}
|
||||
|
||||
try:
|
||||
async with aiosqlite.connect(self.db_path) as db:
|
||||
await db.execute('''
|
||||
INSERT INTO crawled_data (url, html, cleaned_html, markdown, extracted_content, success, media, links, metadata, screenshot)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
ON CONFLICT(url) DO UPDATE SET
|
||||
html = excluded.html,
|
||||
cleaned_html = excluded.cleaned_html,
|
||||
markdown = excluded.markdown,
|
||||
extracted_content = excluded.extracted_content,
|
||||
success = excluded.success,
|
||||
media = excluded.media,
|
||||
links = excluded.links,
|
||||
metadata = excluded.metadata,
|
||||
screenshot = excluded.screenshot
|
||||
''', (url, html, cleaned_html, markdown, extracted_content, success, media, links, metadata, screenshot))
|
||||
await db.commit()
|
||||
if isinstance(result.markdown, MarkdownGenerationResult):
|
||||
content_map["markdown"] = (
|
||||
result.markdown.model_dump_json(),
|
||||
"markdown",
|
||||
)
|
||||
elif hasattr(result, "markdown_v2"):
|
||||
content_map["markdown"] = (
|
||||
result.markdown_v2.model_dump_json(),
|
||||
"markdown",
|
||||
)
|
||||
elif isinstance(result.markdown, str):
|
||||
markdown_result = MarkdownGenerationResult(raw_markdown=result.markdown)
|
||||
content_map["markdown"] = (
|
||||
markdown_result.model_dump_json(),
|
||||
"markdown",
|
||||
)
|
||||
else:
|
||||
content_map["markdown"] = (
|
||||
MarkdownGenerationResult().model_dump_json(),
|
||||
"markdown",
|
||||
)
|
||||
except Exception as e:
|
||||
print(f"Error caching URL: {e}")
|
||||
self.logger.warning(
|
||||
message=f"Error processing markdown content: {str(e)}", tag="WARNING"
|
||||
)
|
||||
# Fallback to empty markdown result
|
||||
content_map["markdown"] = (
|
||||
MarkdownGenerationResult().model_dump_json(),
|
||||
"markdown",
|
||||
)
|
||||
|
||||
content_hashes = {}
|
||||
for field, (content, content_type) in content_map.items():
|
||||
content_hashes[field] = await self._store_content(content, content_type)
|
||||
|
||||
async def _cache(db):
|
||||
await db.execute(
|
||||
"""
|
||||
INSERT INTO crawled_data (
|
||||
url, html, cleaned_html, markdown,
|
||||
extracted_content, success, media, links, metadata,
|
||||
screenshot, response_headers, downloaded_files
|
||||
)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
ON CONFLICT(url) DO UPDATE SET
|
||||
html = excluded.html,
|
||||
cleaned_html = excluded.cleaned_html,
|
||||
markdown = excluded.markdown,
|
||||
extracted_content = excluded.extracted_content,
|
||||
success = excluded.success,
|
||||
media = excluded.media,
|
||||
links = excluded.links,
|
||||
metadata = excluded.metadata,
|
||||
screenshot = excluded.screenshot,
|
||||
response_headers = excluded.response_headers,
|
||||
downloaded_files = excluded.downloaded_files
|
||||
""",
|
||||
(
|
||||
result.url,
|
||||
content_hashes["html"],
|
||||
content_hashes["cleaned_html"],
|
||||
content_hashes["markdown"],
|
||||
content_hashes["extracted_content"],
|
||||
result.success,
|
||||
json.dumps(result.media),
|
||||
json.dumps(result.links),
|
||||
json.dumps(result.metadata or {}),
|
||||
content_hashes["screenshot"],
|
||||
json.dumps(result.response_headers or {}),
|
||||
json.dumps(result.downloaded_files or []),
|
||||
),
|
||||
)
|
||||
|
||||
try:
|
||||
await self.execute_with_retry(_cache)
|
||||
except Exception as e:
|
||||
self.logger.error(
|
||||
message="Error caching URL: {error}",
|
||||
tag="ERROR",
|
||||
force_verbose=True,
|
||||
params={"error": str(e)},
|
||||
)
|
||||
|
||||
async def aget_total_count(self) -> int:
|
||||
"""Get total number of cached URLs"""
|
||||
|
||||
async def _count(db):
|
||||
async with db.execute("SELECT COUNT(*) FROM crawled_data") as cursor:
|
||||
result = await cursor.fetchone()
|
||||
return result[0] if result else 0
|
||||
|
||||
try:
|
||||
async with aiosqlite.connect(self.db_path) as db:
|
||||
async with db.execute('SELECT COUNT(*) FROM crawled_data') as cursor:
|
||||
result = await cursor.fetchone()
|
||||
return result[0] if result else 0
|
||||
return await self.execute_with_retry(_count)
|
||||
except Exception as e:
|
||||
print(f"Error getting total count: {e}")
|
||||
self.logger.error(
|
||||
message="Error getting total count: {error}",
|
||||
tag="ERROR",
|
||||
force_verbose=True,
|
||||
params={"error": str(e)},
|
||||
)
|
||||
return 0
|
||||
|
||||
async def aclear_db(self):
|
||||
"""Clear all data from the database"""
|
||||
|
||||
async def _clear(db):
|
||||
await db.execute("DELETE FROM crawled_data")
|
||||
|
||||
try:
|
||||
async with aiosqlite.connect(self.db_path) as db:
|
||||
await db.execute('DELETE FROM crawled_data')
|
||||
await db.commit()
|
||||
await self.execute_with_retry(_clear)
|
||||
except Exception as e:
|
||||
print(f"Error clearing database: {e}")
|
||||
self.logger.error(
|
||||
message="Error clearing database: {error}",
|
||||
tag="ERROR",
|
||||
force_verbose=True,
|
||||
params={"error": str(e)},
|
||||
)
|
||||
|
||||
async def aflush_db(self):
|
||||
try:
|
||||
async with aiosqlite.connect(self.db_path) as db:
|
||||
await db.execute('DROP TABLE IF EXISTS crawled_data')
|
||||
await db.commit()
|
||||
except Exception as e:
|
||||
print(f"Error flushing database: {e}")
|
||||
"""Drop the entire table"""
|
||||
|
||||
async_db_manager = AsyncDatabaseManager()
|
||||
async def _flush(db):
|
||||
await db.execute("DROP TABLE IF EXISTS crawled_data")
|
||||
|
||||
try:
|
||||
await self.execute_with_retry(_flush)
|
||||
except Exception as e:
|
||||
self.logger.error(
|
||||
message="Error flushing database: {error}",
|
||||
tag="ERROR",
|
||||
force_verbose=True,
|
||||
params={"error": str(e)},
|
||||
)
|
||||
|
||||
async def _store_content(self, content: str, content_type: str) -> str:
|
||||
"""Store content in filesystem and return hash"""
|
||||
if not content:
|
||||
return ""
|
||||
|
||||
content_hash = generate_content_hash(content)
|
||||
file_path = os.path.join(self.content_paths[content_type], content_hash)
|
||||
|
||||
# Only write if file doesn't exist
|
||||
if not os.path.exists(file_path):
|
||||
async with aiofiles.open(file_path, "w", encoding="utf-8") as f:
|
||||
await f.write(content)
|
||||
|
||||
return content_hash
|
||||
|
||||
async def _load_content(
|
||||
self, content_hash: str, content_type: str
|
||||
) -> Optional[str]:
|
||||
"""Load content from filesystem by hash"""
|
||||
if not content_hash:
|
||||
return None
|
||||
|
||||
file_path = os.path.join(self.content_paths[content_type], content_hash)
|
||||
try:
|
||||
async with aiofiles.open(file_path, "r", encoding="utf-8") as f:
|
||||
return await f.read()
|
||||
except:
|
||||
self.logger.error(
|
||||
message="Failed to load content: {file_path}",
|
||||
tag="ERROR",
|
||||
force_verbose=True,
|
||||
params={"file_path": file_path},
|
||||
)
|
||||
return None
|
||||
|
||||
|
||||
# Create a singleton instance
|
||||
async_db_manager = AsyncDatabaseManager()
|
||||
|
||||
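A short usage sketch driving the async_db_manager singleton defined above; the URL, field values, and the relative import are illustrative assumptions, not part of the module:

import asyncio
from .models import CrawlResult  # same relative import style as the module above

async def demo():
    result = CrawlResult(url="https://example.com", html="<html>hi</html>", success=True)
    await async_db_manager.acache_url(result)                    # content hashed to disk, row upserted with retry
    cached = await async_db_manager.aget_cached_url(result.url)  # returns a CrawlResult, or None on a miss
    print(cached is not None)

asyncio.run(demo())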
647
crawl4ai/async_dispatcher.py
Normal file
@@ -0,0 +1,647 @@
|
||||
from typing import Dict, Optional, List, Tuple
|
||||
from .async_configs import CrawlerRunConfig
|
||||
from .models import (
|
||||
CrawlResult,
|
||||
CrawlerTaskResult,
|
||||
CrawlStatus,
|
||||
DisplayMode,
|
||||
CrawlStats,
|
||||
DomainState,
|
||||
)
|
||||
|
||||
from rich.live import Live
|
||||
from rich.table import Table
|
||||
from rich.console import Console
|
||||
from rich import box
|
||||
from datetime import datetime, timedelta
|
||||
from collections.abc import AsyncGenerator
|
||||
import time
|
||||
import psutil
|
||||
import asyncio
|
||||
import uuid
|
||||
|
||||
from urllib.parse import urlparse
|
||||
import random
|
||||
from abc import ABC, abstractmethod
|
||||
|
||||
|
||||
|
||||
class RateLimiter:
|
||||
def __init__(
|
||||
self,
|
||||
base_delay: Tuple[float, float] = (1.0, 3.0),
|
||||
max_delay: float = 60.0,
|
||||
max_retries: int = 3,
|
||||
rate_limit_codes: List[int] = None,
|
||||
):
|
||||
self.base_delay = base_delay
|
||||
self.max_delay = max_delay
|
||||
self.max_retries = max_retries
|
||||
self.rate_limit_codes = rate_limit_codes or [429, 503]
|
||||
self.domains: Dict[str, DomainState] = {}
|
||||
|
||||
def get_domain(self, url: str) -> str:
|
||||
return urlparse(url).netloc
|
||||
|
||||
async def wait_if_needed(self, url: str) -> None:
|
||||
domain = self.get_domain(url)
|
||||
state = self.domains.get(domain)
|
||||
|
||||
if not state:
|
||||
self.domains[domain] = DomainState()
|
||||
state = self.domains[domain]
|
||||
|
||||
now = time.time()
|
||||
if state.last_request_time:
|
||||
wait_time = max(0, state.current_delay - (now - state.last_request_time))
|
||||
if wait_time > 0:
|
||||
await asyncio.sleep(wait_time)
|
||||
|
||||
# Random delay within base range if no current delay
|
||||
if state.current_delay == 0:
|
||||
state.current_delay = random.uniform(*self.base_delay)
|
||||
|
||||
state.last_request_time = time.time()
|
||||
|
||||
def update_delay(self, url: str, status_code: int) -> bool:
|
||||
domain = self.get_domain(url)
|
||||
state = self.domains[domain]
|
||||
|
||||
if status_code in self.rate_limit_codes:
|
||||
state.fail_count += 1
|
||||
if state.fail_count > self.max_retries:
|
||||
return False
|
||||
|
||||
# Exponential backoff with random jitter
|
||||
state.current_delay = min(
|
||||
state.current_delay * 2 * random.uniform(0.75, 1.25), self.max_delay
|
||||
)
|
||||
else:
|
||||
# Gradually reduce delay on success
|
||||
state.current_delay = max(
|
||||
random.uniform(*self.base_delay), state.current_delay * 0.75
|
||||
)
|
||||
state.fail_count = 0
|
||||
|
||||
return True
|
||||
|
||||
|
||||
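# Sketch of how a caller is expected to drive the limiter above; the URL and the
# hard-coded status code are placeholders, not part of the module.
async def fetch_with_limit(limiter: RateLimiter, url: str) -> None:
    await limiter.wait_if_needed(url)                # sleeps while the domain is still in its delay window
    status_code = 429                                # e.g. the server answered with a rate-limit status
    if not limiter.update_delay(url, status_code):   # delay doubles with jitter; False once retries run out
        raise RuntimeError(f"rate-limit retries exhausted for {url}")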
class CrawlerMonitor:
|
||||
def __init__(
|
||||
self,
|
||||
max_visible_rows: int = 15,
|
||||
display_mode: DisplayMode = DisplayMode.DETAILED,
|
||||
):
|
||||
self.console = Console()
|
||||
self.max_visible_rows = max_visible_rows
|
||||
self.display_mode = display_mode
|
||||
self.stats: Dict[str, CrawlStats] = {}
|
||||
self.process = psutil.Process()
|
||||
self.start_time = datetime.now()
|
||||
self.live = Live(self._create_table(), refresh_per_second=2)
|
||||
|
||||
def start(self):
|
||||
self.live.start()
|
||||
|
||||
def stop(self):
|
||||
self.live.stop()
|
||||
|
||||
def add_task(self, task_id: str, url: str):
|
||||
self.stats[task_id] = CrawlStats(
|
||||
task_id=task_id, url=url, status=CrawlStatus.QUEUED
|
||||
)
|
||||
self.live.update(self._create_table())
|
||||
|
||||
def update_task(self, task_id: str, **kwargs):
|
||||
if task_id in self.stats:
|
||||
for key, value in kwargs.items():
|
||||
setattr(self.stats[task_id], key, value)
|
||||
self.live.update(self._create_table())
|
||||
|
||||
def _create_aggregated_table(self) -> Table:
|
||||
"""Creates a compact table showing only aggregated statistics"""
|
||||
table = Table(
|
||||
box=box.ROUNDED,
|
||||
title="Crawler Status Overview",
|
||||
title_style="bold magenta",
|
||||
header_style="bold blue",
|
||||
show_lines=True,
|
||||
)
|
||||
|
||||
# Calculate statistics
|
||||
total_tasks = len(self.stats)
|
||||
queued = sum(
|
||||
1 for stat in self.stats.values() if stat.status == CrawlStatus.QUEUED
|
||||
)
|
||||
in_progress = sum(
|
||||
1 for stat in self.stats.values() if stat.status == CrawlStatus.IN_PROGRESS
|
||||
)
|
||||
completed = sum(
|
||||
1 for stat in self.stats.values() if stat.status == CrawlStatus.COMPLETED
|
||||
)
|
||||
failed = sum(
|
||||
1 for stat in self.stats.values() if stat.status == CrawlStatus.FAILED
|
||||
)
|
||||
|
||||
# Memory statistics
|
||||
current_memory = self.process.memory_info().rss / (1024 * 1024)
|
||||
total_task_memory = sum(stat.memory_usage for stat in self.stats.values())
|
||||
peak_memory = max(
|
||||
(stat.peak_memory for stat in self.stats.values()), default=0.0
|
||||
)
|
||||
|
||||
# Duration
|
||||
duration = datetime.now() - self.start_time
|
||||
|
||||
# Create status row
|
||||
table.add_column("Status", style="bold cyan")
|
||||
table.add_column("Count", justify="right")
|
||||
table.add_column("Percentage", justify="right")
|
||||
|
||||
table.add_row("Total Tasks", str(total_tasks), "100%")
|
||||
table.add_row(
|
||||
"[yellow]In Queue[/yellow]",
|
||||
str(queued),
|
||||
f"{(queued/total_tasks*100):.1f}%" if total_tasks > 0 else "0%",
|
||||
)
|
||||
table.add_row(
|
||||
"[blue]In Progress[/blue]",
|
||||
str(in_progress),
|
||||
f"{(in_progress/total_tasks*100):.1f}%" if total_tasks > 0 else "0%",
|
||||
)
|
||||
table.add_row(
|
||||
"[green]Completed[/green]",
|
||||
str(completed),
|
||||
f"{(completed/total_tasks*100):.1f}%" if total_tasks > 0 else "0%",
|
||||
)
|
||||
table.add_row(
|
||||
"[red]Failed[/red]",
|
||||
str(failed),
|
||||
f"{(failed/total_tasks*100):.1f}%" if total_tasks > 0 else "0%",
|
||||
)
|
||||
|
||||
# Add memory information
|
||||
table.add_section()
|
||||
table.add_row(
|
||||
"[magenta]Current Memory[/magenta]", f"{current_memory:.1f} MB", ""
|
||||
)
|
||||
table.add_row(
|
||||
"[magenta]Total Task Memory[/magenta]", f"{total_task_memory:.1f} MB", ""
|
||||
)
|
||||
table.add_row(
|
||||
"[magenta]Peak Task Memory[/magenta]", f"{peak_memory:.1f} MB", ""
|
||||
)
|
||||
table.add_row(
|
||||
"[yellow]Runtime[/yellow]",
|
||||
str(timedelta(seconds=int(duration.total_seconds()))),
|
||||
"",
|
||||
)
|
||||
|
||||
return table
|
||||
|
||||
def _create_detailed_table(self) -> Table:
|
||||
table = Table(
|
||||
box=box.ROUNDED,
|
||||
title="Crawler Performance Monitor",
|
||||
title_style="bold magenta",
|
||||
header_style="bold blue",
|
||||
)
|
||||
|
||||
# Add columns
|
||||
table.add_column("Task ID", style="cyan", no_wrap=True)
|
||||
table.add_column("URL", style="cyan", no_wrap=True)
|
||||
table.add_column("Status", style="bold")
|
||||
table.add_column("Memory (MB)", justify="right")
|
||||
table.add_column("Peak (MB)", justify="right")
|
||||
table.add_column("Duration", justify="right")
|
||||
table.add_column("Info", style="italic")
|
||||
|
||||
# Add summary row
|
||||
total_memory = sum(stat.memory_usage for stat in self.stats.values())
|
||||
active_count = sum(
|
||||
1 for stat in self.stats.values() if stat.status == CrawlStatus.IN_PROGRESS
|
||||
)
|
||||
completed_count = sum(
|
||||
1 for stat in self.stats.values() if stat.status == CrawlStatus.COMPLETED
|
||||
)
|
||||
failed_count = sum(
|
||||
1 for stat in self.stats.values() if stat.status == CrawlStatus.FAILED
|
||||
)
|
||||
|
||||
table.add_row(
|
||||
"[bold yellow]SUMMARY",
|
||||
f"Total: {len(self.stats)}",
|
||||
f"Active: {active_count}",
|
||||
f"{total_memory:.1f}",
|
||||
f"{self.process.memory_info().rss / (1024 * 1024):.1f}",
|
||||
str(
|
||||
timedelta(
|
||||
seconds=int((datetime.now() - self.start_time).total_seconds())
|
||||
)
|
||||
),
|
||||
f"✓{completed_count} ✗{failed_count}",
|
||||
style="bold",
|
||||
)
|
||||
|
||||
table.add_section()
|
||||
|
||||
# Add rows for each task
|
||||
visible_stats = sorted(
|
||||
self.stats.values(),
|
||||
key=lambda x: (
|
||||
x.status != CrawlStatus.IN_PROGRESS,
|
||||
x.status != CrawlStatus.QUEUED,
|
||||
x.end_time or datetime.max,
|
||||
),
|
||||
)[: self.max_visible_rows]
|
||||
|
||||
for stat in visible_stats:
|
||||
status_style = {
|
||||
CrawlStatus.QUEUED: "white",
|
||||
CrawlStatus.IN_PROGRESS: "yellow",
|
||||
CrawlStatus.COMPLETED: "green",
|
||||
CrawlStatus.FAILED: "red",
|
||||
}[stat.status]
|
||||
|
||||
table.add_row(
|
||||
stat.task_id[:8], # Show first 8 chars of task ID
|
||||
stat.url[:40] + "..." if len(stat.url) > 40 else stat.url,
|
||||
f"[{status_style}]{stat.status.value}[/{status_style}]",
|
||||
f"{stat.memory_usage:.1f}",
|
||||
f"{stat.peak_memory:.1f}",
|
||||
stat.duration,
|
||||
stat.error_message[:40] if stat.error_message else "",
|
||||
)
|
||||
|
||||
return table
|
||||
|
||||
def _create_table(self) -> Table:
|
||||
"""Creates the appropriate table based on display mode"""
|
||||
if self.display_mode == DisplayMode.AGGREGATED:
|
||||
return self._create_aggregated_table()
|
||||
return self._create_detailed_table()
|
||||
|
||||
|
||||
class BaseDispatcher(ABC):
|
||||
def __init__(
|
||||
self,
|
||||
rate_limiter: Optional[RateLimiter] = None,
|
||||
monitor: Optional[CrawlerMonitor] = None,
|
||||
):
|
||||
self.crawler = None
|
||||
self._domain_last_hit: Dict[str, float] = {}
|
||||
self.concurrent_sessions = 0
|
||||
self.rate_limiter = rate_limiter
|
||||
self.monitor = monitor
|
||||
|
||||
@abstractmethod
|
||||
async def crawl_url(
|
||||
self,
|
||||
url: str,
|
||||
config: CrawlerRunConfig,
|
||||
task_id: str,
|
||||
monitor: Optional[CrawlerMonitor] = None,
|
||||
) -> CrawlerTaskResult:
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
async def run_urls(
|
||||
self,
|
||||
urls: List[str],
|
||||
crawler: "AsyncWebCrawler", # noqa: F821
|
||||
config: CrawlerRunConfig,
|
||||
monitor: Optional[CrawlerMonitor] = None,
|
||||
) -> List[CrawlerTaskResult]:
|
||||
pass
|
||||
|
||||
|
||||
class MemoryAdaptiveDispatcher(BaseDispatcher):
|
||||
def __init__(
|
||||
self,
|
||||
memory_threshold_percent: float = 90.0,
|
||||
check_interval: float = 1.0,
|
||||
max_session_permit: int = 20,
|
||||
memory_wait_timeout: float = 300.0, # 5 minutes default timeout
|
||||
rate_limiter: Optional[RateLimiter] = None,
|
||||
monitor: Optional[CrawlerMonitor] = None,
|
||||
):
|
||||
super().__init__(rate_limiter, monitor)
|
||||
self.memory_threshold_percent = memory_threshold_percent
|
||||
self.check_interval = check_interval
|
||||
self.max_session_permit = max_session_permit
|
||||
self.memory_wait_timeout = memory_wait_timeout
|
||||
self.result_queue = asyncio.Queue() # Queue for storing results
|
||||
|
||||
async def crawl_url(
|
||||
self,
|
||||
url: str,
|
||||
config: CrawlerRunConfig,
|
||||
task_id: str,
|
||||
) -> CrawlerTaskResult:
|
||||
start_time = datetime.now()
|
||||
error_message = ""
|
||||
memory_usage = peak_memory = 0.0
|
||||
|
||||
try:
|
||||
if self.monitor:
|
||||
self.monitor.update_task(
|
||||
task_id, status=CrawlStatus.IN_PROGRESS, start_time=start_time
|
||||
)
|
||||
self.concurrent_sessions += 1
|
||||
|
||||
if self.rate_limiter:
|
||||
await self.rate_limiter.wait_if_needed(url)
|
||||
|
||||
process = psutil.Process()
|
||||
start_memory = process.memory_info().rss / (1024 * 1024)
|
||||
result = await self.crawler.arun(url, config=config, session_id=task_id)
|
||||
end_memory = process.memory_info().rss / (1024 * 1024)
|
||||
|
||||
memory_usage = peak_memory = end_memory - start_memory
|
||||
|
||||
if self.rate_limiter and result.status_code:
|
||||
if not self.rate_limiter.update_delay(url, result.status_code):
|
||||
error_message = f"Rate limit retry count exceeded for domain {urlparse(url).netloc}"
|
||||
if self.monitor:
|
||||
self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
|
||||
result = CrawlerTaskResult(
|
||||
task_id=task_id,
|
||||
url=url,
|
||||
result=result,
|
||||
memory_usage=memory_usage,
|
||||
peak_memory=peak_memory,
|
||||
start_time=start_time,
|
||||
end_time=datetime.now(),
|
||||
error_message=error_message,
|
||||
)
|
||||
await self.result_queue.put(result)
|
||||
return result
|
||||
|
||||
if not result.success:
|
||||
error_message = result.error_message
|
||||
if self.monitor:
|
||||
self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
|
||||
elif self.monitor:
|
||||
self.monitor.update_task(task_id, status=CrawlStatus.COMPLETED)
|
||||
|
||||
except Exception as e:
|
||||
error_message = str(e)
|
||||
if self.monitor:
|
||||
self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
|
||||
result = CrawlResult(
|
||||
url=url, html="", metadata={}, success=False, error_message=str(e)
|
||||
)
|
||||
|
||||
finally:
|
||||
end_time = datetime.now()
|
||||
if self.monitor:
|
||||
self.monitor.update_task(
|
||||
task_id,
|
||||
end_time=end_time,
|
||||
memory_usage=memory_usage,
|
||||
peak_memory=peak_memory,
|
||||
error_message=error_message,
|
||||
)
|
||||
self.concurrent_sessions -= 1
|
||||
|
||||
return CrawlerTaskResult(
|
||||
task_id=task_id,
|
||||
url=url,
|
||||
result=result,
|
||||
memory_usage=memory_usage,
|
||||
peak_memory=peak_memory,
|
||||
start_time=start_time,
|
||||
end_time=end_time,
|
||||
error_message=error_message,
|
||||
)
|
||||
|
||||
async def run_urls(
|
||||
self,
|
||||
urls: List[str],
|
||||
crawler: "AsyncWebCrawler", # noqa: F821
|
||||
config: CrawlerRunConfig,
|
||||
) -> List[CrawlerTaskResult]:
|
||||
self.crawler = crawler
|
||||
|
||||
if self.monitor:
|
||||
self.monitor.start()
|
||||
|
||||
try:
|
||||
pending_tasks = []
|
||||
active_tasks = []
|
||||
task_queue = []
|
||||
|
||||
for url in urls:
|
||||
task_id = str(uuid.uuid4())
|
||||
if self.monitor:
|
||||
self.monitor.add_task(task_id, url)
|
||||
task_queue.append((url, task_id))
|
||||
|
||||
while task_queue or active_tasks:
|
||||
wait_start_time = time.time()
|
||||
while len(active_tasks) < self.max_session_permit and task_queue:
|
||||
if psutil.virtual_memory().percent >= self.memory_threshold_percent:
|
||||
# Check if we've exceeded the timeout
|
||||
if time.time() - wait_start_time > self.memory_wait_timeout:
|
||||
raise MemoryError(
|
||||
f"Memory usage above threshold ({self.memory_threshold_percent}%) for more than {self.memory_wait_timeout} seconds"
|
||||
)
|
||||
await asyncio.sleep(self.check_interval)
|
||||
continue
|
||||
|
||||
url, task_id = task_queue.pop(0)
|
||||
task = asyncio.create_task(self.crawl_url(url, config, task_id))
|
||||
active_tasks.append(task)
|
||||
|
||||
if not active_tasks:
|
||||
await asyncio.sleep(self.check_interval)
|
||||
continue
|
||||
|
||||
done, pending = await asyncio.wait(
|
||||
active_tasks, return_when=asyncio.FIRST_COMPLETED
|
||||
)
|
||||
|
||||
pending_tasks.extend(done)
|
||||
active_tasks = list(pending)
|
||||
|
||||
return await asyncio.gather(*pending_tasks)
|
||||
finally:
|
||||
if self.monitor:
|
||||
self.monitor.stop()
|
||||
|
||||
async def run_urls_stream(
|
||||
self,
|
||||
urls: List[str],
|
||||
crawler: "AsyncWebCrawler",
|
||||
config: CrawlerRunConfig,
|
||||
) -> AsyncGenerator[CrawlerTaskResult, None]:
|
||||
self.crawler = crawler
|
||||
if self.monitor:
|
||||
self.monitor.start()
|
||||
|
||||
try:
|
||||
active_tasks = []
|
||||
task_queue = []
|
||||
completed_count = 0
|
||||
total_urls = len(urls)
|
||||
|
||||
# Initialize task queue
|
||||
for url in urls:
|
||||
task_id = str(uuid.uuid4())
|
||||
if self.monitor:
|
||||
self.monitor.add_task(task_id, url)
|
||||
task_queue.append((url, task_id))
|
||||
|
||||
while completed_count < total_urls:
|
||||
# Start new tasks if memory permits
|
||||
while len(active_tasks) < self.max_session_permit and task_queue:
|
||||
if psutil.virtual_memory().percent >= self.memory_threshold_percent:
|
||||
await asyncio.sleep(self.check_interval)
|
||||
continue
|
||||
|
||||
url, task_id = task_queue.pop(0)
|
||||
task = asyncio.create_task(self.crawl_url(url, config, task_id))
|
||||
active_tasks.append(task)
|
||||
|
||||
if not active_tasks and not task_queue:
|
||||
break
|
||||
|
||||
# Wait for any task to complete and yield results
|
||||
if active_tasks:
|
||||
done, pending = await asyncio.wait(
|
||||
active_tasks,
|
||||
timeout=0.1,
|
||||
return_when=asyncio.FIRST_COMPLETED
|
||||
)
|
||||
for completed_task in done:
|
||||
result = await completed_task
|
||||
completed_count += 1
|
||||
yield result
|
||||
active_tasks = list(pending)
|
||||
else:
|
||||
await asyncio.sleep(self.check_interval)
|
||||
|
||||
finally:
|
||||
if self.monitor:
|
||||
self.monitor.stop()
|
||||
|
||||
class SemaphoreDispatcher(BaseDispatcher):
|
||||
def __init__(
|
||||
self,
|
||||
semaphore_count: int = 5,
|
||||
max_session_permit: int = 20,
|
||||
rate_limiter: Optional[RateLimiter] = None,
|
||||
monitor: Optional[CrawlerMonitor] = None,
|
||||
):
|
||||
super().__init__(rate_limiter, monitor)
|
||||
self.semaphore_count = semaphore_count
|
||||
self.max_session_permit = max_session_permit
|
||||
|
||||
async def crawl_url(
|
||||
self,
|
||||
url: str,
|
||||
config: CrawlerRunConfig,
|
||||
task_id: str,
|
||||
semaphore: asyncio.Semaphore = None,
|
||||
) -> CrawlerTaskResult:
|
||||
start_time = datetime.now()
|
||||
error_message = ""
|
||||
memory_usage = peak_memory = 0.0
|
||||
|
||||
try:
|
||||
if self.monitor:
|
||||
self.monitor.update_task(
|
||||
task_id, status=CrawlStatus.IN_PROGRESS, start_time=start_time
|
||||
)
|
||||
|
||||
if self.rate_limiter:
|
||||
await self.rate_limiter.wait_if_needed(url)
|
||||
|
||||
async with semaphore:
|
||||
process = psutil.Process()
|
||||
start_memory = process.memory_info().rss / (1024 * 1024)
|
||||
result = await self.crawler.arun(url, config=config, session_id=task_id)
|
||||
end_memory = process.memory_info().rss / (1024 * 1024)
|
||||
|
||||
memory_usage = peak_memory = end_memory - start_memory
|
||||
|
||||
if self.rate_limiter and result.status_code:
|
||||
if not self.rate_limiter.update_delay(url, result.status_code):
|
||||
error_message = f"Rate limit retry count exceeded for domain {urlparse(url).netloc}"
|
||||
if self.monitor:
|
||||
self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
|
||||
return CrawlerTaskResult(
|
||||
task_id=task_id,
|
||||
url=url,
|
||||
result=result,
|
||||
memory_usage=memory_usage,
|
||||
peak_memory=peak_memory,
|
||||
start_time=start_time,
|
||||
end_time=datetime.now(),
|
||||
error_message=error_message,
|
||||
)
|
||||
|
||||
if not result.success:
|
||||
error_message = result.error_message
|
||||
if self.monitor:
|
||||
self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
|
||||
elif self.monitor:
|
||||
self.monitor.update_task(task_id, status=CrawlStatus.COMPLETED)
|
||||
|
||||
except Exception as e:
|
||||
error_message = str(e)
|
||||
if self.monitor:
|
||||
self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
|
||||
result = CrawlResult(
|
||||
url=url, html="", metadata={}, success=False, error_message=str(e)
|
||||
)
|
||||
|
||||
finally:
|
||||
end_time = datetime.now()
|
||||
if self.monitor:
|
||||
self.monitor.update_task(
|
||||
task_id,
|
||||
end_time=end_time,
|
||||
memory_usage=memory_usage,
|
||||
peak_memory=peak_memory,
|
||||
error_message=error_message,
|
||||
)
|
||||
|
||||
return CrawlerTaskResult(
|
||||
task_id=task_id,
|
||||
url=url,
|
||||
result=result,
|
||||
memory_usage=memory_usage,
|
||||
peak_memory=peak_memory,
|
||||
start_time=start_time,
|
||||
end_time=end_time,
|
||||
error_message=error_message,
|
||||
)
|
||||
|
||||
async def run_urls(
|
||||
self,
|
||||
crawler: "AsyncWebCrawler", # noqa: F821
|
||||
urls: List[str],
|
||||
config: CrawlerRunConfig,
|
||||
) -> List[CrawlerTaskResult]:
|
||||
self.crawler = crawler
|
||||
if self.monitor:
|
||||
self.monitor.start()
|
||||
|
||||
try:
|
||||
semaphore = asyncio.Semaphore(self.semaphore_count)
|
||||
tasks = []
|
||||
|
||||
for url in urls:
|
||||
task_id = str(uuid.uuid4())
|
||||
if self.monitor:
|
||||
self.monitor.add_task(task_id, url)
|
||||
task = asyncio.create_task(
|
||||
self.crawl_url(url, config, task_id, semaphore)
|
||||
)
|
||||
tasks.append(task)
|
||||
|
||||
return await asyncio.gather(*tasks, return_exceptions=True)
|
||||
finally:
|
||||
if self.monitor:
|
||||
self.monitor.stop()
|
||||
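Before the new dispatcher module below, a minimal usage sketch for the `run_urls` call shown above. The URLs are placeholders, and the top-level imports (`AsyncWebCrawler`, `CrawlerRunConfig`, and the dispatcher module path) are assumptions about the package layout rather than something this diff confirms:

```python
import asyncio

from crawl4ai import AsyncWebCrawler, CrawlerRunConfig    # assumed public imports
from crawl4ai.async_dispatcher import SemaphoreDispatcher  # assumed module path


async def main():
    dispatcher = SemaphoreDispatcher(semaphore_count=3)     # at most 3 concurrent crawls
    async with AsyncWebCrawler() as crawler:
        results = await dispatcher.run_urls(
            crawler=crawler,
            urls=["https://example.com", "https://example.org"],  # placeholder URLs
            config=CrawlerRunConfig(),
        )
    for r in results:
        # run_urls gathers with return_exceptions=True, so exceptions come back as values
        if isinstance(r, Exception):
            print("task raised:", r)
        else:
            print(r.url, r.error_message or "ok")


asyncio.run(main())
```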
588  crawl4ai/async_dispatcher_.py  Normal file
@@ -0,0 +1,588 @@
|
||||
from typing import Dict, Optional, List, Tuple
|
||||
from .async_configs import CrawlerRunConfig
|
||||
from .models import (
|
||||
CrawlResult,
|
||||
CrawlerTaskResult,
|
||||
CrawlStatus,
|
||||
DisplayMode,
|
||||
CrawlStats,
|
||||
DomainState,
|
||||
)
|
||||
|
||||
from rich.live import Live
|
||||
from rich.table import Table
|
||||
from rich.console import Console
|
||||
from rich import box
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
import time
|
||||
import psutil
|
||||
import asyncio
|
||||
import uuid
|
||||
|
||||
from urllib.parse import urlparse
|
||||
import random
|
||||
from abc import ABC, abstractmethod
|
||||
|
||||
|
||||
class RateLimiter:
|
||||
def __init__(
|
||||
self,
|
||||
base_delay: Tuple[float, float] = (1.0, 3.0),
|
||||
max_delay: float = 60.0,
|
||||
max_retries: int = 3,
|
||||
rate_limit_codes: List[int] = None,
|
||||
):
|
||||
self.base_delay = base_delay
|
||||
self.max_delay = max_delay
|
||||
self.max_retries = max_retries
|
||||
self.rate_limit_codes = rate_limit_codes or [429, 503]
|
||||
self.domains: Dict[str, DomainState] = {}
|
||||
|
||||
def get_domain(self, url: str) -> str:
|
||||
return urlparse(url).netloc
|
||||
|
||||
async def wait_if_needed(self, url: str) -> None:
|
||||
domain = self.get_domain(url)
|
||||
state = self.domains.get(domain)
|
||||
|
||||
if not state:
|
||||
self.domains[domain] = DomainState()
|
||||
state = self.domains[domain]
|
||||
|
||||
now = time.time()
|
||||
if state.last_request_time:
|
||||
wait_time = max(0, state.current_delay - (now - state.last_request_time))
|
||||
if wait_time > 0:
|
||||
await asyncio.sleep(wait_time)
|
||||
|
||||
# Random delay within base range if no current delay
|
||||
if state.current_delay == 0:
|
||||
state.current_delay = random.uniform(*self.base_delay)
|
||||
|
||||
state.last_request_time = time.time()
|
||||
|
||||
def update_delay(self, url: str, status_code: int) -> bool:
|
||||
domain = self.get_domain(url)
|
||||
state = self.domains[domain]
|
||||
|
||||
if status_code in self.rate_limit_codes:
|
||||
state.fail_count += 1
|
||||
if state.fail_count > self.max_retries:
|
||||
return False
|
||||
|
||||
# Exponential backoff with random jitter
|
||||
state.current_delay = min(
|
||||
state.current_delay * 2 * random.uniform(0.75, 1.25), self.max_delay
|
||||
)
|
||||
else:
|
||||
# Gradually reduce delay on success
|
||||
state.current_delay = max(
|
||||
random.uniform(*self.base_delay), state.current_delay * 0.75
|
||||
)
|
||||
state.fail_count = 0
|
||||
|
||||
return True
|
||||
|
||||
|
||||
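To see the backoff rules above in isolation, here is a small self-contained sketch that feeds simulated status codes into a `RateLimiter` and prints the evolving per-domain delay; no network traffic is involved, and the URL is just an example:

```python
import asyncio

limiter = RateLimiter(base_delay=(1.0, 2.0), max_delay=30.0, max_retries=3)
url = "https://example.com/page"

# Seed the per-domain state, as the dispatcher does before the first request.
asyncio.run(limiter.wait_if_needed(url))

for code in [200, 429, 429, 200]:
    ok = limiter.update_delay(url, code)            # False once retries are exhausted
    state = limiter.domains[limiter.get_domain(url)]
    print(code, "ok" if ok else "retries exhausted", f"delay={state.current_delay:.2f}s")
```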
class CrawlerMonitor:
|
||||
def __init__(
|
||||
self,
|
||||
max_visible_rows: int = 15,
|
||||
display_mode: DisplayMode = DisplayMode.DETAILED,
|
||||
):
|
||||
self.console = Console()
|
||||
self.max_visible_rows = max_visible_rows
|
||||
self.display_mode = display_mode
|
||||
self.stats: Dict[str, CrawlStats] = {}
|
||||
self.process = psutil.Process()
|
||||
self.start_time = datetime.now()
|
||||
self.live = Live(self._create_table(), refresh_per_second=2)
|
||||
|
||||
def start(self):
|
||||
self.live.start()
|
||||
|
||||
def stop(self):
|
||||
self.live.stop()
|
||||
|
||||
def add_task(self, task_id: str, url: str):
|
||||
self.stats[task_id] = CrawlStats(
|
||||
task_id=task_id, url=url, status=CrawlStatus.QUEUED
|
||||
)
|
||||
self.live.update(self._create_table())
|
||||
|
||||
def update_task(self, task_id: str, **kwargs):
|
||||
if task_id in self.stats:
|
||||
for key, value in kwargs.items():
|
||||
setattr(self.stats[task_id], key, value)
|
||||
self.live.update(self._create_table())
|
||||
|
||||
def _create_aggregated_table(self) -> Table:
|
||||
"""Creates a compact table showing only aggregated statistics"""
|
||||
table = Table(
|
||||
box=box.ROUNDED,
|
||||
title="Crawler Status Overview",
|
||||
title_style="bold magenta",
|
||||
header_style="bold blue",
|
||||
show_lines=True,
|
||||
)
|
||||
|
||||
# Calculate statistics
|
||||
total_tasks = len(self.stats)
|
||||
queued = sum(
|
||||
1 for stat in self.stats.values() if stat.status == CrawlStatus.QUEUED
|
||||
)
|
||||
in_progress = sum(
|
||||
1 for stat in self.stats.values() if stat.status == CrawlStatus.IN_PROGRESS
|
||||
)
|
||||
completed = sum(
|
||||
1 for stat in self.stats.values() if stat.status == CrawlStatus.COMPLETED
|
||||
)
|
||||
failed = sum(
|
||||
1 for stat in self.stats.values() if stat.status == CrawlStatus.FAILED
|
||||
)
|
||||
|
||||
# Memory statistics
|
||||
current_memory = self.process.memory_info().rss / (1024 * 1024)
|
||||
total_task_memory = sum(stat.memory_usage for stat in self.stats.values())
|
||||
peak_memory = max(
|
||||
(stat.peak_memory for stat in self.stats.values()), default=0.0
|
||||
)
|
||||
|
||||
# Duration
|
||||
duration = datetime.now() - self.start_time
|
||||
|
||||
# Create status row
|
||||
table.add_column("Status", style="bold cyan")
|
||||
table.add_column("Count", justify="right")
|
||||
table.add_column("Percentage", justify="right")
|
||||
|
||||
table.add_row("Total Tasks", str(total_tasks), "100%")
|
||||
table.add_row(
|
||||
"[yellow]In Queue[/yellow]",
|
||||
str(queued),
|
||||
f"{(queued/total_tasks*100):.1f}%" if total_tasks > 0 else "0%",
|
||||
)
|
||||
table.add_row(
|
||||
"[blue]In Progress[/blue]",
|
||||
str(in_progress),
|
||||
f"{(in_progress/total_tasks*100):.1f}%" if total_tasks > 0 else "0%",
|
||||
)
|
||||
table.add_row(
|
||||
"[green]Completed[/green]",
|
||||
str(completed),
|
||||
f"{(completed/total_tasks*100):.1f}%" if total_tasks > 0 else "0%",
|
||||
)
|
||||
table.add_row(
|
||||
"[red]Failed[/red]",
|
||||
str(failed),
|
||||
f"{(failed/total_tasks*100):.1f}%" if total_tasks > 0 else "0%",
|
||||
)
|
||||
|
||||
# Add memory information
|
||||
table.add_section()
|
||||
table.add_row(
|
||||
"[magenta]Current Memory[/magenta]", f"{current_memory:.1f} MB", ""
|
||||
)
|
||||
table.add_row(
|
||||
"[magenta]Total Task Memory[/magenta]", f"{total_task_memory:.1f} MB", ""
|
||||
)
|
||||
table.add_row(
|
||||
"[magenta]Peak Task Memory[/magenta]", f"{peak_memory:.1f} MB", ""
|
||||
)
|
||||
table.add_row(
|
||||
"[yellow]Runtime[/yellow]",
|
||||
str(timedelta(seconds=int(duration.total_seconds()))),
|
||||
"",
|
||||
)
|
||||
|
||||
return table
|
||||
|
||||
def _create_detailed_table(self) -> Table:
|
||||
table = Table(
|
||||
box=box.ROUNDED,
|
||||
title="Crawler Performance Monitor",
|
||||
title_style="bold magenta",
|
||||
header_style="bold blue",
|
||||
)
|
||||
|
||||
# Add columns
|
||||
table.add_column("Task ID", style="cyan", no_wrap=True)
|
||||
table.add_column("URL", style="cyan", no_wrap=True)
|
||||
table.add_column("Status", style="bold")
|
||||
table.add_column("Memory (MB)", justify="right")
|
||||
table.add_column("Peak (MB)", justify="right")
|
||||
table.add_column("Duration", justify="right")
|
||||
table.add_column("Info", style="italic")
|
||||
|
||||
# Add summary row
|
||||
total_memory = sum(stat.memory_usage for stat in self.stats.values())
|
||||
active_count = sum(
|
||||
1 for stat in self.stats.values() if stat.status == CrawlStatus.IN_PROGRESS
|
||||
)
|
||||
completed_count = sum(
|
||||
1 for stat in self.stats.values() if stat.status == CrawlStatus.COMPLETED
|
||||
)
|
||||
failed_count = sum(
|
||||
1 for stat in self.stats.values() if stat.status == CrawlStatus.FAILED
|
||||
)
|
||||
|
||||
table.add_row(
|
||||
"[bold yellow]SUMMARY",
|
||||
f"Total: {len(self.stats)}",
|
||||
f"Active: {active_count}",
|
||||
f"{total_memory:.1f}",
|
||||
f"{self.process.memory_info().rss / (1024 * 1024):.1f}",
|
||||
str(
|
||||
timedelta(
|
||||
seconds=int((datetime.now() - self.start_time).total_seconds())
|
||||
)
|
||||
),
|
||||
f"✓{completed_count} ✗{failed_count}",
|
||||
style="bold",
|
||||
)
|
||||
|
||||
table.add_section()
|
||||
|
||||
# Add rows for each task
|
||||
visible_stats = sorted(
|
||||
self.stats.values(),
|
||||
key=lambda x: (
|
||||
x.status != CrawlStatus.IN_PROGRESS,
|
||||
x.status != CrawlStatus.QUEUED,
|
||||
x.end_time or datetime.max,
|
||||
),
|
||||
)[: self.max_visible_rows]
|
||||
|
||||
for stat in visible_stats:
|
||||
status_style = {
|
||||
CrawlStatus.QUEUED: "white",
|
||||
CrawlStatus.IN_PROGRESS: "yellow",
|
||||
CrawlStatus.COMPLETED: "green",
|
||||
CrawlStatus.FAILED: "red",
|
||||
}[stat.status]
|
||||
|
||||
table.add_row(
|
||||
stat.task_id[:8], # Show first 8 chars of task ID
|
||||
stat.url[:40] + "..." if len(stat.url) > 40 else stat.url,
|
||||
f"[{status_style}]{stat.status.value}[/{status_style}]",
|
||||
f"{stat.memory_usage:.1f}",
|
||||
f"{stat.peak_memory:.1f}",
|
||||
stat.duration,
|
||||
stat.error_message[:40] if stat.error_message else "",
|
||||
)
|
||||
|
||||
return table
|
||||
|
||||
def _create_table(self) -> Table:
|
||||
"""Creates the appropriate table based on display mode"""
|
||||
if self.display_mode == DisplayMode.AGGREGATED:
|
||||
return self._create_aggregated_table()
|
||||
return self._create_detailed_table()
|
||||
|
||||
|
||||
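A quick standalone sketch of driving the monitor directly (the dispatchers below normally call these methods for you); `DisplayMode` and `CrawlStatus` come from `.models` as imported at the top of this file, and the URL is a placeholder:

```python
import uuid

monitor = CrawlerMonitor(max_visible_rows=10, display_mode=DisplayMode.AGGREGATED)
monitor.start()
try:
    task_id = str(uuid.uuid4())
    monitor.add_task(task_id, "https://example.com")
    monitor.update_task(task_id, status=CrawlStatus.IN_PROGRESS)
    # ... the crawl for this task would run here ...
    monitor.update_task(task_id, status=CrawlStatus.COMPLETED, memory_usage=12.5)
finally:
    monitor.stop()
```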
class BaseDispatcher(ABC):
|
||||
def __init__(
|
||||
self,
|
||||
rate_limiter: Optional[RateLimiter] = None,
|
||||
monitor: Optional[CrawlerMonitor] = None,
|
||||
):
|
||||
self.crawler = None
|
||||
self._domain_last_hit: Dict[str, float] = {}
|
||||
self.concurrent_sessions = 0
|
||||
self.rate_limiter = rate_limiter
|
||||
self.monitor = monitor
|
||||
|
||||
@abstractmethod
|
||||
async def crawl_url(
|
||||
self,
|
||||
url: str,
|
||||
config: CrawlerRunConfig,
|
||||
task_id: str,
|
||||
monitor: Optional[CrawlerMonitor] = None,
|
||||
) -> CrawlerTaskResult:
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
async def run_urls(
|
||||
self,
|
||||
urls: List[str],
|
||||
crawler: "AsyncWebCrawler", # noqa: F821
|
||||
config: CrawlerRunConfig,
|
||||
monitor: Optional[CrawlerMonitor] = None,
|
||||
) -> List[CrawlerTaskResult]:
|
||||
pass
|
||||
|
||||
|
||||
class MemoryAdaptiveDispatcher(BaseDispatcher):
|
||||
def __init__(
|
||||
self,
|
||||
memory_threshold_percent: float = 90.0,
|
||||
check_interval: float = 1.0,
|
||||
max_session_permit: int = 20,
|
||||
memory_wait_timeout: float = 300.0, # 5 minutes default timeout
|
||||
rate_limiter: Optional[RateLimiter] = None,
|
||||
monitor: Optional[CrawlerMonitor] = None,
|
||||
):
|
||||
super().__init__(rate_limiter, monitor)
|
||||
self.memory_threshold_percent = memory_threshold_percent
|
||||
self.check_interval = check_interval
|
||||
self.max_session_permit = max_session_permit
|
||||
self.memory_wait_timeout = memory_wait_timeout
|
||||
|
||||
async def crawl_url(
|
||||
self,
|
||||
url: str,
|
||||
config: CrawlerRunConfig,
|
||||
task_id: str,
|
||||
) -> CrawlerTaskResult:
|
||||
start_time = datetime.now()
|
||||
error_message = ""
|
||||
memory_usage = peak_memory = 0.0
|
||||
|
||||
try:
|
||||
if self.monitor:
|
||||
self.monitor.update_task(
|
||||
task_id, status=CrawlStatus.IN_PROGRESS, start_time=start_time
|
||||
)
|
||||
self.concurrent_sessions += 1
|
||||
|
||||
if self.rate_limiter:
|
||||
await self.rate_limiter.wait_if_needed(url)
|
||||
|
||||
process = psutil.Process()
|
||||
start_memory = process.memory_info().rss / (1024 * 1024)
|
||||
result = await self.crawler.arun(url, config=config, session_id=task_id)
|
||||
end_memory = process.memory_info().rss / (1024 * 1024)
|
||||
|
||||
memory_usage = peak_memory = end_memory - start_memory
|
||||
|
||||
if self.rate_limiter and result.status_code:
|
||||
if not self.rate_limiter.update_delay(url, result.status_code):
|
||||
error_message = f"Rate limit retry count exceeded for domain {urlparse(url).netloc}"
|
||||
if self.monitor:
|
||||
self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
|
||||
return CrawlerTaskResult(
|
||||
task_id=task_id,
|
||||
url=url,
|
||||
result=result,
|
||||
memory_usage=memory_usage,
|
||||
peak_memory=peak_memory,
|
||||
start_time=start_time,
|
||||
end_time=datetime.now(),
|
||||
error_message=error_message,
|
||||
)
|
||||
|
||||
if not result.success:
|
||||
error_message = result.error_message
|
||||
if self.monitor:
|
||||
self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
|
||||
elif self.monitor:
|
||||
self.monitor.update_task(task_id, status=CrawlStatus.COMPLETED)
|
||||
|
||||
except Exception as e:
|
||||
error_message = str(e)
|
||||
if self.monitor:
|
||||
self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
|
||||
result = CrawlResult(
|
||||
url=url, html="", metadata={}, success=False, error_message=str(e)
|
||||
)
|
||||
|
||||
finally:
|
||||
end_time = datetime.now()
|
||||
if self.monitor:
|
||||
self.monitor.update_task(
|
||||
task_id,
|
||||
end_time=end_time,
|
||||
memory_usage=memory_usage,
|
||||
peak_memory=peak_memory,
|
||||
error_message=error_message,
|
||||
)
|
||||
self.concurrent_sessions -= 1
|
||||
|
||||
return CrawlerTaskResult(
|
||||
task_id=task_id,
|
||||
url=url,
|
||||
result=result,
|
||||
memory_usage=memory_usage,
|
||||
peak_memory=peak_memory,
|
||||
start_time=start_time,
|
||||
end_time=end_time,
|
||||
error_message=error_message,
|
||||
)
|
||||
|
||||
async def run_urls(
|
||||
self,
|
||||
urls: List[str],
|
||||
crawler: "AsyncWebCrawler", # noqa: F821
|
||||
config: CrawlerRunConfig,
|
||||
) -> List[CrawlerTaskResult]:
|
||||
self.crawler = crawler
|
||||
|
||||
if self.monitor:
|
||||
self.monitor.start()
|
||||
|
||||
try:
|
||||
pending_tasks = []
|
||||
active_tasks = []
|
||||
task_queue = []
|
||||
|
||||
for url in urls:
|
||||
task_id = str(uuid.uuid4())
|
||||
if self.monitor:
|
||||
self.monitor.add_task(task_id, url)
|
||||
task_queue.append((url, task_id))
|
||||
|
||||
while task_queue or active_tasks:
|
||||
wait_start_time = time.time()
|
||||
while len(active_tasks) < self.max_session_permit and task_queue:
|
||||
if psutil.virtual_memory().percent >= self.memory_threshold_percent:
|
||||
# Check if we've exceeded the timeout
|
||||
if time.time() - wait_start_time > self.memory_wait_timeout:
|
||||
raise MemoryError(
|
||||
f"Memory usage above threshold ({self.memory_threshold_percent}%) for more than {self.memory_wait_timeout} seconds"
|
||||
)
|
||||
await asyncio.sleep(self.check_interval)
|
||||
continue
|
||||
|
||||
url, task_id = task_queue.pop(0)
|
||||
task = asyncio.create_task(self.crawl_url(url, config, task_id))
|
||||
active_tasks.append(task)
|
||||
|
||||
if not active_tasks:
|
||||
await asyncio.sleep(self.check_interval)
|
||||
continue
|
||||
|
||||
done, pending = await asyncio.wait(
|
||||
active_tasks, return_when=asyncio.FIRST_COMPLETED
|
||||
)
|
||||
|
||||
pending_tasks.extend(done)
|
||||
active_tasks = list(pending)
|
||||
|
||||
return await asyncio.gather(*pending_tasks)
|
||||
finally:
|
||||
if self.monitor:
|
||||
self.monitor.stop()
|
||||
|
||||
|
||||
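A hedged usage sketch for the memory-adaptive dispatcher above. The thresholds and URLs are illustrative, and the `AsyncWebCrawler`/`CrawlerRunConfig` imports are assumed to match the rest of the package:

```python
import asyncio

from crawl4ai import AsyncWebCrawler, CrawlerRunConfig   # assumed public imports


async def main():
    dispatcher = MemoryAdaptiveDispatcher(
        memory_threshold_percent=85.0,   # pause launching new tasks above 85% system memory
        check_interval=0.5,              # how often to re-check memory pressure
        max_session_permit=10,           # hard cap on concurrent crawls
        memory_wait_timeout=120.0,       # raise MemoryError after 2 minutes of waiting
    )
    async with AsyncWebCrawler() as crawler:
        results = await dispatcher.run_urls(
            urls=["https://example.com/a", "https://example.com/b"],  # placeholders
            crawler=crawler,
            config=CrawlerRunConfig(),
        )
    print(len(results), "results")


asyncio.run(main())
```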
class SemaphoreDispatcher(BaseDispatcher):
|
||||
def __init__(
|
||||
self,
|
||||
semaphore_count: int = 5,
|
||||
max_session_permit: int = 20,
|
||||
rate_limiter: Optional[RateLimiter] = None,
|
||||
monitor: Optional[CrawlerMonitor] = None,
|
||||
):
|
||||
super().__init__(rate_limiter, monitor)
|
||||
self.semaphore_count = semaphore_count
|
||||
self.max_session_permit = max_session_permit
|
||||
|
||||
async def crawl_url(
|
||||
self,
|
||||
url: str,
|
||||
config: CrawlerRunConfig,
|
||||
task_id: str,
|
||||
semaphore: asyncio.Semaphore = None,
|
||||
) -> CrawlerTaskResult:
|
||||
start_time = datetime.now()
|
||||
error_message = ""
|
||||
memory_usage = peak_memory = 0.0
|
||||
|
||||
try:
|
||||
if self.monitor:
|
||||
self.monitor.update_task(
|
||||
task_id, status=CrawlStatus.IN_PROGRESS, start_time=start_time
|
||||
)
|
||||
|
||||
if self.rate_limiter:
|
||||
await self.rate_limiter.wait_if_needed(url)
|
||||
|
||||
async with semaphore:
|
||||
process = psutil.Process()
|
||||
start_memory = process.memory_info().rss / (1024 * 1024)
|
||||
result = await self.crawler.arun(url, config=config, session_id=task_id)
|
||||
end_memory = process.memory_info().rss / (1024 * 1024)
|
||||
|
||||
memory_usage = peak_memory = end_memory - start_memory
|
||||
|
||||
if self.rate_limiter and result.status_code:
|
||||
if not self.rate_limiter.update_delay(url, result.status_code):
|
||||
error_message = f"Rate limit retry count exceeded for domain {urlparse(url).netloc}"
|
||||
if self.monitor:
|
||||
self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
|
||||
return CrawlerTaskResult(
|
||||
task_id=task_id,
|
||||
url=url,
|
||||
result=result,
|
||||
memory_usage=memory_usage,
|
||||
peak_memory=peak_memory,
|
||||
start_time=start_time,
|
||||
end_time=datetime.now(),
|
||||
error_message=error_message,
|
||||
)
|
||||
|
||||
if not result.success:
|
||||
error_message = result.error_message
|
||||
if self.monitor:
|
||||
self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
|
||||
elif self.monitor:
|
||||
self.monitor.update_task(task_id, status=CrawlStatus.COMPLETED)
|
||||
|
||||
except Exception as e:
|
||||
error_message = str(e)
|
||||
if self.monitor:
|
||||
self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
|
||||
result = CrawlResult(
|
||||
url=url, html="", metadata={}, success=False, error_message=str(e)
|
||||
)
|
||||
|
||||
finally:
|
||||
end_time = datetime.now()
|
||||
if self.monitor:
|
||||
self.monitor.update_task(
|
||||
task_id,
|
||||
end_time=end_time,
|
||||
memory_usage=memory_usage,
|
||||
peak_memory=peak_memory,
|
||||
error_message=error_message,
|
||||
)
|
||||
|
||||
return CrawlerTaskResult(
|
||||
task_id=task_id,
|
||||
url=url,
|
||||
result=result,
|
||||
memory_usage=memory_usage,
|
||||
peak_memory=peak_memory,
|
||||
start_time=start_time,
|
||||
end_time=end_time,
|
||||
error_message=error_message,
|
||||
)
|
||||
|
||||
async def run_urls(
|
||||
self,
|
||||
crawler: "AsyncWebCrawler", # noqa: F821
|
||||
urls: List[str],
|
||||
config: CrawlerRunConfig,
|
||||
) -> List[CrawlerTaskResult]:
|
||||
self.crawler = crawler
|
||||
if self.monitor:
|
||||
self.monitor.start()
|
||||
|
||||
try:
|
||||
semaphore = asyncio.Semaphore(self.semaphore_count)
|
||||
tasks = []
|
||||
|
||||
for url in urls:
|
||||
task_id = str(uuid.uuid4())
|
||||
if self.monitor:
|
||||
self.monitor.add_task(task_id, url)
|
||||
task = asyncio.create_task(
|
||||
self.crawl_url(url, config, task_id, semaphore)
|
||||
)
|
||||
tasks.append(task)
|
||||
|
||||
return await asyncio.gather(*tasks, return_exceptions=True)
|
||||
finally:
|
||||
if self.monitor:
|
||||
self.monitor.stop()
|
||||
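Putting the pieces of this module together, a sketch of a fully instrumented run that combines bounded concurrency, per-domain backoff, and the live monitor table; as before, the top-level imports and URLs are assumptions for illustration:

```python
import asyncio

from crawl4ai import AsyncWebCrawler, CrawlerRunConfig   # assumed public imports


async def main():
    dispatcher = SemaphoreDispatcher(
        semaphore_count=5,
        rate_limiter=RateLimiter(base_delay=(1.0, 3.0), rate_limit_codes=[429, 503]),
        monitor=CrawlerMonitor(display_mode=DisplayMode.DETAILED),
    )
    async with AsyncWebCrawler() as crawler:
        results = await dispatcher.run_urls(
            crawler=crawler,
            urls=[f"https://example.com/page/{i}" for i in range(20)],  # placeholders
            config=CrawlerRunConfig(),
        )
    errors = [r for r in results if not isinstance(r, Exception) and r.error_message]
    print(f"{len(results)} tasks finished, {len(errors)} reported errors")


asyncio.run(main())
```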
227  crawl4ai/async_logger.py  Normal file
@@ -0,0 +1,227 @@
|
||||
from enum import Enum
|
||||
from typing import Optional, Dict, Any
|
||||
from colorama import Fore, Style, init
|
||||
import os
|
||||
from datetime import datetime
|
||||
|
||||
|
||||
class LogLevel(Enum):
|
||||
DEBUG = 1
|
||||
INFO = 2
|
||||
SUCCESS = 3
|
||||
WARNING = 4
|
||||
ERROR = 5
|
||||
|
||||
|
||||
class AsyncLogger:
|
||||
"""
|
||||
Asynchronous logger with support for colored console output and file logging.
|
||||
Supports templated messages with colored components.
|
||||
"""
|
||||
|
||||
DEFAULT_ICONS = {
|
||||
"INIT": "→",
|
||||
"READY": "✓",
|
||||
"FETCH": "↓",
|
||||
"SCRAPE": "◆",
|
||||
"EXTRACT": "■",
|
||||
"COMPLETE": "●",
|
||||
"ERROR": "×",
|
||||
"DEBUG": "⋯",
|
||||
"INFO": "ℹ",
|
||||
"WARNING": "⚠",
|
||||
}
|
||||
|
||||
DEFAULT_COLORS = {
|
||||
LogLevel.DEBUG: Fore.LIGHTBLACK_EX,
|
||||
LogLevel.INFO: Fore.CYAN,
|
||||
LogLevel.SUCCESS: Fore.GREEN,
|
||||
LogLevel.WARNING: Fore.YELLOW,
|
||||
LogLevel.ERROR: Fore.RED,
|
||||
}
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
log_file: Optional[str] = None,
|
||||
log_level: LogLevel = LogLevel.DEBUG,
|
||||
tag_width: int = 10,
|
||||
icons: Optional[Dict[str, str]] = None,
|
||||
colors: Optional[Dict[LogLevel, str]] = None,
|
||||
verbose: bool = True,
|
||||
):
|
||||
"""
|
||||
Initialize the logger.
|
||||
|
||||
Args:
|
||||
log_file: Optional file path for logging
|
||||
log_level: Minimum log level to display
|
||||
tag_width: Width for tag formatting
|
||||
icons: Custom icons for different tags
|
||||
colors: Custom colors for different log levels
|
||||
verbose: Whether to output to console
|
||||
"""
|
||||
init() # Initialize colorama
|
||||
self.log_file = log_file
|
||||
self.log_level = log_level
|
||||
self.tag_width = tag_width
|
||||
self.icons = icons or self.DEFAULT_ICONS
|
||||
self.colors = colors or self.DEFAULT_COLORS
|
||||
self.verbose = verbose
|
||||
|
||||
# Create log file directory if needed
|
||||
if log_file:
|
||||
os.makedirs(os.path.dirname(os.path.abspath(log_file)), exist_ok=True)
|
||||
|
||||
def _format_tag(self, tag: str) -> str:
|
||||
"""Format a tag with consistent width."""
|
||||
return f"[{tag}]".ljust(self.tag_width, ".")
|
||||
|
||||
def _get_icon(self, tag: str) -> str:
|
||||
"""Get the icon for a tag, defaulting to info icon if not found."""
|
||||
return self.icons.get(tag, self.icons["INFO"])
|
||||
|
||||
def _write_to_file(self, message: str):
|
||||
"""Write a message to the log file if configured."""
|
||||
if self.log_file:
|
||||
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
|
||||
with open(self.log_file, "a", encoding="utf-8") as f:
|
||||
# Strip ANSI color codes for file output
|
||||
clean_message = message.replace(Fore.RESET, "").replace(
|
||||
Style.RESET_ALL, ""
|
||||
)
|
||||
for color in vars(Fore).values():
|
||||
if isinstance(color, str):
|
||||
clean_message = clean_message.replace(color, "")
|
||||
f.write(f"[{timestamp}] {clean_message}\n")
|
||||
|
||||
def _log(
|
||||
self,
|
||||
level: LogLevel,
|
||||
message: str,
|
||||
tag: str,
|
||||
params: Optional[Dict[str, Any]] = None,
|
||||
colors: Optional[Dict[str, str]] = None,
|
||||
base_color: Optional[str] = None,
|
||||
**kwargs,
|
||||
):
|
||||
"""
|
||||
Core logging method that handles message formatting and output.
|
||||
|
||||
Args:
|
||||
level: Log level for this message
|
||||
message: Message template string
|
||||
tag: Tag for the message
|
||||
params: Parameters to format into the message
|
||||
colors: Color overrides for specific parameters
|
||||
base_color: Base color for the entire message
|
||||
"""
|
||||
if level.value < self.log_level.value:
|
||||
return
|
||||
|
||||
# Format the message with parameters if provided
|
||||
if params:
|
||||
try:
|
||||
# First format the message with raw parameters
|
||||
formatted_message = message.format(**params)
|
||||
|
||||
# Then apply colors if specified
|
||||
if colors:
|
||||
for key, color in colors.items():
|
||||
# Find the formatted value in the message and wrap it with color
|
||||
if key in params:
|
||||
value_str = str(params[key])
|
||||
formatted_message = formatted_message.replace(
|
||||
value_str, f"{color}{value_str}{Style.RESET_ALL}"
|
||||
)
|
||||
|
||||
except KeyError as e:
|
||||
formatted_message = (
|
||||
f"LOGGING ERROR: Missing parameter {e} in message template"
|
||||
)
|
||||
level = LogLevel.ERROR
|
||||
else:
|
||||
formatted_message = message
|
||||
|
||||
# Construct the full log line
|
||||
color = base_color or self.colors[level]
|
||||
log_line = f"{color}{self._format_tag(tag)} {self._get_icon(tag)} {formatted_message}{Style.RESET_ALL}"
|
||||
|
||||
# Output to console if verbose
|
||||
if self.verbose or kwargs.get("force_verbose", False):
|
||||
print(log_line)
|
||||
|
||||
# Write to file if configured
|
||||
self._write_to_file(log_line)
|
||||
|
||||
def debug(self, message: str, tag: str = "DEBUG", **kwargs):
|
||||
"""Log a debug message."""
|
||||
self._log(LogLevel.DEBUG, message, tag, **kwargs)
|
||||
|
||||
def info(self, message: str, tag: str = "INFO", **kwargs):
|
||||
"""Log an info message."""
|
||||
self._log(LogLevel.INFO, message, tag, **kwargs)
|
||||
|
||||
def success(self, message: str, tag: str = "SUCCESS", **kwargs):
|
||||
"""Log a success message."""
|
||||
self._log(LogLevel.SUCCESS, message, tag, **kwargs)
|
||||
|
||||
def warning(self, message: str, tag: str = "WARNING", **kwargs):
|
||||
"""Log a warning message."""
|
||||
self._log(LogLevel.WARNING, message, tag, **kwargs)
|
||||
|
||||
def error(self, message: str, tag: str = "ERROR", **kwargs):
|
||||
"""Log an error message."""
|
||||
self._log(LogLevel.ERROR, message, tag, **kwargs)
|
||||
|
||||
def url_status(
|
||||
self,
|
||||
url: str,
|
||||
success: bool,
|
||||
timing: float,
|
||||
tag: str = "FETCH",
|
||||
url_length: int = 50,
|
||||
):
|
||||
"""
|
||||
Convenience method for logging URL fetch status.
|
||||
|
||||
Args:
|
||||
url: The URL being processed
|
||||
success: Whether the operation was successful
|
||||
timing: Time taken for the operation
|
||||
tag: Tag for the message
|
||||
url_length: Maximum length for URL in log
|
||||
"""
|
||||
self._log(
|
||||
level=LogLevel.SUCCESS if success else LogLevel.ERROR,
|
||||
message="{url:.{url_length}}... | Status: {status} | Time: {timing:.2f}s",
|
||||
tag=tag,
|
||||
params={
|
||||
"url": url,
|
||||
"url_length": url_length,
|
||||
"status": success,
|
||||
"timing": timing,
|
||||
},
|
||||
colors={
|
||||
"status": Fore.GREEN if success else Fore.RED,
|
||||
"timing": Fore.YELLOW,
|
||||
},
|
||||
)
|
||||
|
||||
def error_status(
|
||||
self, url: str, error: str, tag: str = "ERROR", url_length: int = 50
|
||||
):
|
||||
"""
|
||||
Convenience method for logging error status.
|
||||
|
||||
Args:
|
||||
url: The URL being processed
|
||||
error: Error message
|
||||
tag: Tag for the message
|
||||
url_length: Maximum length for URL in log
|
||||
"""
|
||||
self._log(
|
||||
level=LogLevel.ERROR,
|
||||
message="{url:.{url_length}}... | Error: {error}",
|
||||
tag=tag,
|
||||
params={"url": url, "url_length": url_length, "error": error},
|
||||
)
|
||||
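A short sketch of the logger above in action; the log file path is an arbitrary example and the URLs are placeholders:

```python
logger = AsyncLogger(log_file="./logs/crawler.log", log_level=LogLevel.INFO, verbose=True)

# Templated message: {count} is filled in from params.
logger.info("Crawling {count} URLs", tag="INIT", params={"count": 42})

# Convenience helpers for per-URL outcomes.
logger.url_status("https://example.com/very/long/path", success=True, timing=1.87)
logger.error_status("https://example.com/broken", error="connection reset")
```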
File diff suppressed because it is too large
117  crawl4ai/cache_context.py  Normal file
@@ -0,0 +1,117 @@
|
||||
from enum import Enum
|
||||
|
||||
|
||||
class CacheMode(Enum):
|
||||
"""
|
||||
Defines the caching behavior for web crawling operations.
|
||||
|
||||
Modes:
|
||||
- ENABLED: Normal caching behavior (read and write)
|
||||
- DISABLED: No caching at all
|
||||
- READ_ONLY: Only read from cache, don't write
|
||||
- WRITE_ONLY: Only write to cache, don't read
|
||||
- BYPASS: Bypass cache for this operation
|
||||
"""
|
||||
|
||||
ENABLED = "enabled"
|
||||
DISABLED = "disabled"
|
||||
READ_ONLY = "read_only"
|
||||
WRITE_ONLY = "write_only"
|
||||
BYPASS = "bypass"
|
||||
|
||||
|
||||
class CacheContext:
|
||||
"""
|
||||
Encapsulates cache-related decisions and URL handling.
|
||||
|
||||
This class centralizes all cache-related logic and URL type checking,
|
||||
making the caching behavior more predictable and maintainable.
|
||||
|
||||
Attributes:
|
||||
url (str): The URL being processed.
|
||||
cache_mode (CacheMode): The cache mode for the current operation.
|
||||
always_bypass (bool): If True, bypasses caching for this operation.
|
||||
is_cacheable (bool): True if the URL is cacheable, False otherwise.
|
||||
is_web_url (bool): True if the URL is a web URL, False otherwise.
|
||||
is_local_file (bool): True if the URL is a local file, False otherwise.
|
||||
is_raw_html (bool): True if the URL is raw HTML, False otherwise.
|
||||
_url_display (str): The display name for the URL (web, local file, or raw HTML).
|
||||
"""
|
||||
|
||||
def __init__(self, url: str, cache_mode: CacheMode, always_bypass: bool = False):
|
||||
"""
|
||||
Initializes the CacheContext with the provided URL and cache mode.
|
||||
|
||||
Args:
|
||||
url (str): The URL being processed.
|
||||
cache_mode (CacheMode): The cache mode for the current operation.
|
||||
always_bypass (bool): If True, bypasses caching for this operation.
|
||||
"""
|
||||
self.url = url
|
||||
self.cache_mode = cache_mode
|
||||
self.always_bypass = always_bypass
|
||||
self.is_cacheable = url.startswith(("http://", "https://", "file://"))
|
||||
self.is_web_url = url.startswith(("http://", "https://"))
|
||||
self.is_local_file = url.startswith("file://")
|
||||
self.is_raw_html = url.startswith("raw:")
|
||||
self._url_display = url if not self.is_raw_html else "Raw HTML"
|
||||
|
||||
def should_read(self) -> bool:
|
||||
"""
|
||||
Determines if cache should be read based on context.
|
||||
|
||||
How it works:
|
||||
1. If always_bypass is True or is_cacheable is False, return False.
|
||||
2. If cache_mode is ENABLED or READ_ONLY, return True.
|
||||
|
||||
Returns:
|
||||
bool: True if cache should be read, False otherwise.
|
||||
"""
|
||||
if self.always_bypass or not self.is_cacheable:
|
||||
return False
|
||||
return self.cache_mode in [CacheMode.ENABLED, CacheMode.READ_ONLY]
|
||||
|
||||
def should_write(self) -> bool:
|
||||
"""
|
||||
Determines if cache should be written based on context.
|
||||
|
||||
How it works:
|
||||
1. If always_bypass is True or is_cacheable is False, return False.
|
||||
2. If cache_mode is ENABLED or WRITE_ONLY, return True.
|
||||
|
||||
Returns:
|
||||
bool: True if cache should be written, False otherwise.
|
||||
"""
|
||||
if self.always_bypass or not self.is_cacheable:
|
||||
return False
|
||||
return self.cache_mode in [CacheMode.ENABLED, CacheMode.WRITE_ONLY]
|
||||
|
||||
@property
|
||||
def display_url(self) -> str:
|
||||
"""Returns the URL in display format."""
|
||||
return self._url_display
|
||||
|
||||
|
||||
def _legacy_to_cache_mode(
|
||||
disable_cache: bool = False,
|
||||
bypass_cache: bool = False,
|
||||
no_cache_read: bool = False,
|
||||
no_cache_write: bool = False,
|
||||
) -> CacheMode:
|
||||
"""
|
||||
Converts legacy cache parameters to the new CacheMode enum.
|
||||
|
||||
This is an internal function to help transition from the old boolean flags
|
||||
to the new CacheMode system.
|
||||
"""
|
||||
if disable_cache:
|
||||
return CacheMode.DISABLED
|
||||
if bypass_cache:
|
||||
return CacheMode.BYPASS
|
||||
if no_cache_read and no_cache_write:
|
||||
return CacheMode.DISABLED
|
||||
if no_cache_read:
|
||||
return CacheMode.WRITE_ONLY
|
||||
if no_cache_write:
|
||||
return CacheMode.READ_ONLY
|
||||
return CacheMode.ENABLED
|
||||
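A small sketch of how the decisions above play out for a couple of URL/mode combinations, including the legacy-flag conversion:

```python
ctx = CacheContext("https://example.com/docs", CacheMode.ENABLED)
print(ctx.display_url, ctx.should_read(), ctx.should_write())    # reads and writes cache

raw = CacheContext("raw:<html><body>hi</body></html>", CacheMode.ENABLED)
print(raw.display_url, raw.should_read(), raw.should_write())    # "Raw HTML" is never cached

# Legacy boolean flags map onto the enum:
print(_legacy_to_cache_mode(bypass_cache=True))    # CacheMode.BYPASS
print(_legacy_to_cache_mode(no_cache_read=True))   # CacheMode.WRITE_ONLY
```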
@@ -3,23 +3,53 @@ import re
|
||||
from collections import Counter
|
||||
import string
|
||||
from .model_loader import load_nltk_punkt
|
||||
from .utils import *
|
||||
|
||||
|
||||
# Define the abstract base class for chunking strategies
|
||||
class ChunkingStrategy(ABC):
|
||||
|
||||
"""
|
||||
Abstract base class for chunking strategies.
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
def chunk(self, text: str) -> list:
|
||||
"""
|
||||
Abstract method to chunk the given text.
|
||||
|
||||
Args:
|
||||
text (str): The text to chunk.
|
||||
|
||||
Returns:
|
||||
list: A list of chunks.
|
||||
"""
|
||||
pass
|
||||
|
||||
|
||||
|
||||
# Create an identity chunking strategy f(x) = [x]
|
||||
class IdentityChunking(ChunkingStrategy):
|
||||
"""
|
||||
Chunking strategy that returns the input text as a single chunk.
|
||||
"""
|
||||
|
||||
def chunk(self, text: str) -> list:
|
||||
return [text]
|
||||
|
||||
|
||||
# Regex-based chunking
|
||||
class RegexChunking(ChunkingStrategy):
|
||||
"""
|
||||
Chunking strategy that splits text based on regular expression patterns.
|
||||
"""
|
||||
|
||||
def __init__(self, patterns=None, **kwargs):
|
||||
"""
|
||||
Initialize the RegexChunking object.
|
||||
|
||||
Args:
|
||||
patterns (list): A list of regular expression patterns to split text.
|
||||
"""
|
||||
if patterns is None:
|
||||
patterns = [r'\n\n'] # Default split pattern
|
||||
patterns = [r"\n\n"] # Default split pattern
|
||||
self.patterns = patterns
|
||||
|
||||
def chunk(self, text: str) -> list:
|
||||
@@ -30,12 +60,19 @@ class RegexChunking(ChunkingStrategy):
|
||||
new_paragraphs.extend(re.split(pattern, paragraph))
|
||||
paragraphs = new_paragraphs
|
||||
return paragraphs
|
||||
|
||||
# NLP-based sentence chunking
|
||||
class NlpSentenceChunking(ChunkingStrategy):
|
||||
"""
|
||||
Chunking strategy that splits text into sentences using NLTK's sentence tokenizer.
|
||||
"""
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
"""
|
||||
Initialize the NlpSentenceChunking object.
|
||||
"""
|
||||
load_nltk_punkt()
|
||||
pass
|
||||
|
||||
def chunk(self, text: str) -> list:
|
||||
# Improved regex for sentence splitting
|
||||
@@ -43,18 +80,34 @@ class NlpSentenceChunking(ChunkingStrategy):
|
||||
# r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<![A-Z][A-Z]\.)(?<![A-Za-z]\.)(?<=\.|\?|\!|\n)\s'
|
||||
# )
|
||||
# sentences = sentence_endings.split(text)
|
||||
# sens = [sent.strip() for sent in sentences if sent]
|
||||
from nltk.tokenize import sent_tokenize
|
||||
|
||||
sentences = sent_tokenize(text)
|
||||
sens = [sent.strip() for sent in sentences]
|
||||
|
||||
return list(set(sens))
|
||||
|
||||
|
||||
|
||||
# Topic-based segmentation using TextTiling
|
||||
class TopicSegmentationChunking(ChunkingStrategy):
|
||||
|
||||
"""
|
||||
Chunking strategy that segments text into topics using NLTK's TextTilingTokenizer.
|
||||
|
||||
How it works:
|
||||
1. Segment the text into topics using TextTilingTokenizer
|
||||
2. Extract keywords for each topic segment
|
||||
"""
|
||||
|
||||
def __init__(self, num_keywords=3, **kwargs):
|
||||
"""
|
||||
Initialize the TopicSegmentationChunking object.
|
||||
|
||||
Args:
|
||||
num_keywords (int): The number of keywords to extract for each topic segment.
|
||||
"""
|
||||
import nltk as nl
|
||||
|
||||
self.tokenizer = nl.tokenize.TextTilingTokenizer()
|
||||
self.num_keywords = num_keywords
|
||||
|
||||
@@ -66,8 +119,14 @@ class TopicSegmentationChunking(ChunkingStrategy):
|
||||
def extract_keywords(self, text: str) -> list:
|
||||
# Tokenize and remove stopwords and punctuation
|
||||
import nltk as nl
|
||||
|
||||
tokens = nl.tokenize.word_tokenize(text)
|
||||
tokens = [token.lower() for token in tokens if token not in nl.corpus.stopwords.words('english') and token not in string.punctuation]
|
||||
tokens = [
|
||||
token.lower()
|
||||
for token in tokens
|
||||
if token not in nl.corpus.stopwords.words("english")
|
||||
and token not in string.punctuation
|
||||
]
|
||||
|
||||
# Calculate frequency distribution
|
||||
freq_dist = Counter(tokens)
|
||||
@@ -78,15 +137,27 @@ class TopicSegmentationChunking(ChunkingStrategy):
|
||||
# Segment the text into topics
|
||||
segments = self.chunk(text)
|
||||
# Extract keywords for each topic segment
|
||||
segments_with_topics = [(segment, self.extract_keywords(segment)) for segment in segments]
|
||||
segments_with_topics = [
|
||||
(segment, self.extract_keywords(segment)) for segment in segments
|
||||
]
|
||||
return segments_with_topics
|
||||
|
||||
|
||||
|
||||
# Fixed-length word chunks
|
||||
class FixedLengthWordChunking(ChunkingStrategy):
|
||||
"""
|
||||
Chunking strategy that splits text into fixed-length word chunks.
|
||||
|
||||
How it works:
|
||||
1. Split the text into words
|
||||
2. Create chunks of fixed length
|
||||
3. Return the list of chunks
|
||||
"""
|
||||
|
||||
def __init__(self, chunk_size=100, **kwargs):
|
||||
"""
|
||||
Initialize the fixed-length word chunking strategy with the given chunk size.
|
||||
|
||||
|
||||
Args:
|
||||
chunk_size (int): The size of each chunk in words.
|
||||
"""
|
||||
@@ -94,15 +165,28 @@ class FixedLengthWordChunking(ChunkingStrategy):
|
||||
|
||||
def chunk(self, text: str) -> list:
|
||||
words = text.split()
|
||||
return [' '.join(words[i:i + self.chunk_size]) for i in range(0, len(words), self.chunk_size)]
|
||||
|
||||
return [
|
||||
" ".join(words[i : i + self.chunk_size])
|
||||
for i in range(0, len(words), self.chunk_size)
|
||||
]
|
||||
|
||||
|
||||
# Sliding window chunking
|
||||
class SlidingWindowChunking(ChunkingStrategy):
|
||||
"""
|
||||
Chunking strategy that splits text into overlapping word chunks.
|
||||
|
||||
How it works:
|
||||
1. Split the text into words
|
||||
2. Create chunks of fixed length
|
||||
3. Return the list of chunks
|
||||
"""
|
||||
|
||||
def __init__(self, window_size=100, step=50, **kwargs):
|
||||
"""
|
||||
Initialize the sliding window chunking strategy with the given window size and
|
||||
step size.
|
||||
|
||||
|
||||
Args:
|
||||
window_size (int): The size of the sliding window in words.
|
||||
step (int): The step size for sliding the window in words.
|
||||
@@ -113,27 +197,37 @@ class SlidingWindowChunking(ChunkingStrategy):
|
||||
def chunk(self, text: str) -> list:
|
||||
words = text.split()
|
||||
chunks = []
|
||||
|
||||
|
||||
if len(words) <= self.window_size:
|
||||
return [text]
|
||||
|
||||
|
||||
for i in range(0, len(words) - self.window_size + 1, self.step):
|
||||
chunk = ' '.join(words[i:i + self.window_size])
|
||||
chunk = " ".join(words[i : i + self.window_size])
|
||||
chunks.append(chunk)
|
||||
|
||||
|
||||
# Handle the last chunk if it doesn't align perfectly
|
||||
if i + self.window_size < len(words):
|
||||
chunks.append(' '.join(words[-self.window_size:]))
|
||||
|
||||
chunks.append(" ".join(words[-self.window_size :]))
|
||||
|
||||
return chunks
|
||||
|
||||
|
||||
|
||||
class OverlappingWindowChunking(ChunkingStrategy):
|
||||
"""
|
||||
Chunking strategy that splits text into overlapping word chunks.
|
||||
|
||||
How it works:
|
||||
1. Split the text into words using whitespace
|
||||
2. Create chunks of fixed length equal to the window size
|
||||
3. Slide the window by the overlap size
|
||||
4. Return the list of chunks
|
||||
"""
|
||||
|
||||
def __init__(self, window_size=1000, overlap=100, **kwargs):
|
||||
"""
|
||||
Initialize the overlapping window chunking strategy with the given window size and
|
||||
overlap size.
|
||||
|
||||
|
||||
Args:
|
||||
window_size (int): The size of the window in words.
|
||||
overlap (int): The size of the overlap between consecutive chunks in words.
|
||||
@@ -144,19 +238,19 @@ class OverlappingWindowChunking(ChunkingStrategy):
|
||||
def chunk(self, text: str) -> list:
|
||||
words = text.split()
|
||||
chunks = []
|
||||
|
||||
|
||||
if len(words) <= self.window_size:
|
||||
return [text]
|
||||
|
||||
|
||||
start = 0
|
||||
while start < len(words):
|
||||
end = start + self.window_size
|
||||
chunk = ' '.join(words[start:end])
|
||||
chunk = " ".join(words[start:end])
|
||||
chunks.append(chunk)
|
||||
|
||||
|
||||
if end >= len(words):
|
||||
break
|
||||
|
||||
|
||||
start = end - self.overlap
|
||||
|
||||
return chunks
|
||||
|
||||
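A brief sketch comparing the two window-based strategies above on a toy text; both return plain lists of strings:

```python
text = " ".join(f"word{i}" for i in range(250))

sliding = SlidingWindowChunking(window_size=100, step=50)
overlapping = OverlappingWindowChunking(window_size=100, overlap=20)

print(len(sliding.chunk(text)))      # windows start every `step` words and overlap heavily
print(len(overlapping.chunk(text)))  # each window advances by window_size - overlap words
```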
123  crawl4ai/cli.py  Normal file
@@ -0,0 +1,123 @@
|
||||
import click
|
||||
import sys
|
||||
import asyncio
|
||||
from typing import List
|
||||
from .docs_manager import DocsManager
|
||||
from .async_logger import AsyncLogger
|
||||
|
||||
logger = AsyncLogger(verbose=True)
|
||||
docs_manager = DocsManager(logger)
|
||||
|
||||
|
||||
def print_table(headers: List[str], rows: List[List[str]], padding: int = 2):
|
||||
"""Print formatted table with headers and rows"""
|
||||
widths = [max(len(str(cell)) for cell in col) for col in zip(headers, *rows)]
|
||||
border = "+" + "+".join("-" * (w + 2 * padding) for w in widths) + "+"
|
||||
|
||||
def format_row(row):
|
||||
return (
|
||||
"|"
|
||||
+ "|".join(
|
||||
f"{' ' * padding}{str(cell):<{w}}{' ' * padding}"
|
||||
for cell, w in zip(row, widths)
|
||||
)
|
||||
+ "|"
|
||||
)
|
||||
|
||||
click.echo(border)
|
||||
click.echo(format_row(headers))
|
||||
click.echo(border)
|
||||
for row in rows:
|
||||
click.echo(format_row(row))
|
||||
click.echo(border)
|
||||
|
||||
|
||||
@click.group()
|
||||
def cli():
|
||||
"""Crawl4AI Command Line Interface"""
|
||||
pass
|
||||
|
||||
|
||||
@cli.group()
|
||||
def docs():
|
||||
"""Documentation operations"""
|
||||
pass
|
||||
|
||||
|
||||
@docs.command()
|
||||
@click.argument("sections", nargs=-1)
|
||||
@click.option(
|
||||
"--mode", type=click.Choice(["extended", "condensed"]), default="extended"
|
||||
)
|
||||
def combine(sections: tuple, mode: str):
|
||||
"""Combine documentation sections"""
|
||||
try:
|
||||
asyncio.run(docs_manager.ensure_docs_exist())
|
||||
click.echo(docs_manager.generate(sections, mode))
|
||||
except Exception as e:
|
||||
logger.error(str(e), tag="ERROR")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
@docs.command()
|
||||
@click.argument("query")
|
||||
@click.option("--top-k", "-k", default=5)
|
||||
@click.option("--build-index", is_flag=True, help="Build index if missing")
|
||||
def search(query: str, top_k: int, build_index: bool):
|
||||
"""Search documentation"""
|
||||
try:
|
||||
result = docs_manager.search(query, top_k)
|
||||
if result == "No search index available. Call build_search_index() first.":
|
||||
if build_index or click.confirm("No search index found. Build it now?"):
|
||||
asyncio.run(docs_manager.llm_text.generate_index_files())
|
||||
result = docs_manager.search(query, top_k)
|
||||
click.echo(result)
|
||||
except Exception as e:
|
||||
click.echo(f"Error: {str(e)}", err=True)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
@docs.command()
|
||||
def update():
|
||||
"""Update docs from GitHub"""
|
||||
try:
|
||||
asyncio.run(docs_manager.fetch_docs())
|
||||
click.echo("Documentation updated successfully")
|
||||
except Exception as e:
|
||||
click.echo(f"Error: {str(e)}", err=True)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
@docs.command()
|
||||
@click.option("--force-facts", is_flag=True, help="Force regenerate fact files")
|
||||
@click.option("--clear-cache", is_flag=True, help="Clear BM25 cache")
|
||||
def index(force_facts: bool, clear_cache: bool):
|
||||
"""Build or rebuild search indexes"""
|
||||
try:
|
||||
asyncio.run(docs_manager.ensure_docs_exist())
|
||||
asyncio.run(
|
||||
docs_manager.llm_text.generate_index_files(
|
||||
force_generate_facts=force_facts, clear_bm25_cache=clear_cache
|
||||
)
|
||||
)
|
||||
click.echo("Search indexes built successfully")
|
||||
except Exception as e:
|
||||
click.echo(f"Error: {str(e)}", err=True)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
# Add docs list command
|
||||
@docs.command()
|
||||
def list():
|
||||
"""List available documentation sections"""
|
||||
try:
|
||||
sections = docs_manager.list()
|
||||
print_table(["Sections"], [[section] for section in sections])
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"Error: {str(e)}", err=True)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
cli()
|
||||
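The commands above can be exercised without a shell through click's test runner; a sketch (the `crawl4ai.cli` import path is an assumption based on the file name in this diff):

```python
from click.testing import CliRunner

from crawl4ai.cli import cli   # assumed module path

runner = CliRunner()
result = runner.invoke(cli, ["docs", "list"])
print(result.exit_code)
print(result.output)
```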
@@ -8,11 +8,13 @@ DEFAULT_PROVIDER = "openai/gpt-4o-mini"
|
||||
MODEL_REPO_BRANCH = "new-release-0.0.2"
|
||||
# Provider-model dictionary, ONLY used when the extraction strategy is LLMExtractionStrategy
|
||||
PROVIDER_MODELS = {
|
||||
"ollama/llama3": "no-token-needed", # Any model from Ollama no need for API token
|
||||
"ollama/llama3": "no-token-needed", # Any model from Ollama no need for API token
|
||||
"groq/llama3-70b-8192": os.getenv("GROQ_API_KEY"),
|
||||
"groq/llama3-8b-8192": os.getenv("GROQ_API_KEY"),
|
||||
"openai/gpt-4o-mini": os.getenv("OPENAI_API_KEY"),
|
||||
"openai/gpt-4o": os.getenv("OPENAI_API_KEY"),
|
||||
"openai/o1-mini": os.getenv("OPENAI_API_KEY"),
|
||||
"openai/o1-preview": os.getenv("OPENAI_API_KEY"),
|
||||
"anthropic/claude-3-haiku-20240307": os.getenv("ANTHROPIC_API_KEY"),
|
||||
"anthropic/claude-3-opus-20240229": os.getenv("ANTHROPIC_API_KEY"),
|
||||
"anthropic/claude-3-sonnet-20240229": os.getenv("ANTHROPIC_API_KEY"),
|
||||
@@ -20,27 +22,49 @@ PROVIDER_MODELS = {
|
||||
}
|
||||
|
||||
# Chunk token threshold
|
||||
CHUNK_TOKEN_THRESHOLD = 2 ** 11 # 2048 tokens
|
||||
CHUNK_TOKEN_THRESHOLD = 2**11 # 2048 tokens
|
||||
OVERLAP_RATE = 0.1
|
||||
WORD_TOKEN_RATE = 1.3
|
||||
|
||||
# Threshold for the minimum number of words in an HTML tag to be considered
|
||||
MIN_WORD_THRESHOLD = 1
|
||||
IMAGE_DESCRIPTION_MIN_WORD_THRESHOLD = 1
|
||||
|
||||
IMPORTANT_ATTRS = ['src', 'href', 'alt', 'title', 'width', 'height']
|
||||
ONLY_TEXT_ELIGIBLE_TAGS = ['b', 'i', 'u', 'span', 'del', 'ins', 'sub', 'sup', 'strong', 'em', 'code', 'kbd', 'var', 's', 'q', 'abbr', 'cite', 'dfn', 'time', 'small', 'mark']
|
||||
IMPORTANT_ATTRS = ["src", "href", "alt", "title", "width", "height"]
|
||||
ONLY_TEXT_ELIGIBLE_TAGS = [
|
||||
"b",
|
||||
"i",
|
||||
"u",
|
||||
"span",
|
||||
"del",
|
||||
"ins",
|
||||
"sub",
|
||||
"sup",
|
||||
"strong",
|
||||
"em",
|
||||
"code",
|
||||
"kbd",
|
||||
"var",
|
||||
"s",
|
||||
"q",
|
||||
"abbr",
|
||||
"cite",
|
||||
"dfn",
|
||||
"time",
|
||||
"small",
|
||||
"mark",
|
||||
]
|
||||
SOCIAL_MEDIA_DOMAINS = [
|
||||
'facebook.com',
|
||||
'twitter.com',
|
||||
'x.com',
|
||||
'linkedin.com',
|
||||
'instagram.com',
|
||||
'pinterest.com',
|
||||
'tiktok.com',
|
||||
'snapchat.com',
|
||||
'reddit.com',
|
||||
]
|
||||
"facebook.com",
|
||||
"twitter.com",
|
||||
"x.com",
|
||||
"linkedin.com",
|
||||
"instagram.com",
|
||||
"pinterest.com",
|
||||
"tiktok.com",
|
||||
"snapchat.com",
|
||||
"reddit.com",
|
||||
]
|
||||
|
||||
# Threshold for the Image extraction - Range is 1 to 6
|
||||
# Images are scored based on point based system, to filter based on usefulness. Points are assigned
|
||||
@@ -51,3 +75,12 @@ SOCIAL_MEDIA_DOMAINS = [
|
||||
# If image format is in jpg, png or webp
|
||||
# If image is in the first half of the total images extracted from the page
|
||||
IMAGE_SCORE_THRESHOLD = 2
|
||||
|
||||
MAX_METRICS_HISTORY = 1000
|
||||
|
||||
NEED_MIGRATION = True
|
||||
URL_LOG_SHORTEN_LENGTH = 30
|
||||
SHOW_DEPRECATION_WARNINGS = True
|
||||
SCREENSHOT_HEIGHT_TRESHOLD = 10000
|
||||
PAGE_TIMEOUT = 60000
|
||||
DOWNLOAD_PAGE_TIMEOUT = 60000
|
||||
|
||||
@@ -1,196 +0,0 @@
|
||||
from bs4 import BeautifulSoup, Tag
|
||||
import re
|
||||
from typing import Optional
|
||||
|
||||
class ContentCleaningStrategy:
|
||||
def __init__(self):
|
||||
# Precompile regex patterns for performance
|
||||
self.negative_patterns = re.compile(r'nav|footer|header|sidebar|ads|comment', re.I)
|
||||
self.positive_patterns = re.compile(r'content|article|main|post', re.I)
|
||||
self.priority_tags = {'article', 'main', 'section', 'div'}
|
||||
self.non_content_tags = {'nav', 'footer', 'header', 'aside'}
|
||||
# Thresholds
|
||||
self.text_density_threshold = 9.0
|
||||
self.min_word_count = 50
|
||||
self.link_density_threshold = 0.2
|
||||
self.max_dom_depth = 10 # To prevent excessive DOM traversal
|
||||
|
||||
def clean(self, clean_html: str) -> str:
|
||||
"""
|
||||
Main function that takes cleaned HTML and returns super cleaned HTML.
|
||||
|
||||
Args:
|
||||
clean_html (str): The cleaned HTML content.
|
||||
|
||||
Returns:
|
||||
str: The super cleaned HTML containing only the main content.
|
||||
"""
|
||||
try:
|
||||
if not clean_html or not isinstance(clean_html, str):
|
||||
return ''
|
||||
soup = BeautifulSoup(clean_html, 'html.parser')
|
||||
main_content = self.extract_main_content(soup)
|
||||
if main_content:
|
||||
super_clean_element = self.clean_element(main_content)
|
||||
return str(super_clean_element)
|
||||
else:
|
||||
return ''
|
||||
except Exception:
|
||||
# Handle exceptions silently or log them as needed
|
||||
return ''
|
||||
|
||||
def extract_main_content(self, soup: BeautifulSoup) -> Optional[Tag]:
|
||||
"""
|
||||
Identifies and extracts the main content element from the HTML.
|
||||
|
||||
Args:
|
||||
soup (BeautifulSoup): The parsed HTML soup.
|
||||
|
||||
Returns:
|
||||
Optional[Tag]: The Tag object containing the main content, or None if not found.
|
||||
"""
|
||||
candidates = []
|
||||
for element in soup.find_all(self.priority_tags):
|
||||
if self.is_non_content_tag(element):
|
||||
continue
|
||||
if self.has_negative_class_id(element):
|
||||
continue
|
||||
score = self.calculate_content_score(element)
|
||||
candidates.append((score, element))
|
||||
|
||||
if not candidates:
|
||||
return None
|
||||
|
||||
# Sort candidates by score in descending order
|
||||
candidates.sort(key=lambda x: x[0], reverse=True)
|
||||
# Select the element with the highest score
|
||||
best_element = candidates[0][1]
|
||||
return best_element
|
||||
|
||||
def calculate_content_score(self, element: Tag) -> float:
|
||||
"""
|
||||
Calculates a score for an element based on various heuristics.
|
||||
|
||||
Args:
|
||||
element (Tag): The HTML element to score.
|
||||
|
||||
Returns:
|
||||
float: The content score of the element.
|
||||
"""
|
||||
score = 0.0
|
||||
|
||||
if self.is_priority_tag(element):
|
||||
score += 5.0
|
||||
if self.has_positive_class_id(element):
|
||||
score += 3.0
|
||||
if self.has_negative_class_id(element):
|
||||
score -= 3.0
|
||||
if self.is_high_text_density(element):
|
||||
score += 2.0
|
||||
if self.is_low_link_density(element):
|
||||
score += 2.0
|
||||
if self.has_sufficient_content(element):
|
||||
score += 2.0
|
||||
if self.has_headings(element):
|
||||
score += 3.0
|
||||
|
||||
dom_depth = self.calculate_dom_depth(element)
|
||||
score += min(dom_depth, self.max_dom_depth) * 0.5 # Adjust weight as needed
|
||||
|
||||
return score
|
||||
|
||||
def is_priority_tag(self, element: Tag) -> bool:
|
||||
"""Checks if the element is a priority tag."""
|
||||
return element.name in self.priority_tags
|
||||
|
||||
def is_non_content_tag(self, element: Tag) -> bool:
|
||||
"""Checks if the element is a non-content tag."""
|
||||
return element.name in self.non_content_tags
|
||||
|
||||
def has_negative_class_id(self, element: Tag) -> bool:
|
||||
"""Checks if the element has negative indicators in its class or id."""
|
||||
class_id = ' '.join(filter(None, [
|
||||
self.get_attr_str(element.get('class')),
|
||||
element.get('id', '')
|
||||
]))
|
||||
return bool(self.negative_patterns.search(class_id))
|
||||
|
||||
def has_positive_class_id(self, element: Tag) -> bool:
|
||||
"""Checks if the element has positive indicators in its class or id."""
|
||||
class_id = ' '.join(filter(None, [
|
||||
self.get_attr_str(element.get('class')),
|
||||
element.get('id', '')
|
||||
]))
|
||||
return bool(self.positive_patterns.search(class_id))
|
||||
|
||||
@staticmethod
|
||||
def get_attr_str(attr) -> str:
|
||||
"""Converts an attribute value to a string."""
|
||||
if isinstance(attr, list):
|
||||
return ' '.join(attr)
|
||||
elif isinstance(attr, str):
|
||||
return attr
|
||||
else:
|
||||
return ''
|
||||
|
||||
def is_high_text_density(self, element: Tag) -> bool:
|
||||
"""Determines if the element has high text density."""
|
||||
text_density = self.calculate_text_density(element)
|
||||
return text_density > self.text_density_threshold
|
||||
|
||||
def calculate_text_density(self, element: Tag) -> float:
|
||||
"""Calculates the text density of an element."""
|
||||
text_length = len(element.get_text(strip=True))
|
||||
tag_count = len(element.find_all())
|
||||
tag_count = tag_count or 1 # Prevent division by zero
|
||||
return text_length / tag_count
|
||||
|
||||
def is_low_link_density(self, element: Tag) -> bool:
|
||||
"""Determines if the element has low link density."""
|
||||
link_density = self.calculate_link_density(element)
|
||||
return link_density < self.link_density_threshold
|
||||
|
||||
def calculate_link_density(self, element: Tag) -> float:
|
||||
"""Calculates the link density of an element."""
|
||||
text = element.get_text(strip=True)
|
||||
if not text:
|
||||
return 0.0
|
||||
link_text = ' '.join(a.get_text(strip=True) for a in element.find_all('a'))
|
||||
return len(link_text) / len(text) if text else 0.0
|
||||
|
||||
def has_sufficient_content(self, element: Tag) -> bool:
|
||||
"""Checks if the element has sufficient word count."""
|
||||
word_count = len(element.get_text(strip=True).split())
|
||||
return word_count >= self.min_word_count
|
||||
|
||||
def calculate_dom_depth(self, element: Tag) -> int:
|
||||
"""Calculates the depth of an element in the DOM tree."""
|
||||
depth = 0
|
||||
current_element = element
|
||||
while current_element.parent and depth < self.max_dom_depth:
|
||||
depth += 1
|
||||
current_element = current_element.parent
|
||||
return depth
|
||||
|
||||
def has_headings(self, element: Tag) -> bool:
|
||||
"""Checks if the element contains heading tags."""
|
||||
return bool(element.find(['h1', 'h2', 'h3']))
|
||||
|
||||
def clean_element(self, element: Tag) -> Tag:
|
||||
"""
|
||||
Cleans the selected element by removing unnecessary attributes and nested non-content elements.
|
||||
|
||||
Args:
|
||||
element (Tag): The HTML element to clean.
|
||||
|
||||
Returns:
|
||||
Tag: The cleaned HTML element.
|
||||
"""
|
||||
for tag in element.find_all(['script', 'style', 'aside']):
|
||||
tag.decompose()
|
||||
for tag in element.find_all():
|
||||
attrs = dict(tag.attrs)
|
||||
for attr in attrs:
|
||||
if attr in ['style', 'onclick', 'onmouseover', 'align', 'bgcolor']:
|
||||
del tag.attrs[attr]
|
||||
return element
|
||||
999  crawl4ai/content_filter_strategy.py  Normal file
@@ -0,0 +1,999 @@
|
||||
import re
|
||||
import time
|
||||
from bs4 import BeautifulSoup, Tag
|
||||
from typing import List, Tuple, Dict, Optional
|
||||
from rank_bm25 import BM25Okapi
|
||||
from collections import deque
|
||||
from bs4 import NavigableString, Comment
|
||||
from .utils import clean_tokens, perform_completion_with_backoff, escape_json_string, sanitize_html, get_home_folder, extract_xml_data
|
||||
from abc import ABC, abstractmethod
|
||||
import math
|
||||
from snowballstemmer import stemmer
|
||||
from .config import DEFAULT_PROVIDER, OVERLAP_RATE, WORD_TOKEN_RATE
|
||||
from .models import TokenUsage
|
||||
from .prompts import PROMPT_FILTER_CONTENT
|
||||
import os
|
||||
import json
|
||||
import hashlib
|
||||
from pathlib import Path
|
||||
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
from .async_logger import AsyncLogger, LogLevel
|
||||
from colorama import Fore, Style, init
|
||||
|
||||
class RelevantContentFilter(ABC):
|
||||
"""Abstract base class for content filtering strategies"""
|
||||
|
||||
def __init__(self, user_query: str = None):
|
||||
self.user_query = user_query
|
||||
self.included_tags = {
|
||||
# Primary structure
|
||||
"article",
|
||||
"main",
|
||||
"section",
|
||||
"div",
|
||||
# List structures
|
||||
"ul",
|
||||
"ol",
|
||||
"li",
|
||||
"dl",
|
||||
"dt",
|
||||
"dd",
|
||||
# Text content
|
||||
"p",
|
||||
"span",
|
||||
"blockquote",
|
||||
"pre",
|
||||
"code",
|
||||
# Headers
|
||||
"h1",
|
||||
"h2",
|
||||
"h3",
|
||||
"h4",
|
||||
"h5",
|
||||
"h6",
|
||||
# Tables
|
||||
"table",
|
||||
"thead",
|
||||
"tbody",
|
||||
"tr",
|
||||
"td",
|
||||
"th",
|
||||
# Other semantic elements
|
||||
"figure",
|
||||
"figcaption",
|
||||
"details",
|
||||
"summary",
|
||||
# Text formatting
|
||||
"em",
|
||||
"strong",
|
||||
"b",
|
||||
"i",
|
||||
"mark",
|
||||
"small",
|
||||
# Rich content
|
||||
"time",
|
||||
"address",
|
||||
"cite",
|
||||
"q",
|
||||
}
|
||||
self.excluded_tags = {
|
||||
"nav",
|
||||
"footer",
|
||||
"header",
|
||||
"aside",
|
||||
"script",
|
||||
"style",
|
||||
"form",
|
||||
"iframe",
|
||||
"noscript",
|
||||
}
|
||||
self.header_tags = {"h1", "h2", "h3", "h4", "h5", "h6"}
|
||||
self.negative_patterns = re.compile(
|
||||
r"nav|footer|header|sidebar|ads|comment|promo|advert|social|share", re.I
|
||||
)
|
||||
self.min_word_count = 2
|
||||
|
||||
@abstractmethod
|
||||
def filter_content(self, html: str) -> List[str]:
|
||||
"""Abstract method to be implemented by specific filtering strategies"""
|
||||
pass
|
||||
|
||||
def extract_page_query(self, soup: BeautifulSoup, body: Tag) -> str:
|
||||
"""Common method to extract page metadata with fallbacks"""
|
||||
if self.user_query:
|
||||
return self.user_query
|
||||
|
||||
query_parts = []
|
||||
|
||||
# Title
|
||||
try:
|
||||
title = soup.title.string
|
||||
if title:
|
||||
query_parts.append(title)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
if soup.find("h1"):
|
||||
query_parts.append(soup.find("h1").get_text())
|
||||
|
||||
# Meta tags
|
||||
temp = ""
|
||||
for meta_name in ["keywords", "description"]:
|
||||
meta = soup.find("meta", attrs={"name": meta_name})
|
||||
if meta and meta.get("content"):
|
||||
query_parts.append(meta["content"])
|
||||
temp += meta["content"]
|
||||
|
||||
# If still empty, grab first significant paragraph
|
||||
if not temp:
|
||||
# Find the first <p> tag whose text contains more than 150 characters
|
||||
for p in body.find_all("p"):
|
||||
if len(p.get_text()) > 150:
|
||||
query_parts.append(p.get_text()[:150])
|
||||
break
|
||||
|
||||
return " ".join(filter(None, query_parts))
|
||||
|
||||
def extract_text_chunks(
|
||||
self, body: Tag, min_word_threshold: int = None
|
||||
) -> List[Tuple[str, str]]:
|
||||
"""
|
||||
Extracts text chunks from a BeautifulSoup body element while preserving order.
|
||||
Returns a list of (chunk_index, text, tag_type, element) tuples for classification.
|
||||
|
||||
Args:
|
||||
body: BeautifulSoup Tag object representing the body element
|
||||
min_word_threshold: Optional minimum number of words a chunk must contain to be kept
|
||||
|
||||
Returns:
|
||||
List of (chunk_index, text, tag_type, element) tuples
|
||||
"""
|
||||
# Tags to ignore - inline elements that shouldn't break text flow
|
||||
INLINE_TAGS = {
|
||||
"a",
|
||||
"abbr",
|
||||
"acronym",
|
||||
"b",
|
||||
"bdo",
|
||||
"big",
|
||||
"br",
|
||||
"button",
|
||||
"cite",
|
||||
"code",
|
||||
"dfn",
|
||||
"em",
|
||||
"i",
|
||||
"img",
|
||||
"input",
|
||||
"kbd",
|
||||
"label",
|
||||
"map",
|
||||
"object",
|
||||
"q",
|
||||
"samp",
|
||||
"script",
|
||||
"select",
|
||||
"small",
|
||||
"span",
|
||||
"strong",
|
||||
"sub",
|
||||
"sup",
|
||||
"textarea",
|
||||
"time",
|
||||
"tt",
|
||||
"var",
|
||||
}
|
||||
|
||||
# Tags that typically contain meaningful headers
|
||||
HEADER_TAGS = {"h1", "h2", "h3", "h4", "h5", "h6", "header"}
|
||||
|
||||
chunks = []
|
||||
current_text = []
|
||||
chunk_index = 0
|
||||
|
||||
def should_break_chunk(tag: Tag) -> bool:
|
||||
"""Determine if a tag should cause a break in the current text chunk"""
|
||||
return tag.name not in INLINE_TAGS and not (
|
||||
tag.name == "p" and len(current_text) == 0
|
||||
)
|
||||
|
||||
# Use deque for efficient push/pop operations
|
||||
stack = deque([(body, False)])
|
||||
|
||||
while stack:
|
||||
element, visited = stack.pop()
|
||||
|
||||
if visited:
|
||||
# End of block element - flush accumulated text
|
||||
if current_text and should_break_chunk(element):
|
||||
text = " ".join("".join(current_text).split())
|
||||
if text:
|
||||
tag_type = (
|
||||
"header" if element.name in HEADER_TAGS else "content"
|
||||
)
|
||||
chunks.append((chunk_index, text, tag_type, element))
|
||||
chunk_index += 1
|
||||
current_text = []
|
||||
continue
|
||||
|
||||
if isinstance(element, NavigableString):
|
||||
if str(element).strip():
|
||||
current_text.append(str(element).strip())
|
||||
continue
|
||||
|
||||
# Pre-allocate children to avoid multiple list operations
|
||||
children = list(element.children)
|
||||
if not children:
|
||||
continue
|
||||
|
||||
# Mark block for revisit after processing children
|
||||
stack.append((element, True))
|
||||
|
||||
# Add children in reverse order for correct processing
|
||||
for child in reversed(children):
|
||||
if isinstance(child, (Tag, NavigableString)):
|
||||
stack.append((child, False))
|
||||
|
||||
# Handle any remaining text
|
||||
if current_text:
|
||||
text = " ".join("".join(current_text).split())
|
||||
if text:
|
||||
chunks.append((chunk_index, text, "content", body))
|
||||
|
||||
if min_word_threshold:
|
||||
chunks = [
|
||||
chunk for chunk in chunks if len(chunk[1].split()) >= min_word_threshold
|
||||
]
|
||||
|
||||
return chunks
|
||||
|
||||
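# Example of the shape produced by extract_text_chunks (illustrative values only):
#   [(0, "Getting started", "header", <h2 Tag>),
#    (1, "Install the package and run the crawler.", "content", <p Tag>)]
# Each tuple is (chunk_index, text, tag_type, element); tag_type is "header" for
# h1-h6/header elements and "content" for everything else.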
def _deprecated_extract_text_chunks(
|
||||
self, soup: BeautifulSoup
|
||||
) -> List[Tuple[int, str, Tag]]:
|
||||
"""Common method for extracting text chunks"""
|
||||
_text_cache = {}
|
||||
|
||||
def fast_text(element: Tag) -> str:
|
||||
elem_id = id(element)
|
||||
if elem_id in _text_cache:
|
||||
return _text_cache[elem_id]
|
||||
texts = []
|
||||
for content in element.contents:
|
||||
if isinstance(content, str):
|
||||
text = content.strip()
|
||||
if text:
|
||||
texts.append(text)
|
||||
result = " ".join(texts)
|
||||
_text_cache[elem_id] = result
|
||||
return result
|
||||
|
||||
candidates = []
|
||||
index = 0
|
||||
|
||||
def dfs(element):
|
||||
nonlocal index
|
||||
if isinstance(element, Tag):
|
||||
if element.name in self.included_tags:
|
||||
if not self.is_excluded(element):
|
||||
text = fast_text(element)
|
||||
word_count = len(text.split())
|
||||
|
||||
# Headers pass through with adjusted minimum
|
||||
if element.name in self.header_tags:
|
||||
if word_count >= 3: # Minimal sanity check for headers
|
||||
candidates.append((index, text, element))
|
||||
index += 1
|
||||
# Regular content uses standard minimum
|
||||
elif word_count >= self.min_word_count:
|
||||
candidates.append((index, text, element))
|
||||
index += 1
|
||||
|
||||
for child in element.children:
|
||||
dfs(child)
|
||||
|
||||
dfs(soup.body if soup.body else soup)
|
||||
return candidates
|
||||
|
||||
def is_excluded(self, tag: Tag) -> bool:
|
||||
"""Common method for exclusion logic"""
|
||||
if tag.name in self.excluded_tags:
|
||||
return True
|
||||
class_id = " ".join(
|
||||
filter(None, [" ".join(tag.get("class", [])), tag.get("id", "")])
|
||||
)
|
||||
return bool(self.negative_patterns.search(class_id))
|
||||
|
||||
def clean_element(self, tag: Tag) -> str:
|
||||
"""Common method for cleaning HTML elements with minimal overhead"""
|
||||
if not tag or not isinstance(tag, Tag):
|
||||
return ""
|
||||
|
||||
unwanted_tags = {"script", "style", "aside", "form", "iframe", "noscript"}
|
||||
unwanted_attrs = {
|
||||
"style",
|
||||
"onclick",
|
||||
"onmouseover",
|
||||
"align",
|
||||
"bgcolor",
|
||||
"class",
|
||||
"id",
|
||||
}
|
||||
|
||||
# Use string builder pattern for better performance
|
||||
builder = []
|
||||
|
||||
def render_tag(elem):
|
||||
if not isinstance(elem, Tag):
|
||||
if isinstance(elem, str):
|
||||
builder.append(elem.strip())
|
||||
return
|
||||
|
||||
if elem.name in unwanted_tags:
|
||||
return
|
||||
|
||||
# Start tag
|
||||
builder.append(f"<{elem.name}")
|
||||
|
||||
# Add cleaned attributes
|
||||
attrs = {k: v for k, v in elem.attrs.items() if k not in unwanted_attrs}
|
||||
for key, value in attrs.items():
|
||||
builder.append(f' {key}="{value}"')
|
||||
|
||||
builder.append(">")
|
||||
|
||||
# Process children
|
||||
for child in elem.children:
|
||||
render_tag(child)
|
||||
|
||||
# Close tag
|
||||
builder.append(f"</{elem.name}>")
|
||||
|
||||
try:
|
||||
render_tag(tag)
|
||||
return "".join(builder)
|
||||
except Exception:
|
||||
return str(tag) # Fallback to original if anything fails
|
||||
|
||||
class BM25ContentFilter(RelevantContentFilter):
|
||||
"""
|
||||
Content filtering using BM25 algorithm with priority tag handling.
|
||||
|
||||
How it works:
|
||||
1. Extracts page metadata with fallbacks.
|
||||
2. Extracts text chunks from the body element.
|
||||
3. Tokenizes the corpus and query.
|
||||
4. Applies BM25 algorithm to calculate scores for each chunk.
|
||||
5. Filters out chunks below the threshold.
|
||||
6. Re-sorts the retained chunks by their original document order.
|
||||
7. Returns the cleaned HTML of each retained chunk.
|
||||
|
||||
Attributes:
|
||||
user_query (str): User query for filtering (optional).
|
||||
bm25_threshold (float): BM25 threshold for filtering (default: 1.0).
|
||||
language (str): Language for stemming (default: 'english').
|
||||
|
||||
Methods:
|
||||
filter_content(self, html: str, min_word_threshold: int = None)
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
user_query: str = None,
|
||||
bm25_threshold: float = 1.0,
|
||||
language: str = "english",
|
||||
):
|
||||
"""
|
||||
Initializes the BM25ContentFilter class. If no user query is provided, it falls back to page metadata.
|
||||
|
||||
Note:
|
||||
If no query is given and no page metadata is available, then it tries to pick up the first significant paragraph.
|
||||
|
||||
Args:
|
||||
user_query (str): User query for filtering (optional).
|
||||
bm25_threshold (float): BM25 threshold for filtering (default: 1.0).
|
||||
language (str): Language for stemming (default: 'english').
|
||||
"""
|
||||
super().__init__(user_query=user_query)
|
||||
self.bm25_threshold = bm25_threshold
|
||||
self.priority_tags = {
|
||||
"h1": 5.0,
|
||||
"h2": 4.0,
|
||||
"h3": 3.0,
|
||||
"title": 4.0,
|
||||
"strong": 2.0,
|
||||
"b": 1.5,
|
||||
"em": 1.5,
|
||||
"blockquote": 2.0,
|
||||
"code": 2.0,
|
||||
"pre": 1.5,
|
||||
"th": 1.5, # Table headers
|
||||
}
|
||||
self.stemmer = stemmer(language)
|
||||
|
||||
def filter_content(self, html: str, min_word_threshold: int = None) -> List[str]:
|
||||
"""
|
||||
Implements content filtering using BM25 algorithm with priority tag handling.
|
||||
|
||||
Note:
|
||||
This method implements the filtering logic for the BM25ContentFilter class.
|
||||
It takes HTML content as input and returns a list of filtered text chunks.
|
||||
|
||||
Args:
|
||||
html (str): HTML content to be filtered.
|
||||
min_word_threshold (int): Minimum word threshold for filtering (optional).
|
||||
|
||||
Returns:
|
||||
List[str]: List of filtered text chunks.
|
||||
"""
|
||||
if not html or not isinstance(html, str):
|
||||
return []
|
||||
|
||||
soup = BeautifulSoup(html, "lxml")
|
||||
|
||||
# Check if body is present
|
||||
if not soup.body:
|
||||
# Wrap in body tag if missing
|
||||
soup = BeautifulSoup(f"<body>{html}</body>", "lxml")
|
||||
body = soup.find("body")
|
||||
|
||||
query = self.extract_page_query(soup, body)
|
||||
|
||||
if not query:
|
||||
return []
|
||||
# return [self.clean_element(soup)]
|
||||
|
||||
candidates = self.extract_text_chunks(body, min_word_threshold)
|
||||
|
||||
if not candidates:
|
||||
return []
|
||||
|
||||
# Tokenize corpus
|
||||
# tokenized_corpus = [chunk.lower().split() for _, chunk, _, _ in candidates]
|
||||
# tokenized_query = query.lower().split()
|
||||
|
||||
# tokenized_corpus = [[ps.stem(word) for word in chunk.lower().split()]
|
||||
# for _, chunk, _, _ in candidates]
|
||||
# tokenized_query = [ps.stem(word) for word in query.lower().split()]
|
||||
|
||||
tokenized_corpus = [
|
||||
[self.stemmer.stemWord(word) for word in chunk.lower().split()]
|
||||
for _, chunk, _, _ in candidates
|
||||
]
|
||||
tokenized_query = [
|
||||
self.stemmer.stemWord(word) for word in query.lower().split()
|
||||
]
|
||||
|
||||
# tokenized_corpus = [[self.stemmer.stemWord(word) for word in tokenize_text(chunk.lower())]
|
||||
# for _, chunk, _, _ in candidates]
|
||||
# tokenized_query = [self.stemmer.stemWord(word) for word in tokenize_text(query.lower())]
|
||||
|
||||
# Clean from stop words and noise
|
||||
tokenized_corpus = [clean_tokens(tokens) for tokens in tokenized_corpus]
|
||||
tokenized_query = clean_tokens(tokenized_query)
|
||||
|
||||
bm25 = BM25Okapi(tokenized_corpus)
|
||||
scores = bm25.get_scores(tokenized_query)
|
||||
|
||||
# Adjust scores with tag weights
|
||||
adjusted_candidates = []
|
||||
for score, (index, chunk, tag_type, tag) in zip(scores, candidates):
|
||||
tag_weight = self.priority_tags.get(tag.name, 1.0)
|
||||
adjusted_score = score * tag_weight
|
||||
adjusted_candidates.append((adjusted_score, index, chunk, tag))
|
||||
|
||||
# Filter candidates by threshold
|
||||
selected_candidates = [
|
||||
(index, chunk, tag)
|
||||
for adjusted_score, index, chunk, tag in adjusted_candidates
|
||||
if adjusted_score >= self.bm25_threshold
|
||||
]
|
||||
|
||||
if not selected_candidates:
|
||||
return []
|
||||
|
||||
# Sort selected candidates by original document order
|
||||
selected_candidates.sort(key=lambda x: x[0])
|
||||
|
||||
return [self.clean_element(tag) for _, _, tag in selected_candidates]
|
||||
|
||||
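Illustrative usage sketch for BM25ContentFilter (not part of the diff); the sample HTML, query and threshold are invented.
page_html = "<html><body><h1>Python web crawling</h1><p>Crawl pages with Python and rank content blocks by BM25 relevance to a query.</p><nav>Home About Contact</nav></body></html>"
bm25_filter = BM25ContentFilter(user_query="python web crawling", bm25_threshold=1.0)
fragments = bm25_filter.filter_content(page_html)
print("\n".join(fragments))  # query-relevant HTML blocks, kept in original page order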
class PruningContentFilter(RelevantContentFilter):
|
||||
"""
|
||||
Content filtering using pruning algorithm with dynamic threshold.
|
||||
|
||||
How it works:
|
||||
1. Parses the HTML, then removes comments and unwanted tags (nav, footer, script, etc.).
|
||||
2. Recursively scores each node using text density, link density, tag weight, class/id signals, and text length.
|
||||
3. Prunes nodes whose composite score falls below the fixed or dynamically adjusted threshold.
|
||||
4. Returns the surviving top-level blocks under <body> as HTML strings, in document order.
|
||||
|
||||
Attributes:
|
||||
user_query (str): User query for filtering (optional); if not provided, falls back to page metadata.
|
||||
min_word_threshold (int): Minimum word threshold for filtering (optional).
|
||||
threshold_type (str): Threshold type for dynamic threshold (default: 'fixed').
|
||||
threshold (float): Fixed threshold value (default: 0.48).
|
||||
|
||||
Methods:
|
||||
filter_content(self, html: str, min_word_threshold: int = None):
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
user_query: str = None,
|
||||
min_word_threshold: int = None,
|
||||
threshold_type: str = "fixed",
|
||||
threshold: float = 0.48,
|
||||
):
|
||||
"""
|
||||
Initializes the PruningContentFilter class. If no user query is provided, it falls back to page metadata.
|
||||
|
||||
Note:
|
||||
If no query is given and no page metadata is available, then it tries to pick up the first significant paragraph.
|
||||
|
||||
Args:
|
||||
user_query (str): User query for filtering (optional).
|
||||
min_word_threshold (int): Minimum word threshold for filtering (optional).
|
||||
threshold_type (str): Threshold type for dynamic threshold (default: 'fixed').
|
||||
threshold (float): Fixed threshold value (default: 0.48).
|
||||
"""
|
||||
super().__init__(None)
|
||||
self.min_word_threshold = min_word_threshold
|
||||
self.threshold_type = threshold_type
|
||||
self.threshold = threshold
|
||||
|
||||
# Add tag importance for dynamic threshold
|
||||
self.tag_importance = {
|
||||
"article": 1.5,
|
||||
"main": 1.4,
|
||||
"section": 1.3,
|
||||
"p": 1.2,
|
||||
"h1": 1.4,
|
||||
"h2": 1.3,
|
||||
"h3": 1.2,
|
||||
"div": 0.7,
|
||||
"span": 0.6,
|
||||
}
|
||||
|
||||
# Metric configuration
|
||||
self.metric_config = {
|
||||
"text_density": True,
|
||||
"link_density": True,
|
||||
"tag_weight": True,
|
||||
"class_id_weight": True,
|
||||
"text_length": True,
|
||||
}
|
||||
|
||||
self.metric_weights = {
|
||||
"text_density": 0.4,
|
||||
"link_density": 0.2,
|
||||
"tag_weight": 0.2,
|
||||
"class_id_weight": 0.1,
|
||||
"text_length": 0.1,
|
||||
}
|
||||
|
||||
self.tag_weights = {
|
||||
"div": 0.5,
|
||||
"p": 1.0,
|
||||
"article": 1.5,
|
||||
"section": 1.0,
|
||||
"span": 0.3,
|
||||
"li": 0.5,
|
||||
"ul": 0.5,
|
||||
"ol": 0.5,
|
||||
"h1": 1.2,
|
||||
"h2": 1.1,
|
||||
"h3": 1.0,
|
||||
"h4": 0.9,
|
||||
"h5": 0.8,
|
||||
"h6": 0.7,
|
||||
}
|
||||
|
||||
def filter_content(self, html: str, min_word_threshold: int = None) -> List[str]:
|
||||
"""
|
||||
Implements content filtering using pruning algorithm with dynamic threshold.
|
||||
|
||||
Note:
|
||||
This method implements the filtering logic for the PruningContentFilter class.
|
||||
It takes HTML content as input and returns a list of filtered text chunks.
|
||||
|
||||
Args:
|
||||
html (str): HTML content to be filtered.
|
||||
min_word_threshold (int): Minimum word threshold for filtering (optional).
|
||||
|
||||
Returns:
|
||||
List[str]: List of filtered text chunks.
|
||||
"""
|
||||
if not html or not isinstance(html, str):
|
||||
return []
|
||||
|
||||
soup = BeautifulSoup(html, "lxml")
|
||||
if not soup.body:
|
||||
soup = BeautifulSoup(f"<body>{html}</body>", "lxml")
|
||||
|
||||
# Remove comments and unwanted tags
|
||||
self._remove_comments(soup)
|
||||
self._remove_unwanted_tags(soup)
|
||||
|
||||
# Prune tree starting from body
|
||||
body = soup.find("body")
|
||||
self._prune_tree(body)
|
||||
|
||||
# Extract remaining content as list of HTML strings
|
||||
content_blocks = []
|
||||
for element in body.children:
|
||||
if isinstance(element, str) or not hasattr(element, "name"):
|
||||
continue
|
||||
if len(element.get_text(strip=True)) > 0:
|
||||
content_blocks.append(str(element))
|
||||
|
||||
return content_blocks
|
||||
|
||||
def _remove_comments(self, soup):
|
||||
"""Removes HTML comments"""
|
||||
for element in soup(text=lambda text: isinstance(text, Comment)):
|
||||
element.extract()
|
||||
|
||||
def _remove_unwanted_tags(self, soup):
|
||||
"""Removes unwanted tags"""
|
||||
for tag in self.excluded_tags:
|
||||
for element in soup.find_all(tag):
|
||||
element.decompose()
|
||||
|
||||
def _prune_tree(self, node):
|
||||
"""
|
||||
Prunes the tree starting from the given node.
|
||||
|
||||
Args:
|
||||
node (Tag): The node from which the pruning starts.
|
||||
"""
|
||||
if not node or not hasattr(node, "name") or node.name is None:
|
||||
return
|
||||
|
||||
text_len = len(node.get_text(strip=True))
|
||||
tag_len = len(node.encode_contents().decode("utf-8"))
|
||||
link_text_len = sum(
|
||||
len(s.strip())
|
||||
for s in (a.string for a in node.find_all("a", recursive=False))
|
||||
if s
|
||||
)
|
||||
|
||||
metrics = {
|
||||
"node": node,
|
||||
"tag_name": node.name,
|
||||
"text_len": text_len,
|
||||
"tag_len": tag_len,
|
||||
"link_text_len": link_text_len,
|
||||
}
|
||||
|
||||
score = self._compute_composite_score(metrics, text_len, tag_len, link_text_len)
|
||||
|
||||
if self.threshold_type == "fixed":
|
||||
should_remove = score < self.threshold
|
||||
else: # dynamic
|
||||
tag_importance = self.tag_importance.get(node.name, 0.7)
|
||||
text_ratio = text_len / tag_len if tag_len > 0 else 0
|
||||
link_ratio = link_text_len / text_len if text_len > 0 else 1
|
||||
|
||||
threshold = self.threshold # base threshold
|
||||
if tag_importance > 1:
|
||||
threshold *= 0.8
|
||||
if text_ratio > 0.4:
|
||||
threshold *= 0.9
|
||||
if link_ratio > 0.6:
|
||||
threshold *= 1.2
|
||||
|
||||
should_remove = score < threshold
|
||||
|
||||
if should_remove:
|
||||
node.decompose()
|
||||
else:
|
||||
children = [child for child in node.children if hasattr(child, "name")]
|
||||
for child in children:
|
||||
self._prune_tree(child)
|
||||
|
||||
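# Dynamic-threshold example (illustrative numbers): for an <article> node (tag_importance
# 1.5 > 1) with text_ratio 0.5 and link_ratio 0.1, the base threshold 0.48 becomes
# 0.48 * 0.8 * 0.9 ~= 0.35, so content-rich nodes are harder to prune; a link-heavy node
# (link_ratio > 0.6) instead gets its threshold raised by a factor of 1.2.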
def _compute_composite_score(self, metrics, text_len, tag_len, link_text_len):
|
||||
"""Computes the composite score"""
|
||||
if self.min_word_threshold:
|
||||
# Get raw text from metrics node - avoid extra processing
|
||||
text = metrics["node"].get_text(strip=True)
|
||||
word_count = text.count(" ") + 1
|
||||
if word_count < self.min_word_threshold:
|
||||
return -1.0 # Guaranteed removal
|
||||
score = 0.0
|
||||
total_weight = 0.0
|
||||
|
||||
if self.metric_config["text_density"]:
|
||||
density = text_len / tag_len if tag_len > 0 else 0
|
||||
score += self.metric_weights["text_density"] * density
|
||||
total_weight += self.metric_weights["text_density"]
|
||||
|
||||
if self.metric_config["link_density"]:
|
||||
density = 1 - (link_text_len / text_len if text_len > 0 else 0)
|
||||
score += self.metric_weights["link_density"] * density
|
||||
total_weight += self.metric_weights["link_density"]
|
||||
|
||||
if self.metric_config["tag_weight"]:
|
||||
tag_score = self.tag_weights.get(metrics["tag_name"], 0.5)
|
||||
score += self.metric_weights["tag_weight"] * tag_score
|
||||
total_weight += self.metric_weights["tag_weight"]
|
||||
|
||||
if self.metric_config["class_id_weight"]:
|
||||
class_score = self._compute_class_id_weight(metrics["node"])
|
||||
score += self.metric_weights["class_id_weight"] * max(0, class_score)
|
||||
total_weight += self.metric_weights["class_id_weight"]
|
||||
|
||||
if self.metric_config["text_length"]:
|
||||
score += self.metric_weights["text_length"] * math.log(text_len + 1)
|
||||
total_weight += self.metric_weights["text_length"]
|
||||
|
||||
return score / total_weight if total_weight > 0 else 0
|
||||
|
||||
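# Worked example of the composite score (illustrative numbers): for a <p> node with
# text_len=400, tag_len=500, link_text_len=40 and neutral class/id attributes:
#   text_density  = 400 / 500 = 0.8            -> 0.4 * 0.8 = 0.32
#   link_density  = 1 - 40 / 400 = 0.9         -> 0.2 * 0.9 = 0.18
#   tag_weight    = tag_weights["p"] = 1.0     -> 0.2 * 1.0 = 0.20
#   class_id      = max(0, 0.0)                -> 0.1 * 0.0 = 0.00
#   text_length   = log(401) ~= 6.0            -> 0.1 * 6.0 = 0.60
# composite = 1.30 / 1.0 total weight, comfortably above the default 0.48 threshold.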
def _compute_class_id_weight(self, node):
|
||||
"""Computes the class ID weight"""
|
||||
class_id_score = 0
|
||||
if "class" in node.attrs:
|
||||
classes = " ".join(node["class"])
|
||||
if self.negative_patterns.match(classes):
|
||||
class_id_score -= 0.5
|
||||
if "id" in node.attrs:
|
||||
element_id = node["id"]
|
||||
if self.negative_patterns.match(element_id):
|
||||
class_id_score -= 0.5
|
||||
return class_id_score
|
||||
|
||||
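Illustrative usage sketch for PruningContentFilter (not part of the diff); the HTML and parameter values are invented.
prune_filter = PruningContentFilter(min_word_threshold=5, threshold_type="dynamic", threshold=0.48)
blocks = prune_filter.filter_content(
    "<html><body><article><p>Long enough article text that should survive pruning with several words.</p></article><nav>Home About</nav></body></html>"
)
print(blocks)  # excluded tags such as <nav> are stripped; surviving blocks come back as HTML strings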
class LLMContentFilter(RelevantContentFilter):
|
||||
"""Content filtering using LLMs to generate relevant markdown."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
provider: str = DEFAULT_PROVIDER,
|
||||
api_token: Optional[str] = None,
|
||||
instruction: str = None,
|
||||
chunk_token_threshold: int = int(1e9),
|
||||
overlap_rate: float = OVERLAP_RATE,
|
||||
word_token_rate: float = WORD_TOKEN_RATE,
|
||||
base_url: Optional[str] = None,
|
||||
api_base: Optional[str] = None,
|
||||
extra_args: Dict = None,
|
||||
verbose: bool = False,
|
||||
logger: Optional[AsyncLogger] = None,
|
||||
):
|
||||
super().__init__(None)
|
||||
self.provider = provider
|
||||
self.api_token = (
|
||||
api_token
|
||||
or PROVIDER_MODELS.get(provider, "no-token")
|
||||
or os.getenv("OPENAI_API_KEY")
|
||||
)
|
||||
self.instruction = instruction
|
||||
self.chunk_token_threshold = chunk_token_threshold
|
||||
self.overlap_rate = overlap_rate
|
||||
self.word_token_rate = word_token_rate
|
||||
self.base_url = base_url
|
||||
self.api_base = api_base or base_url
|
||||
self.extra_args = extra_args or {}
|
||||
self.verbose = verbose
|
||||
|
||||
# Setup logger with custom styling for LLM operations
|
||||
if logger:
|
||||
self.logger = logger
|
||||
elif verbose:
|
||||
self.logger = AsyncLogger(
|
||||
verbose=True,
|
||||
icons={
|
||||
**AsyncLogger.DEFAULT_ICONS,
|
||||
"LLM": "★", # Star for LLM operations
|
||||
"CHUNK": "◈", # Diamond for chunks
|
||||
"CACHE": "⚡", # Lightning for cache operations
|
||||
},
|
||||
colors={
|
||||
**AsyncLogger.DEFAULT_COLORS,
|
||||
LogLevel.INFO: Fore.MAGENTA + Style.DIM, # Dimmed purple for LLM ops
|
||||
}
|
||||
)
|
||||
else:
|
||||
self.logger = None
|
||||
|
||||
self.usages = []
|
||||
self.total_usage = TokenUsage()
|
||||
|
||||
def _get_cache_key(self, html: str, instruction: str) -> str:
|
||||
"""Generate a unique cache key based on HTML and instruction"""
|
||||
content = f"{html}{instruction}"
|
||||
return hashlib.md5(content.encode()).hexdigest()
|
||||
|
||||
def _merge_chunks(self, text: str) -> List[str]:
|
||||
"""Split text into chunks with overlap"""
|
||||
# Calculate tokens and sections
|
||||
total_tokens = len(text.split()) * self.word_token_rate
|
||||
num_sections = max(1, math.floor(total_tokens / self.chunk_token_threshold))
|
||||
adjusted_chunk_threshold = total_tokens / num_sections
|
||||
|
||||
# Split into words
|
||||
words = text.split()
|
||||
chunks = []
|
||||
current_chunk = []
|
||||
current_token_count = 0
|
||||
|
||||
for word in words:
|
||||
word_tokens = len(word) * self.word_token_rate
|
||||
if current_token_count + word_tokens <= adjusted_chunk_threshold:
|
||||
current_chunk.append(word)
|
||||
current_token_count += word_tokens
|
||||
else:
|
||||
# Add overlap if not the last chunk
|
||||
if chunks and self.overlap_rate > 0:
|
||||
overlap_size = int(len(current_chunk) * self.overlap_rate)
|
||||
current_chunk.extend(current_chunk[-overlap_size:])
|
||||
|
||||
chunks.append(" ".join(current_chunk))
|
||||
current_chunk = [word]
|
||||
current_token_count = word_tokens
|
||||
|
||||
if current_chunk:
|
||||
chunks.append(" ".join(current_chunk))
|
||||
|
||||
return chunks
|
||||
|
||||
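# Rough chunking arithmetic (illustrative, assuming word_token_rate ~= 0.75): a 20,000-word
# page gives total_tokens ~= 15,000; with chunk_token_threshold=4096 that is split into
# max(1, floor(15000 / 4096)) = 3 sections of roughly 5,000 tokens each, and overlap_rate
# controls how much of a chunk is repeated at each boundary. Note that the per-word
# accumulation above uses len(word) * word_token_rate, so actual chunk sizes can differ.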
def filter_content(self, html: str, ignore_cache: bool = False) -> List[str]:
|
||||
if not html or not isinstance(html, str):
|
||||
return []
|
||||
|
||||
if self.logger:
|
||||
self.logger.info(
|
||||
"Starting LLM content filtering process",
|
||||
tag="LLM",
|
||||
params={"provider": self.provider},
|
||||
colors={"provider": Fore.CYAN}
|
||||
)
|
||||
|
||||
# Cache handling
|
||||
cache_dir = Path(get_home_folder()) / "llm_cache" / "content_filter"
|
||||
cache_dir.mkdir(parents=True, exist_ok=True)
|
||||
cache_key = self._get_cache_key(html, self.instruction or "")
|
||||
cache_file = cache_dir / f"{cache_key}.json"
|
||||
|
||||
if not ignore_cache and cache_file.exists():
|
||||
if self.logger:
|
||||
self.logger.info("Found cached result", tag="CACHE")
|
||||
try:
|
||||
with cache_file.open('r') as f:
|
||||
cached_data = json.load(f)
|
||||
usage = TokenUsage(**cached_data['usage'])
|
||||
self.usages.append(usage)
|
||||
self.total_usage.completion_tokens += usage.completion_tokens
|
||||
self.total_usage.prompt_tokens += usage.prompt_tokens
|
||||
self.total_usage.total_tokens += usage.total_tokens
|
||||
return cached_data['blocks']
|
||||
except Exception as e:
|
||||
if self.logger:
|
||||
self.logger.error(f"Cache read error: {str(e)}", tag="CACHE")
|
||||
|
||||
# Split into chunks
|
||||
html_chunks = self._merge_chunks(html)
|
||||
if self.logger:
|
||||
self.logger.info(
|
||||
"Split content into {chunk_count} chunks",
|
||||
tag="CHUNK",
|
||||
params={"chunk_count": len(html_chunks)},
|
||||
colors={"chunk_count": Fore.YELLOW}
|
||||
)
|
||||
|
||||
extracted_content = []
|
||||
start_time = time.time()
|
||||
|
||||
# Process chunks in parallel
|
||||
with ThreadPoolExecutor(max_workers=4) as executor:
|
||||
futures = []
|
||||
for i, chunk in enumerate(html_chunks):
|
||||
if self.logger:
|
||||
self.logger.debug(
|
||||
"Processing chunk {chunk_num}/{total_chunks}",
|
||||
tag="CHUNK",
|
||||
params={
|
||||
"chunk_num": i + 1,
|
||||
"total_chunks": len(html_chunks)
|
||||
}
|
||||
)
|
||||
|
||||
prompt_variables = {
|
||||
"HTML": escape_json_string(sanitize_html(chunk)),
|
||||
"REQUEST": self.instruction or "Convert this HTML into clean, relevant markdown, removing any noise or irrelevant content."
|
||||
}
|
||||
|
||||
prompt = PROMPT_FILTER_CONTENT
|
||||
for var, value in prompt_variables.items():
|
||||
prompt = prompt.replace("{" + var + "}", value)
|
||||
|
||||
future = executor.submit(
|
||||
perform_completion_with_backoff,
|
||||
self.provider,
|
||||
prompt,
|
||||
self.api_token,
|
||||
base_url=self.api_base,
|
||||
extra_args=self.extra_args
|
||||
)
|
||||
futures.append((i, future))
|
||||
|
||||
# Collect results in order
|
||||
ordered_results = []
|
||||
for i, future in sorted(futures):
|
||||
try:
|
||||
response = future.result()
|
||||
|
||||
# Track usage
|
||||
usage = TokenUsage(
|
||||
completion_tokens=response.usage.completion_tokens,
|
||||
prompt_tokens=response.usage.prompt_tokens,
|
||||
total_tokens=response.usage.total_tokens,
|
||||
completion_tokens_details=response.usage.completion_tokens_details.__dict__
|
||||
if response.usage.completion_tokens_details else {},
|
||||
prompt_tokens_details=response.usage.prompt_tokens_details.__dict__
|
||||
if response.usage.prompt_tokens_details else {},
|
||||
)
|
||||
self.usages.append(usage)
|
||||
self.total_usage.completion_tokens += usage.completion_tokens
|
||||
self.total_usage.prompt_tokens += usage.prompt_tokens
|
||||
self.total_usage.total_tokens += usage.total_tokens
|
||||
|
||||
blocks = extract_xml_data(["content"], response.choices[0].message.content)["content"]
|
||||
if blocks:
|
||||
ordered_results.append(blocks)
|
||||
if self.logger:
|
||||
self.logger.success(
|
||||
"Successfully processed chunk {chunk_num}",
|
||||
tag="CHUNK",
|
||||
params={"chunk_num": i + 1}
|
||||
)
|
||||
except Exception as e:
|
||||
if self.logger:
|
||||
self.logger.error(
|
||||
"Error processing chunk {chunk_num}: {error}",
|
||||
tag="CHUNK",
|
||||
params={
|
||||
"chunk_num": i + 1,
|
||||
"error": str(e)
|
||||
}
|
||||
)
|
||||
|
||||
end_time = time.time()
|
||||
if self.logger:
|
||||
self.logger.success(
|
||||
"Completed processing in {time:.2f}s",
|
||||
tag="LLM",
|
||||
params={"time": end_time - start_time},
|
||||
colors={"time": Fore.YELLOW}
|
||||
)
|
||||
|
||||
result = ordered_results if ordered_results else []
|
||||
|
||||
# Cache the final result
|
||||
cache_data = {
|
||||
'blocks': result,
|
||||
'usage': self.total_usage.__dict__
|
||||
}
|
||||
with cache_file.open('w') as f:
|
||||
json.dump(cache_data, f)
|
||||
if self.logger:
|
||||
self.logger.info("Cached results for future use", tag="CACHE")
|
||||
|
||||
return result
|
||||
|
||||
def show_usage(self) -> None:
|
||||
"""Print usage statistics"""
|
||||
print("\n=== Token Usage Summary ===")
|
||||
print(f"{'Type':<15} {'Count':>12}")
|
||||
print("-" * 30)
|
||||
print(f"{'Completion':<15} {self.total_usage.completion_tokens:>12,}")
|
||||
print(f"{'Prompt':<15} {self.total_usage.prompt_tokens:>12,}")
|
||||
print(f"{'Total':<15} {self.total_usage.total_tokens:>12,}")
|
||||
|
||||
if self.usages:
|
||||
print("\n=== Usage History ===")
|
||||
print(f"{'Request #':<10} {'Completion':>12} {'Prompt':>12} {'Total':>12}")
|
||||
print("-" * 48)
|
||||
for i, usage in enumerate(self.usages, 1):
|
||||
print(
|
||||
f"{i:<10} {usage.completion_tokens:>12,} "
|
||||
f"{usage.prompt_tokens:>12,} {usage.total_tokens:>12,}"
|
||||
)
|
||||
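Illustrative usage sketch for LLMContentFilter (not part of the diff); the provider string, instruction and sample HTML are placeholders, and the token is read from the environment.
page_html = "<html><body><h1>Install</h1><ol><li>pip install crawl4ai</li><li>run the crawler</li></ol></body></html>"
llm_filter = LLMContentFilter(
    provider="openai/gpt-4o-mini",          # placeholder; any provider the completion backend supports
    api_token=os.getenv("OPENAI_API_KEY"),  # or pass a token directly
    instruction="Keep only the tutorial steps as clean markdown.",
    verbose=True,
)
markdown_blocks = llm_filter.filter_content(page_html)
llm_filter.show_usage()  # prints the token usage summary defined above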
1372
crawl4ai/content_scraping_strategy.py
Normal file
File diff suppressed because it is too large
@@ -1,456 +0,0 @@
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Dict, Any
|
||||
from bs4 import BeautifulSoup
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
import asyncio, requests, re, os
|
||||
from .config import *
|
||||
from bs4 import element, NavigableString, Comment
|
||||
from urllib.parse import urljoin
|
||||
from requests.exceptions import InvalidSchema
|
||||
from .content_cleaning_strategy import ContentCleaningStrategy
|
||||
|
||||
from .utils import (
|
||||
sanitize_input_encode,
|
||||
sanitize_html,
|
||||
extract_metadata,
|
||||
InvalidCSSSelectorError,
|
||||
CustomHTML2Text,
|
||||
normalize_url,
|
||||
is_external_url
|
||||
|
||||
)
|
||||
|
||||
class ContentScrappingStrategy(ABC):
|
||||
@abstractmethod
|
||||
def scrap(self, url: str, html: str, **kwargs) -> Dict[str, Any]:
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
async def ascrap(self, url: str, html: str, **kwargs) -> Dict[str, Any]:
|
||||
pass
|
||||
|
||||
class WebScrappingStrategy(ContentScrappingStrategy):
|
||||
def scrap(self, url: str, html: str, **kwargs) -> Dict[str, Any]:
|
||||
return self._get_content_of_website_optimized(url, html, is_async=False, **kwargs)
|
||||
|
||||
async def ascrap(self, url: str, html: str, **kwargs) -> Dict[str, Any]:
|
||||
return await asyncio.to_thread(self._get_content_of_website_optimized, url, html, **kwargs)
|
||||
|
||||
def _get_content_of_website_optimized(self, url: str, html: str, word_count_threshold: int = MIN_WORD_THRESHOLD, css_selector: str = None, **kwargs) -> Dict[str, Any]:
|
||||
success = True
|
||||
if not html:
|
||||
return None
|
||||
|
||||
soup = BeautifulSoup(html, 'html.parser')
|
||||
body = soup.body
|
||||
|
||||
|
||||
image_description_min_word_threshold = kwargs.get('image_description_min_word_threshold', IMAGE_DESCRIPTION_MIN_WORD_THRESHOLD)
|
||||
|
||||
for tag in kwargs.get('excluded_tags', []) or []:
|
||||
for el in body.select(tag):
|
||||
el.decompose()
|
||||
|
||||
if css_selector:
|
||||
selected_elements = body.select(css_selector)
|
||||
if not selected_elements:
|
||||
return {
|
||||
'markdown': '',
|
||||
'cleaned_html': '',
|
||||
'success': True,
|
||||
'media': {'images': [], 'videos': [], 'audios': []},
|
||||
'links': {'internal': [], 'external': []},
|
||||
'metadata': {},
|
||||
'message': f"No elements found for CSS selector: {css_selector}"
|
||||
}
|
||||
# raise InvalidCSSSelectorError(f"Invalid CSS selector, No elements found for CSS selector: {css_selector}")
|
||||
body = soup.new_tag('div')
|
||||
for el in selected_elements:
|
||||
body.append(el)
|
||||
|
||||
links = {'internal': [], 'external': []}
|
||||
media = {'images': [], 'videos': [], 'audios': []}
|
||||
internal_links_dict = {}
|
||||
external_links_dict = {}
|
||||
|
||||
# Extract meaningful text for media files from closest parent
|
||||
def find_closest_parent_with_useful_text(tag):
|
||||
current_tag = tag
|
||||
while current_tag:
|
||||
current_tag = current_tag.parent
|
||||
# Get the text content of the parent tag
|
||||
if current_tag:
|
||||
text_content = current_tag.get_text(separator=' ',strip=True)
|
||||
# Check if the text content has at least word_count_threshold
|
||||
if len(text_content.split()) >= image_description_min_word_threshold:
|
||||
return text_content
|
||||
return None
|
||||
|
||||
def process_image(img, url, index, total_images):
|
||||
# Check whether the image is actually displayed and not nested inside undesired HTML elements
|
||||
def is_valid_image(img, parent, parent_classes):
|
||||
style = img.get('style', '')
|
||||
src = img.get('src', '')
|
||||
classes_to_check = ['button', 'icon', 'logo']
|
||||
tags_to_check = ['button', 'input']
|
||||
return all([
|
||||
'display:none' not in style,
|
||||
src,
|
||||
not any(s in var for var in [src, img.get('alt', ''), *parent_classes] for s in classes_to_check),
|
||||
parent.name not in tags_to_check
|
||||
])
|
||||
|
||||
# Score an image for its usefulness
|
||||
def score_image_for_usefulness(img, base_url, index, images_count):
|
||||
# Function to parse image height/width value and units
|
||||
def parse_dimension(dimension):
|
||||
if dimension:
|
||||
match = re.match(r"(\d+)(\D*)", dimension)
|
||||
if match:
|
||||
number = int(match.group(1))
|
||||
unit = match.group(2) or 'px' # Default unit is 'px' if not specified
|
||||
return number, unit
|
||||
return None, None
|
||||
|
||||
# Fetch image file metadata to extract size and extension
|
||||
def fetch_image_file_size(img, base_url):
|
||||
# If src is a relative path, construct the full URL; otherwise it may already be an absolute/CDN URL
|
||||
img_url = urljoin(base_url,img.get('src'))
|
||||
try:
|
||||
response = requests.head(img_url)
|
||||
if response.status_code == 200:
|
||||
return response.headers.get('Content-Length',None)
|
||||
else:
|
||||
print(f"Failed to retrieve file size for {img_url}")
|
||||
return None
|
||||
except InvalidSchema as e:
|
||||
return None
|
||||
finally:
|
||||
return
|
||||
|
||||
image_height = img.get('height')
|
||||
height_value, height_unit = parse_dimension(image_height)
|
||||
image_width = img.get('width')
|
||||
width_value, width_unit = parse_dimension(image_width)
|
||||
image_size = 0 #int(fetch_image_file_size(img,base_url) or 0)
|
||||
image_src = img.get('src','')
|
||||
if "data:image/" in image_src:
|
||||
image_format = image_src.split(',')[0].split(';')[0].split('/')[1]
|
||||
else:
|
||||
image_format = os.path.splitext(img.get('src',''))[1].lower()
|
||||
# Remove . from format
|
||||
image_format = image_format.strip('.').split('?')[0]
|
||||
score = 0
|
||||
if height_value:
|
||||
if height_unit == 'px' and height_value > 150:
|
||||
score += 1
|
||||
if height_unit in ['%','vh','vmin','vmax'] and height_value >30:
|
||||
score += 1
|
||||
if width_value:
|
||||
if width_unit == 'px' and width_value > 150:
|
||||
score += 1
|
||||
if width_unit in ['%','vh','vmin','vmax'] and width_value >30:
|
||||
score += 1
|
||||
if image_size > 10000:
|
||||
score += 1
|
||||
if img.get('alt') != '':
|
||||
score+=1
|
||||
if any(image_format==format for format in ['jpg','png','webp']):
|
||||
score+=1
|
||||
if index/images_count<0.5:
|
||||
score+=1
|
||||
return score
|
||||
|
||||
|
||||
|
||||
if not is_valid_image(img, img.parent, img.parent.get('class', [])):
|
||||
return None
|
||||
score = score_image_for_usefulness(img, url, index, total_images)
|
||||
if score <= IMAGE_SCORE_THRESHOLD:
|
||||
return None
|
||||
return {
|
||||
'src': img.get('src', ''),
|
||||
'data-src': img.get('data-src', ''),
|
||||
'alt': img.get('alt', ''),
|
||||
'desc': find_closest_parent_with_useful_text(img),
|
||||
'score': score,
|
||||
'type': 'image'
|
||||
}
|
||||
|
||||
def remove_unwanted_attributes(element, important_attrs, keep_data_attributes=False):
|
||||
attrs_to_remove = []
|
||||
for attr in element.attrs:
|
||||
if attr not in important_attrs:
|
||||
if keep_data_attributes:
|
||||
if not attr.startswith('data-'):
|
||||
attrs_to_remove.append(attr)
|
||||
else:
|
||||
attrs_to_remove.append(attr)
|
||||
|
||||
for attr in attrs_to_remove:
|
||||
del element[attr]
|
||||
|
||||
def process_element(element: element.PageElement) -> bool:
|
||||
try:
|
||||
if isinstance(element, NavigableString):
|
||||
if isinstance(element, Comment):
|
||||
element.extract()
|
||||
return False
|
||||
|
||||
# if element.name == 'img':
|
||||
# process_image(element, url, 0, 1)
|
||||
# return True
|
||||
|
||||
if element.name in ['script', 'style', 'link', 'meta', 'noscript']:
|
||||
element.decompose()
|
||||
return False
|
||||
|
||||
keep_element = False
|
||||
|
||||
exclude_social_media_domains = SOCIAL_MEDIA_DOMAINS + kwargs.get('exclude_social_media_domains', [])
|
||||
exclude_social_media_domains = list(set(exclude_social_media_domains))
|
||||
|
||||
|
||||
try:
|
||||
if element.name == 'a' and element.get('href'):
|
||||
href = element.get('href', '').strip()
|
||||
if not href: # Skip empty hrefs
|
||||
return False
|
||||
|
||||
url_base = url.split('/')[2]
|
||||
|
||||
# Normalize the URL
|
||||
try:
|
||||
normalized_href = normalize_url(href, url)
|
||||
except ValueError as e:
|
||||
# logging.warning(f"Invalid URL format: {href}, Error: {str(e)}")
|
||||
return False
|
||||
|
||||
link_data = {
|
||||
'href': normalized_href,
|
||||
'text': element.get_text().strip(),
|
||||
'title': element.get('title', '').strip()
|
||||
}
|
||||
|
||||
# Check for duplicates and add to appropriate dictionary
|
||||
is_external = is_external_url(normalized_href, url_base)
|
||||
if is_external:
|
||||
if normalized_href not in external_links_dict:
|
||||
external_links_dict[normalized_href] = link_data
|
||||
else:
|
||||
if normalized_href not in internal_links_dict:
|
||||
internal_links_dict[normalized_href] = link_data
|
||||
|
||||
keep_element = True
|
||||
|
||||
# Handle external link exclusions
|
||||
if is_external:
|
||||
if kwargs.get('exclude_external_links', False):
|
||||
element.decompose()
|
||||
return False
|
||||
elif kwargs.get('exclude_social_media_links', False):
|
||||
if any(domain in normalized_href.lower() for domain in exclude_social_media_domains):
|
||||
element.decompose()
|
||||
return False
|
||||
elif kwargs.get('exclude_domains', []):
|
||||
if any(domain in normalized_href.lower() for domain in kwargs.get('exclude_domains', [])):
|
||||
element.decompose()
|
||||
return False
|
||||
|
||||
except Exception as e:
|
||||
raise Exception(f"Error processing links: {str(e)}")
|
||||
|
||||
try:
|
||||
if element.name == 'img':
|
||||
potential_sources = ['src', 'data-src', 'srcset', 'data-lazy-src', 'data-original']
|
||||
src = element.get('src', '')
|
||||
while not src and potential_sources:
|
||||
src = element.get(potential_sources.pop(0), '')
|
||||
if not src:
|
||||
element.decompose()
|
||||
return False
|
||||
|
||||
# If it is srcset pick up the first image
|
||||
if 'srcset' in element.attrs:
|
||||
src = element.attrs['srcset'].split(',')[0].split(' ')[0]
|
||||
|
||||
# Check flag if we should remove external images
|
||||
if kwargs.get('exclude_external_images', False):
|
||||
src_url_base = src.split('/')[2]
|
||||
url_base = url.split('/')[2]
|
||||
if url_base not in src_url_base:
|
||||
element.decompose()
|
||||
return False
|
||||
|
||||
if not kwargs.get('exclude_external_images', False) and kwargs.get('exclude_social_media_links', False):
|
||||
src_url_base = src.split('/')[2]
|
||||
url_base = url.split('/')[2]
|
||||
if any(domain in src for domain in exclude_social_media_domains):
|
||||
element.decompose()
|
||||
return False
|
||||
|
||||
# Handle exclude domains
|
||||
if kwargs.get('exclude_domains', []):
|
||||
if any(domain in src for domain in kwargs.get('exclude_domains', [])):
|
||||
element.decompose()
|
||||
return False
|
||||
|
||||
return True # Always keep image elements
|
||||
except Exception as e:
|
||||
raise Exception(f"Error processing images: {str(e)}")
|
||||
|
||||
|
||||
# Check if flag to remove all forms is set
|
||||
if kwargs.get('remove_forms', False) and element.name == 'form':
|
||||
element.decompose()
|
||||
return False
|
||||
|
||||
if element.name in ['video', 'audio']:
|
||||
media[f"{element.name}s"].append({
|
||||
'src': element.get('src'),
|
||||
'alt': element.get('alt'),
|
||||
'type': element.name,
|
||||
'description': find_closest_parent_with_useful_text(element)
|
||||
})
|
||||
source_tags = element.find_all('source')
|
||||
for source_tag in source_tags:
|
||||
media[f"{element.name}s"].append({
|
||||
'src': source_tag.get('src'),
|
||||
'alt': element.get('alt'),
|
||||
'type': element.name,
|
||||
'description': find_closest_parent_with_useful_text(element)
|
||||
})
|
||||
return True # Always keep video and audio elements
|
||||
|
||||
if element.name in ONLY_TEXT_ELIGIBLE_TAGS:
|
||||
if kwargs.get('only_text', False):
|
||||
element.replace_with(element.get_text())
|
||||
|
||||
try:
|
||||
remove_unwanted_attributes(element, IMPORTANT_ATTRS, kwargs.get('keep_data_attributes', False))
|
||||
except Exception as e:
|
||||
print('Error removing unwanted attributes:', str(e))
|
||||
|
||||
|
||||
# Process children
|
||||
for child in list(element.children):
|
||||
if isinstance(child, NavigableString) and not isinstance(child, Comment):
|
||||
if len(child.strip()) > 0:
|
||||
keep_element = True
|
||||
else:
|
||||
if process_element(child):
|
||||
keep_element = True
|
||||
|
||||
|
||||
# Check word count
|
||||
if not keep_element:
|
||||
word_count = len(element.get_text(strip=True).split())
|
||||
keep_element = word_count >= word_count_threshold
|
||||
|
||||
if not keep_element:
|
||||
element.decompose()
|
||||
|
||||
return keep_element
|
||||
except Exception as e:
|
||||
print('Error processing element:', str(e))
|
||||
return False
|
||||
|
||||
#process images by filtering and extracting contextual text from the page
|
||||
# imgs = body.find_all('img')
|
||||
# media['images'] = [
|
||||
# result for result in
|
||||
# (process_image(img, url, i, len(imgs)) for i, img in enumerate(imgs))
|
||||
# if result is not None
|
||||
# ]
|
||||
|
||||
process_element(body)
|
||||
|
||||
# Update the links dictionary with unique links
|
||||
links['internal'] = list(internal_links_dict.values())
|
||||
links['external'] = list(external_links_dict.values())
|
||||
|
||||
|
||||
# # Process images using ThreadPoolExecutor
|
||||
imgs = body.find_all('img')
|
||||
|
||||
with ThreadPoolExecutor() as executor:
|
||||
image_results = list(executor.map(process_image, imgs, [url]*len(imgs), range(len(imgs)), [len(imgs)]*len(imgs)))
|
||||
media['images'] = [result for result in image_results if result is not None]
|
||||
|
||||
def flatten_nested_elements(node):
|
||||
if isinstance(node, NavigableString):
|
||||
return node
|
||||
if len(node.contents) == 1 and isinstance(node.contents[0], element.Tag) and node.contents[0].name == node.name:
|
||||
return flatten_nested_elements(node.contents[0])
|
||||
node.contents = [flatten_nested_elements(child) for child in node.contents]
|
||||
return node
|
||||
|
||||
body = flatten_nested_elements(body)
|
||||
base64_pattern = re.compile(r'data:image/[^;]+;base64,([^"]+)')
|
||||
for img in imgs:
|
||||
src = img.get('src', '')
|
||||
if base64_pattern.match(src):
|
||||
# Replace base64 data with empty string
|
||||
img['src'] = base64_pattern.sub('', src)
|
||||
|
||||
try:
|
||||
str(body)
|
||||
except Exception as e:
|
||||
# Reset body to the original HTML
|
||||
success = False
|
||||
body = BeautifulSoup(html, 'html.parser')
|
||||
|
||||
# Create a new div with a special ID
|
||||
error_div = body.new_tag('div', id='crawl4ai_error_message')
|
||||
error_div.string = '''
|
||||
Crawl4AI Error: This page is not fully supported.
|
||||
|
||||
Possible reasons:
|
||||
1. The page may have restrictions that prevent crawling.
|
||||
2. The page might not be fully loaded.
|
||||
|
||||
Suggestions:
|
||||
- Try calling the crawl function with these parameters:
|
||||
magic=True,
|
||||
- Set headless=False to visualize what's happening on the page.
|
||||
|
||||
If the issue persists, please check the page's structure and any potential anti-crawling measures.
|
||||
'''
|
||||
|
||||
# Append the error div to the body
|
||||
body.body.append(error_div)
|
||||
|
||||
print(f"[LOG] 😧 Error: After processing the crawled HTML and removing irrelevant tags, nothing was left in the page. Check the markdown for further details.")
|
||||
|
||||
|
||||
cleaned_html = str(body).replace('\n\n', '\n').replace('  ', ' ')
|
||||
|
||||
try:
|
||||
h = CustomHTML2Text()
|
||||
h.update_params(**kwargs.get('html2text', {}))
|
||||
markdown = h.handle(cleaned_html)
|
||||
except Exception as e:
|
||||
markdown = h.handle(sanitize_html(cleaned_html))
|
||||
markdown = markdown.replace(' ```', '```')
|
||||
|
||||
try:
|
||||
meta = extract_metadata(html, soup)
|
||||
except Exception as e:
|
||||
print('Error extracting metadata:', str(e))
|
||||
meta = {}
|
||||
|
||||
cleaner = ContentCleaningStrategy()
|
||||
fit_html = cleaner.clean(cleaned_html)
|
||||
fit_markdown = h.handle(fit_html)
|
||||
|
||||
cleaned_html = sanitize_html(cleaned_html)
|
||||
return {
|
||||
'markdown': markdown,
|
||||
'fit_markdown': fit_markdown,
|
||||
'fit_html': fit_html,
|
||||
'cleaned_html': cleaned_html,
|
||||
'success': success,
|
||||
'media': media,
|
||||
'links': links,
|
||||
'metadata': meta
|
||||
}
|
||||
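Illustrative usage sketch for the legacy WebScrappingStrategy shown above (not part of the diff); the URL, HTML string and threshold are invented.
html_string = "<html><body><h1>Example</h1><p>Some paragraph with enough words to pass the threshold.</p><a href='/about'>About</a></body></html>"
scraper = WebScrappingStrategy()
result = scraper.scrap("https://example.com", html_string, word_count_threshold=5)
print(result["markdown"][:200])
print(result["links"]["internal"][:3], result["media"]["images"][:1])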
@@ -15,54 +15,53 @@ import logging, time
|
||||
import base64
|
||||
from PIL import Image, ImageDraw, ImageFont
|
||||
from io import BytesIO
|
||||
from typing import List, Callable
|
||||
from typing import Callable
|
||||
import requests
|
||||
import os
|
||||
from pathlib import Path
|
||||
from .utils import *
|
||||
|
||||
logger = logging.getLogger('selenium.webdriver.remote.remote_connection')
|
||||
logger = logging.getLogger("selenium.webdriver.remote.remote_connection")
|
||||
logger.setLevel(logging.WARNING)
|
||||
|
||||
logger_driver = logging.getLogger('selenium.webdriver.common.service')
|
||||
logger_driver = logging.getLogger("selenium.webdriver.common.service")
|
||||
logger_driver.setLevel(logging.WARNING)
|
||||
|
||||
urllib3_logger = logging.getLogger('urllib3.connectionpool')
|
||||
urllib3_logger = logging.getLogger("urllib3.connectionpool")
|
||||
urllib3_logger.setLevel(logging.WARNING)
|
||||
|
||||
# Disable http.client logging
|
||||
http_client_logger = logging.getLogger('http.client')
|
||||
http_client_logger = logging.getLogger("http.client")
|
||||
http_client_logger.setLevel(logging.WARNING)
|
||||
|
||||
# Disable driver_finder and service logging
|
||||
driver_finder_logger = logging.getLogger('selenium.webdriver.common.driver_finder')
|
||||
driver_finder_logger = logging.getLogger("selenium.webdriver.common.driver_finder")
|
||||
driver_finder_logger.setLevel(logging.WARNING)
|
||||
|
||||
|
||||
|
||||
|
||||
class CrawlerStrategy(ABC):
|
||||
@abstractmethod
|
||||
def crawl(self, url: str, **kwargs) -> str:
|
||||
pass
|
||||
|
||||
|
||||
@abstractmethod
|
||||
def take_screenshot(self, save_path: str):
|
||||
pass
|
||||
|
||||
|
||||
@abstractmethod
|
||||
def update_user_agent(self, user_agent: str):
|
||||
pass
|
||||
|
||||
|
||||
@abstractmethod
|
||||
def set_hook(self, hook_type: str, hook: Callable):
|
||||
pass
|
||||
|
||||
|
||||
class CloudCrawlerStrategy(CrawlerStrategy):
|
||||
def __init__(self, use_cached_html = False):
|
||||
def __init__(self, use_cached_html=False):
|
||||
super().__init__()
|
||||
self.use_cached_html = use_cached_html
|
||||
|
||||
|
||||
def crawl(self, url: str) -> str:
|
||||
data = {
|
||||
"urls": [url],
|
||||
@@ -76,6 +75,7 @@ class CloudCrawlerStrategy(CrawlerStrategy):
|
||||
html = response["results"][0]["html"]
|
||||
return sanitize_input_encode(html)
|
||||
|
||||
|
||||
class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
|
||||
def __init__(self, use_cached_html=False, js_code=None, **kwargs):
|
||||
super().__init__()
|
||||
@@ -87,20 +87,25 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
|
||||
if kwargs.get("user_agent"):
|
||||
self.options.add_argument("--user-agent=" + kwargs.get("user_agent"))
|
||||
else:
|
||||
user_agent = kwargs.get("user_agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36")
|
||||
user_agent = kwargs.get(
|
||||
"user_agent",
|
||||
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
|
||||
)
|
||||
self.options.add_argument(f"--user-agent={user_agent}")
|
||||
self.options.add_argument("user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36")
|
||||
|
||||
self.options.add_argument(
|
||||
"user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
|
||||
)
|
||||
|
||||
self.options.headless = kwargs.get("headless", True)
|
||||
if self.options.headless:
|
||||
self.options.add_argument("--headless")
|
||||
|
||||
self.options.add_argument("--disable-gpu")
|
||||
|
||||
self.options.add_argument("--disable-gpu")
|
||||
self.options.add_argument("--window-size=1920,1080")
|
||||
self.options.add_argument("--no-sandbox")
|
||||
self.options.add_argument("--disable-dev-shm-usage")
|
||||
self.options.add_argument("--disable-blink-features=AutomationControlled")
|
||||
|
||||
self.options.add_argument("--disable-blink-features=AutomationControlled")
|
||||
|
||||
# self.options.add_argument("--disable-dev-shm-usage")
|
||||
self.options.add_argument("--disable-gpu")
|
||||
# self.options.add_argument("--disable-extensions")
|
||||
@@ -120,48 +125,45 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
|
||||
self.use_cached_html = use_cached_html
|
||||
self.js_code = js_code
|
||||
self.verbose = kwargs.get("verbose", False)
|
||||
|
||||
|
||||
# Hooks
|
||||
self.hooks = {
|
||||
'on_driver_created': None,
|
||||
'on_user_agent_updated': None,
|
||||
'before_get_url': None,
|
||||
'after_get_url': None,
|
||||
'before_return_html': None
|
||||
"on_driver_created": None,
|
||||
"on_user_agent_updated": None,
|
||||
"before_get_url": None,
|
||||
"after_get_url": None,
|
||||
"before_return_html": None,
|
||||
}
|
||||
|
||||
# chromedriver_autoinstaller.install()
|
||||
# import chromedriver_autoinstaller
|
||||
# crawl4ai_folder = os.path.join(Path.home(), ".crawl4ai")
|
||||
# crawl4ai_folder = os.path.join(os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".crawl4ai")
|
||||
# driver = webdriver.Chrome(service=ChromeService(ChromeDriverManager().install()), options=self.options)
|
||||
# chromedriver_path = chromedriver_autoinstaller.install()
|
||||
# chromedriver_path = chromedriver_autoinstaller.utils.download_chromedriver()
|
||||
# self.service = Service(chromedriver_autoinstaller.install())
|
||||
|
||||
|
||||
|
||||
# chromedriver_path = ChromeDriverManager().install()
|
||||
# self.service = Service(chromedriver_path)
|
||||
# self.service.log_path = "NUL"
|
||||
# self.driver = webdriver.Chrome(service=self.service, options=self.options)
|
||||
|
||||
|
||||
# Use selenium-manager (built into Selenium 4.10.0+)
|
||||
self.service = Service()
|
||||
self.driver = webdriver.Chrome(options=self.options)
|
||||
|
||||
self.driver = self.execute_hook('on_driver_created', self.driver)
|
||||
|
||||
|
||||
self.driver = self.execute_hook("on_driver_created", self.driver)
|
||||
|
||||
if kwargs.get("cookies"):
|
||||
for cookie in kwargs.get("cookies"):
|
||||
self.driver.add_cookie(cookie)
|
||||
|
||||
|
||||
|
||||
def set_hook(self, hook_type: str, hook: Callable):
|
||||
if hook_type in self.hooks:
|
||||
self.hooks[hook_type] = hook
|
||||
else:
|
||||
raise ValueError(f"Invalid hook type: {hook_type}")
|
||||
|
||||
|
||||
def execute_hook(self, hook_type: str, *args):
|
||||
hook = self.hooks.get(hook_type)
|
||||
if hook:
|
||||
@@ -170,7 +172,9 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
|
||||
if isinstance(result, webdriver.Chrome):
|
||||
return result
|
||||
else:
|
||||
raise TypeError(f"Hook {hook_type} must return an instance of webdriver.Chrome or None.")
|
||||
raise TypeError(
|
||||
f"Hook {hook_type} must return an instance of webdriver.Chrome or None."
|
||||
)
|
||||
# If the hook returns None or there is no hook, return self.driver
|
||||
return self.driver
|
||||
|
||||
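Illustrative hook usage for LocalSeleniumCrawlerStrategy (not part of the diff); assumes Chrome is available locally and the hook names match the dictionary defined above.
def log_before_get(driver):
    # hooks must return a webdriver.Chrome instance (or None), per execute_hook above
    print("[hook] about to load the page")
    return driver

crawler = LocalSeleniumCrawlerStrategy(use_cached_html=False, headless=True)
crawler.set_hook("before_get_url", log_before_get)
html = crawler.crawl("https://example.com")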
@@ -178,60 +182,77 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
|
||||
self.options.add_argument(f"user-agent={user_agent}")
|
||||
self.driver.quit()
|
||||
self.driver = webdriver.Chrome(service=self.service, options=self.options)
|
||||
self.driver = self.execute_hook('on_user_agent_updated', self.driver)
|
||||
self.driver = self.execute_hook("on_user_agent_updated", self.driver)
|
||||
|
||||
def set_custom_headers(self, headers: dict):
|
||||
# Enable Network domain for sending headers
|
||||
self.driver.execute_cdp_cmd('Network.enable', {})
|
||||
self.driver.execute_cdp_cmd("Network.enable", {})
|
||||
# Set extra HTTP headers
|
||||
self.driver.execute_cdp_cmd('Network.setExtraHTTPHeaders', {'headers': headers})
|
||||
self.driver.execute_cdp_cmd("Network.setExtraHTTPHeaders", {"headers": headers})
|
||||
|
||||
def _ensure_page_load(self, max_checks=6, check_interval=0.01):
|
||||
def _ensure_page_load(self, max_checks=6, check_interval=0.01):
|
||||
initial_length = len(self.driver.page_source)
|
||||
|
||||
|
||||
for ix in range(max_checks):
|
||||
# print(f"Checking page load: {ix}")
|
||||
time.sleep(check_interval)
|
||||
current_length = len(self.driver.page_source)
|
||||
|
||||
|
||||
if current_length != initial_length:
|
||||
break
|
||||
|
||||
return self.driver.page_source
|
||||
|
||||
|
||||
def crawl(self, url: str, **kwargs) -> str:
|
||||
# Create md5 hash of the URL
|
||||
import hashlib
|
||||
|
||||
url_hash = hashlib.md5(url.encode()).hexdigest()
|
||||
|
||||
|
||||
if self.use_cached_html:
|
||||
cache_file_path = os.path.join(Path.home(), ".crawl4ai", "cache", url_hash)
|
||||
cache_file_path = os.path.join(
|
||||
os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()),
|
||||
".crawl4ai",
|
||||
"cache",
|
||||
url_hash,
|
||||
)
|
||||
if os.path.exists(cache_file_path):
|
||||
with open(cache_file_path, "r") as f:
|
||||
return sanitize_input_encode(f.read())
|
||||
|
||||
try:
|
||||
self.driver = self.execute_hook('before_get_url', self.driver)
|
||||
self.driver = self.execute_hook("before_get_url", self.driver)
|
||||
if self.verbose:
|
||||
print(f"[LOG] 🕸️ Crawling {url} using LocalSeleniumCrawlerStrategy...")
|
||||
self.driver.get(url) #<html><head></head><body></body></html>
|
||||
|
||||
self.driver.get(url) # <html><head></head><body></body></html>
|
||||
|
||||
WebDriverWait(self.driver, 20).until(
|
||||
lambda d: d.execute_script('return document.readyState') == 'complete'
|
||||
lambda d: d.execute_script("return document.readyState") == "complete"
|
||||
)
|
||||
WebDriverWait(self.driver, 10).until(
|
||||
EC.presence_of_all_elements_located((By.TAG_NAME, "body"))
|
||||
)
|
||||
|
||||
self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
|
||||
|
||||
self.driver = self.execute_hook('after_get_url', self.driver)
|
||||
html = sanitize_input_encode(self._ensure_page_load()) # self.driver.page_source
|
||||
can_not_be_done_headless = False # Look at my creativity for naming variables
|
||||
|
||||
|
||||
self.driver.execute_script(
|
||||
"window.scrollTo(0, document.body.scrollHeight);"
|
||||
)
|
||||
|
||||
self.driver = self.execute_hook("after_get_url", self.driver)
|
||||
html = sanitize_input_encode(
|
||||
self._ensure_page_load()
|
||||
) # self.driver.page_source
|
||||
can_not_be_done_headless = (
|
||||
False # Look at my creativity for naming variables
|
||||
)
|
||||
|
||||
# TODO: Very ugly approach, but promise to change it!
|
||||
if kwargs.get('bypass_headless', False) or html == "<html><head></head><body></body></html>":
|
||||
print("[LOG] 🙌 Page could not be loaded in headless mode. Trying non-headless mode...")
|
||||
if (
|
||||
kwargs.get("bypass_headless", False)
|
||||
or html == "<html><head></head><body></body></html>"
|
||||
):
|
||||
print(
|
||||
"[LOG] 🙌 Page could not be loaded in headless mode. Trying non-headless mode..."
|
||||
)
|
||||
can_not_be_done_headless = True
|
||||
options = Options()
|
||||
options.headless = False
|
||||
@@ -239,27 +260,31 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
|
||||
options.add_argument("--window-size=5,5")
|
||||
driver = webdriver.Chrome(service=self.service, options=options)
|
||||
driver.get(url)
|
||||
self.driver = self.execute_hook('after_get_url', driver)
|
||||
self.driver = self.execute_hook("after_get_url", driver)
|
||||
html = sanitize_input_encode(driver.page_source)
|
||||
driver.quit()
|
||||
|
||||
|
||||
# Execute JS code if provided
|
||||
self.js_code = kwargs.get("js_code", self.js_code)
|
||||
if self.js_code and type(self.js_code) == str:
|
||||
self.driver.execute_script(self.js_code)
|
||||
# Optionally, wait for some condition after executing the JS code
|
||||
WebDriverWait(self.driver, 10).until(
|
||||
lambda driver: driver.execute_script("return document.readyState") == "complete"
|
||||
lambda driver: driver.execute_script("return document.readyState")
|
||||
== "complete"
|
||||
)
|
||||
elif self.js_code and type(self.js_code) == list:
|
||||
for js in self.js_code:
|
||||
self.driver.execute_script(js)
|
||||
WebDriverWait(self.driver, 10).until(
|
||||
lambda driver: driver.execute_script("return document.readyState") == "complete"
|
||||
lambda driver: driver.execute_script(
|
||||
"return document.readyState"
|
||||
)
|
||||
== "complete"
|
||||
)
|
||||
|
||||
|
||||
# Optionally, wait for some condition after executing the JS code : Contributed by (https://github.com/jonymusky)
|
||||
wait_for = kwargs.get('wait_for', False)
|
||||
wait_for = kwargs.get("wait_for", False)
|
||||
if wait_for:
|
||||
if callable(wait_for):
|
||||
print("[LOG] 🔄 Waiting for condition...")
|
||||
@@ -268,32 +293,37 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
|
||||
print("[LOG] 🔄 Waiting for condition...")
|
||||
WebDriverWait(self.driver, 20).until(
|
||||
EC.presence_of_element_located((By.CSS_SELECTOR, wait_for))
|
||||
)
|
||||
|
||||
)
|
||||
|
||||
if not can_not_be_done_headless:
|
||||
html = sanitize_input_encode(self.driver.page_source)
|
||||
self.driver = self.execute_hook('before_return_html', self.driver, html)
|
||||
|
||||
self.driver = self.execute_hook("before_return_html", self.driver, html)
|
||||
|
||||
# Store in cache
|
||||
cache_file_path = os.path.join(Path.home(), ".crawl4ai", "cache", url_hash)
|
||||
cache_file_path = os.path.join(
|
||||
os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()),
|
||||
".crawl4ai",
|
||||
"cache",
|
||||
url_hash,
|
||||
)
|
||||
with open(cache_file_path, "w", encoding="utf-8") as f:
|
||||
f.write(html)
|
||||
|
||||
|
||||
if self.verbose:
|
||||
print(f"[LOG] ✅ Crawled {url} successfully!")
|
||||
|
||||
|
||||
return html
|
||||
except InvalidArgumentException:
|
||||
if not hasattr(e, 'msg'):
|
||||
except InvalidArgumentException as e:
|
||||
if not hasattr(e, "msg"):
|
||||
e.msg = sanitize_input_encode(str(e))
|
||||
raise InvalidArgumentException(f"Failed to crawl {url}: {e.msg}")
|
||||
except WebDriverException as e:
|
||||
# If e does not have a msg attribute, create it and set it to str(e)
|
||||
if not hasattr(e, 'msg'):
|
||||
if not hasattr(e, "msg"):
|
||||
e.msg = sanitize_input_encode(str(e))
|
||||
raise WebDriverException(f"Failed to crawl {url}: {e.msg}")
|
||||
raise WebDriverException(f"Failed to crawl {url}: {e.msg}")
|
||||
except Exception as e:
|
||||
if not hasattr(e, 'msg'):
|
||||
if not hasattr(e, "msg"):
|
||||
e.msg = sanitize_input_encode(str(e))
|
||||
raise Exception(f"Failed to crawl {url}: {e.msg}")
|
||||
|
||||
@@ -301,7 +331,9 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
|
||||
try:
|
||||
# Get the dimensions of the page
|
||||
total_width = self.driver.execute_script("return document.body.scrollWidth")
|
||||
total_height = self.driver.execute_script("return document.body.scrollHeight")
|
||||
total_height = self.driver.execute_script(
|
||||
"return document.body.scrollHeight"
|
||||
)
|
||||
|
||||
# Set the window size to the dimensions of the page
|
||||
self.driver.set_window_size(total_width, total_height)
|
||||
@@ -313,25 +345,27 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
|
||||
image = Image.open(BytesIO(screenshot))
|
||||
|
||||
# Convert image to RGB mode (this will handle both RGB and RGBA images)
|
||||
rgb_image = image.convert('RGB')
|
||||
rgb_image = image.convert("RGB")
|
||||
|
||||
# Convert to JPEG and compress
|
||||
buffered = BytesIO()
|
||||
rgb_image.save(buffered, format="JPEG", quality=85)
|
||||
img_base64 = base64.b64encode(buffered.getvalue()).decode('utf-8')
|
||||
img_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
|
||||
|
||||
if self.verbose:
|
||||
print(f"[LOG] 📸 Screenshot taken and converted to base64")
|
||||
print("[LOG] 📸 Screenshot taken and converted to base64")
|
||||
|
||||
return img_base64
|
||||
except Exception as e:
|
||||
error_message = sanitize_input_encode(f"Failed to take screenshot: {str(e)}")
|
||||
error_message = sanitize_input_encode(
|
||||
f"Failed to take screenshot: {str(e)}"
|
||||
)
|
||||
print(error_message)
|
||||
|
||||
# Generate an image with black background
|
||||
img = Image.new('RGB', (800, 600), color='black')
|
||||
img = Image.new("RGB", (800, 600), color="black")
|
||||
draw = ImageDraw.Draw(img)
|
||||
|
||||
|
||||
# Load a font
|
||||
try:
|
||||
font = ImageFont.truetype("arial.ttf", 40)
|
||||
@@ -345,16 +379,16 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
|
||||
|
||||
# Calculate text position
|
||||
text_position = (10, 10)
|
||||
|
||||
|
||||
# Draw the text on the image
|
||||
draw.text(text_position, wrapped_text, fill=text_color, font=font)
|
||||
|
||||
|
||||
# Convert to base64
|
||||
buffered = BytesIO()
|
||||
img.save(buffered, format="JPEG")
|
||||
img_base64 = base64.b64encode(buffered.getvalue()).decode('utf-8')
|
||||
img_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
|
||||
|
||||
return img_base64
|
||||
|
||||
|
||||
def quit(self):
|
||||
self.driver.quit()
|
||||
|
||||
@@ -3,15 +3,17 @@ from pathlib import Path
|
||||
import sqlite3
|
||||
from typing import Optional, Tuple
|
||||
|
||||
DB_PATH = os.path.join(Path.home(), ".crawl4ai")
|
||||
DB_PATH = os.path.join(os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".crawl4ai")
|
||||
os.makedirs(DB_PATH, exist_ok=True)
|
||||
DB_PATH = os.path.join(DB_PATH, "crawl4ai.db")
|
||||
|
||||
|
||||
def init_db():
|
||||
global DB_PATH
|
||||
conn = sqlite3.connect(DB_PATH)
|
||||
cursor = conn.cursor()
|
||||
cursor.execute('''
|
||||
cursor.execute(
|
||||
"""
|
||||
CREATE TABLE IF NOT EXISTS crawled_data (
|
||||
url TEXT PRIMARY KEY,
|
||||
html TEXT,
|
||||
@@ -24,31 +26,42 @@ def init_db():
|
||||
metadata TEXT DEFAULT "{}",
|
||||
screenshot TEXT DEFAULT ""
|
||||
)
|
||||
''')
|
||||
"""
|
||||
)
|
||||
conn.commit()
|
||||
conn.close()
|
||||
|
||||
|
||||
def alter_db_add_screenshot(new_column: str = "media"):
|
||||
check_db_path()
|
||||
try:
|
||||
conn = sqlite3.connect(DB_PATH)
|
||||
cursor = conn.cursor()
|
||||
cursor.execute(f'ALTER TABLE crawled_data ADD COLUMN {new_column} TEXT DEFAULT ""')
|
||||
cursor.execute(
|
||||
f'ALTER TABLE crawled_data ADD COLUMN {new_column} TEXT DEFAULT ""'
|
||||
)
|
||||
conn.commit()
|
||||
conn.close()
|
||||
except Exception as e:
|
||||
print(f"Error altering database to add screenshot column: {e}")
|
||||
|
||||
|
||||
def check_db_path():
|
||||
if not DB_PATH:
|
||||
raise ValueError("Database path is not set or is empty.")
|
||||
|
||||
def get_cached_url(url: str) -> Optional[Tuple[str, str, str, str, str, str, str, bool, str]]:
|
||||
|
||||
def get_cached_url(
|
||||
url: str,
|
||||
) -> Optional[Tuple[str, str, str, str, str, str, str, bool, str]]:
|
||||
check_db_path()
|
||||
try:
|
||||
conn = sqlite3.connect(DB_PATH)
|
||||
cursor = conn.cursor()
|
||||
cursor.execute('SELECT url, html, cleaned_html, markdown, extracted_content, success, media, links, metadata, screenshot FROM crawled_data WHERE url = ?', (url,))
|
||||
cursor.execute(
|
||||
"SELECT url, html, cleaned_html, markdown, extracted_content, success, media, links, metadata, screenshot FROM crawled_data WHERE url = ?",
|
||||
(url,),
|
||||
)
|
||||
result = cursor.fetchone()
|
||||
conn.close()
|
||||
return result
|
||||
@@ -56,12 +69,25 @@ def get_cached_url(url: str) -> Optional[Tuple[str, str, str, str, str, str, str
|
||||
print(f"Error retrieving cached URL: {e}")
|
||||
return None
|
||||
|
||||
def cache_url(url: str, html: str, cleaned_html: str, markdown: str, extracted_content: str, success: bool, media : str = "{}", links : str = "{}", metadata : str = "{}", screenshot: str = ""):
|
||||
|
||||
def cache_url(
|
||||
url: str,
|
||||
html: str,
|
||||
cleaned_html: str,
|
||||
markdown: str,
|
||||
extracted_content: str,
|
||||
success: bool,
|
||||
media: str = "{}",
|
||||
links: str = "{}",
|
||||
metadata: str = "{}",
|
||||
screenshot: str = "",
|
||||
):
|
||||
check_db_path()
|
||||
try:
|
||||
conn = sqlite3.connect(DB_PATH)
|
||||
cursor = conn.cursor()
|
||||
cursor.execute('''
|
||||
cursor.execute(
|
||||
"""
|
||||
INSERT INTO crawled_data (url, html, cleaned_html, markdown, extracted_content, success, media, links, metadata, screenshot)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
ON CONFLICT(url) DO UPDATE SET
|
||||
@@ -74,18 +100,32 @@ def cache_url(url: str, html: str, cleaned_html: str, markdown: str, extracted_c
|
||||
links = excluded.links,
|
||||
metadata = excluded.metadata,
|
||||
screenshot = excluded.screenshot
|
||||
''', (url, html, cleaned_html, markdown, extracted_content, success, media, links, metadata, screenshot))
|
||||
""",
|
||||
(
|
||||
url,
|
||||
html,
|
||||
cleaned_html,
|
||||
markdown,
|
||||
extracted_content,
|
||||
success,
|
||||
media,
|
||||
links,
|
||||
metadata,
|
||||
screenshot,
|
||||
),
|
||||
)
|
||||
conn.commit()
|
||||
conn.close()
|
||||
except Exception as e:
|
||||
print(f"Error caching URL: {e}")
|
||||
|
||||
|
||||
def get_total_count() -> int:
|
||||
check_db_path()
|
||||
try:
|
||||
conn = sqlite3.connect(DB_PATH)
|
||||
cursor = conn.cursor()
|
||||
cursor.execute('SELECT COUNT(*) FROM crawled_data')
|
||||
cursor.execute("SELECT COUNT(*) FROM crawled_data")
|
||||
result = cursor.fetchone()
|
||||
conn.close()
|
||||
return result[0]
|
||||
@@ -93,43 +133,48 @@ def get_total_count() -> int:
|
||||
print(f"Error getting total count: {e}")
|
||||
return 0
|
||||
|
||||
|
||||
def clear_db():
|
||||
check_db_path()
|
||||
try:
|
||||
conn = sqlite3.connect(DB_PATH)
|
||||
cursor = conn.cursor()
|
||||
cursor.execute('DELETE FROM crawled_data')
|
||||
cursor.execute("DELETE FROM crawled_data")
|
||||
conn.commit()
|
||||
conn.close()
|
||||
except Exception as e:
|
||||
print(f"Error clearing database: {e}")
|
||||
|
||||
|
||||
|
||||
def flush_db():
|
||||
check_db_path()
|
||||
try:
|
||||
conn = sqlite3.connect(DB_PATH)
|
||||
cursor = conn.cursor()
|
||||
cursor.execute('DROP TABLE crawled_data')
|
||||
cursor.execute("DROP TABLE crawled_data")
|
||||
conn.commit()
|
||||
conn.close()
|
||||
except Exception as e:
|
||||
print(f"Error flushing database: {e}")
|
||||
|
||||
|
||||
def update_existing_records(new_column: str = "media", default_value: str = "{}"):
|
||||
check_db_path()
|
||||
try:
|
||||
conn = sqlite3.connect(DB_PATH)
|
||||
cursor = conn.cursor()
|
||||
cursor.execute(f'UPDATE crawled_data SET {new_column} = "{default_value}" WHERE screenshot IS NULL')
|
||||
cursor.execute(
|
||||
f'UPDATE crawled_data SET {new_column} = "{default_value}" WHERE screenshot IS NULL'
|
||||
)
|
||||
conn.commit()
|
||||
conn.close()
|
||||
except Exception as e:
|
||||
print(f"Error updating existing records: {e}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# Delete the existing database file
|
||||
if os.path.exists(DB_PATH):
|
||||
os.remove(DB_PATH)
|
||||
init_db()
|
||||
init_db()
|
||||
# alter_db_add_screenshot("COL_NAME")
|
||||
|
||||
|
||||
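Taken together, these helpers form a small synchronous SQLite cache keyed by URL. Below is a minimal usage sketch; the import path (crawl4ai.database) and all values are assumptions for illustration, not part of this diff.

# Sketch only: module path and values are assumed
from crawl4ai.database import init_db, cache_url, get_cached_url, get_total_count

init_db()  # creates crawl4ai.db under ~/.crawl4ai (or CRAWL4_AI_BASE_DIRECTORY)

cache_url(
    url="https://example.com",
    html="<html>...</html>",
    cleaned_html="<html>...</html>",
    markdown="# Example",
    extracted_content="{}",
    success=True,
)

row = get_cached_url("https://example.com")
if row:
    # the SELECT returns 10 columns in this order
    url, html, cleaned_html, markdown, extracted, success, media, links, metadata, screenshot = row
print(get_total_count())  # number of cached URLs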
75
crawl4ai/docs_manager.py
Normal file
@@ -0,0 +1,75 @@
|
||||
import requests
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
from crawl4ai.async_logger import AsyncLogger
|
||||
from crawl4ai.llmtxt import AsyncLLMTextManager
|
||||
|
||||
|
||||
class DocsManager:
|
||||
def __init__(self, logger=None):
|
||||
self.docs_dir = Path.home() / ".crawl4ai" / "docs"
|
||||
self.local_docs = Path(__file__).parent.parent / "docs" / "llm.txt"
|
||||
self.docs_dir.mkdir(parents=True, exist_ok=True)
|
||||
self.logger = logger or AsyncLogger(verbose=True)
|
||||
self.llm_text = AsyncLLMTextManager(self.docs_dir, self.logger)
|
||||
|
||||
async def ensure_docs_exist(self):
|
||||
"""Fetch docs if not present"""
|
||||
if not any(self.docs_dir.iterdir()):
|
||||
await self.fetch_docs()
|
||||
|
||||
async def fetch_docs(self) -> bool:
|
||||
"""Copy from local docs or download from GitHub"""
|
||||
try:
|
||||
# Try local first
|
||||
if self.local_docs.exists() and (
|
||||
any(self.local_docs.glob("*.md"))
|
||||
or any(self.local_docs.glob("*.tokens"))
|
||||
):
|
||||
# Clear existing .md files from the docs cache directory
|
||||
for file_path in self.docs_dir.glob("*.md"):
|
||||
file_path.unlink()
|
||||
# for file_path in self.docs_dir.glob("*.tokens"):
|
||||
# file_path.unlink()
|
||||
for file_path in self.local_docs.glob("*.md"):
|
||||
shutil.copy2(file_path, self.docs_dir / file_path.name)
|
||||
# for file_path in self.local_docs.glob("*.tokens"):
|
||||
# shutil.copy2(file_path, self.docs_dir / file_path.name)
|
||||
return True
|
||||
|
||||
# Fallback to GitHub
|
||||
response = requests.get(
|
||||
"https://api.github.com/repos/unclecode/crawl4ai/contents/docs/llm.txt",
|
||||
headers={"Accept": "application/vnd.github.v3+json"},
|
||||
)
|
||||
response.raise_for_status()
|
||||
|
||||
for item in response.json():
|
||||
if item["type"] == "file" and item["name"].endswith(".md"):
|
||||
content = requests.get(item["download_url"]).text
|
||||
with open(self.docs_dir / item["name"], "w", encoding="utf-8") as f:
|
||||
f.write(content)
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f"Failed to fetch docs: {str(e)}")
|
||||
raise
|
||||
|
||||
def list(self) -> list[str]:
|
||||
"""List available topics"""
|
||||
names = [file_path.stem for file_path in self.docs_dir.glob("*.md")]
|
||||
# Remove [0-9]+_ prefix
|
||||
names = [name.split("_", 1)[1] if name[0].isdigit() else name for name in names]
|
||||
# Exclude those ending with .xs.md and .q.md
|
||||
names = [
|
||||
name
|
||||
for name in names
|
||||
if not name.endswith(".xs") and not name.endswith(".q")
|
||||
]
|
||||
return names
|
||||
|
||||
def generate(self, sections, mode="extended"):
|
||||
return self.llm_text.generate(sections, mode)
|
||||
|
||||
def search(self, query: str, top_k: int = 5):
|
||||
return self.llm_text.search(query, top_k)
|
||||
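For orientation, a short sketch of how DocsManager is meant to be driven; the asyncio wrapper and import path are assumptions, not shown in this diff.

# Sketch only: fetch docs, list topics, run a BM25-backed search
import asyncio
from crawl4ai.docs_manager import DocsManager

async def main():
    mgr = DocsManager()
    await mgr.ensure_docs_exist()        # copies local .md docs or falls back to GitHub
    print(mgr.list())                    # available topic names
    print(mgr.search("browser config"))  # delegates to AsyncLLMTextManager.search

asyncio.run(main())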
File diff suppressed because it is too large
@@ -54,13 +54,13 @@ class HTML2Text(html.parser.HTMLParser):
|
||||
self.td_count = 0
|
||||
self.table_start = False
|
||||
self.unicode_snob = config.UNICODE_SNOB # covered in cli
|
||||
|
||||
|
||||
self.escape_snob = config.ESCAPE_SNOB # covered in cli
|
||||
self.escape_backslash = config.ESCAPE_BACKSLASH # covered in cli
|
||||
self.escape_dot = config.ESCAPE_DOT # covered in cli
|
||||
self.escape_plus = config.ESCAPE_PLUS # covered in cli
|
||||
self.escape_dash = config.ESCAPE_DASH # covered in cli
|
||||
|
||||
|
||||
self.links_each_paragraph = config.LINKS_EACH_PARAGRAPH
|
||||
self.body_width = bodywidth # covered in cli
|
||||
self.skip_internal_links = config.SKIP_INTERNAL_LINKS # covered in cli
|
||||
@@ -144,8 +144,8 @@ class HTML2Text(html.parser.HTMLParser):
|
||||
|
||||
def update_params(self, **kwargs):
|
||||
for key, value in kwargs.items():
|
||||
setattr(self, key, value)
|
||||
|
||||
setattr(self, key, value)
|
||||
|
||||
def feed(self, data: str) -> None:
|
||||
data = data.replace("</' + 'script>", "</ignore>")
|
||||
super().feed(data)
|
||||
@@ -903,7 +903,13 @@ class HTML2Text(html.parser.HTMLParser):
|
||||
self.empty_link = False
|
||||
|
||||
if not self.code and not self.pre and not entity_char:
|
||||
data = escape_md_section(data, snob=self.escape_snob, escape_dot=self.escape_dot, escape_plus=self.escape_plus, escape_dash=self.escape_dash)
|
||||
data = escape_md_section(
|
||||
data,
|
||||
snob=self.escape_snob,
|
||||
escape_dot=self.escape_dot,
|
||||
escape_plus=self.escape_plus,
|
||||
escape_dash=self.escape_dash,
|
||||
)
|
||||
self.preceding_data = data
|
||||
self.o(data, puredata=True)
|
||||
|
||||
@@ -1013,3 +1019,134 @@ def html2text(html: str, baseurl: str = "", bodywidth: Optional[int] = None) ->
|
||||
h = HTML2Text(baseurl=baseurl, bodywidth=bodywidth)
|
||||
|
||||
return h.handle(html)
|
||||
|
||||
|
||||
class CustomHTML2Text(HTML2Text):
|
||||
def __init__(self, *args, handle_code_in_pre=False, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.inside_pre = False
|
||||
self.inside_code = False
|
||||
self.preserve_tags = set() # Set of tags to preserve
|
||||
self.current_preserved_tag = None
|
||||
self.preserved_content = []
|
||||
self.preserve_depth = 0
|
||||
self.handle_code_in_pre = handle_code_in_pre
|
||||
|
||||
# Configuration options
|
||||
self.skip_internal_links = False
|
||||
self.single_line_break = False
|
||||
self.mark_code = False
|
||||
self.include_sup_sub = False
|
||||
self.body_width = 0
|
||||
self.ignore_mailto_links = True
|
||||
self.ignore_links = False
|
||||
self.escape_backslash = False
|
||||
self.escape_dot = False
|
||||
self.escape_plus = False
|
||||
self.escape_dash = False
|
||||
self.escape_snob = False
|
||||
|
||||
def update_params(self, **kwargs):
|
||||
"""Update parameters and set preserved tags."""
|
||||
for key, value in kwargs.items():
|
||||
if key == "preserve_tags":
|
||||
self.preserve_tags = set(value)
|
||||
elif key == "handle_code_in_pre":
|
||||
self.handle_code_in_pre = value
|
||||
else:
|
||||
setattr(self, key, value)
|
||||
|
||||
def handle_tag(self, tag, attrs, start):
|
||||
# Handle preserved tags
|
||||
if tag in self.preserve_tags:
|
||||
if start:
|
||||
if self.preserve_depth == 0:
|
||||
self.current_preserved_tag = tag
|
||||
self.preserved_content = []
|
||||
# Format opening tag with attributes
|
||||
attr_str = "".join(
|
||||
f' {k}="{v}"' for k, v in attrs.items() if v is not None
|
||||
)
|
||||
self.preserved_content.append(f"<{tag}{attr_str}>")
|
||||
self.preserve_depth += 1
|
||||
return
|
||||
else:
|
||||
self.preserve_depth -= 1
|
||||
if self.preserve_depth == 0:
|
||||
self.preserved_content.append(f"</{tag}>")
|
||||
# Output the preserved HTML block with proper spacing
|
||||
preserved_html = "".join(self.preserved_content)
|
||||
self.o("\n" + preserved_html + "\n")
|
||||
self.current_preserved_tag = None
|
||||
return
|
||||
|
||||
# If we're inside a preserved tag, collect all content
|
||||
if self.preserve_depth > 0:
|
||||
if start:
|
||||
# Format nested tags with attributes
|
||||
attr_str = "".join(
|
||||
f' {k}="{v}"' for k, v in attrs.items() if v is not None
|
||||
)
|
||||
self.preserved_content.append(f"<{tag}{attr_str}>")
|
||||
else:
|
||||
self.preserved_content.append(f"</{tag}>")
|
||||
return
|
||||
|
||||
# Handle pre tags
|
||||
if tag == "pre":
|
||||
if start:
|
||||
self.o("```\n") # Markdown code block start
|
||||
self.inside_pre = True
|
||||
else:
|
||||
self.o("\n```\n") # Markdown code block end
|
||||
self.inside_pre = False
|
||||
elif tag == "code":
|
||||
if self.inside_pre and not self.handle_code_in_pre:
|
||||
# Ignore code tags inside pre blocks if handle_code_in_pre is False
|
||||
return
|
||||
if start:
|
||||
self.o("`") # Markdown inline code start
|
||||
self.inside_code = True
|
||||
else:
|
||||
self.o("`") # Markdown inline code end
|
||||
self.inside_code = False
|
||||
else:
|
||||
super().handle_tag(tag, attrs, start)
|
||||
|
||||
def handle_data(self, data, entity_char=False):
|
||||
"""Override handle_data to capture content within preserved tags."""
|
||||
if self.preserve_depth > 0:
|
||||
self.preserved_content.append(data)
|
||||
return
|
||||
|
||||
if self.inside_pre:
|
||||
# Output the raw content for pre blocks, including content inside code tags
|
||||
self.o(data) # Directly output the data as-is (preserve newlines)
|
||||
return
|
||||
if self.inside_code:
|
||||
# Inline code: no newlines allowed
|
||||
self.o(data.replace("\n", " "))
|
||||
return
|
||||
|
||||
# Default behavior for other tags
|
||||
super().handle_data(data, entity_char)
|
||||
|
||||
# # Handle pre tags
|
||||
# if tag == 'pre':
|
||||
# if start:
|
||||
# self.o('```\n')
|
||||
# self.inside_pre = True
|
||||
# else:
|
||||
# self.o('\n```')
|
||||
# self.inside_pre = False
|
||||
# # elif tag in ["h1", "h2", "h3", "h4", "h5", "h6"]:
|
||||
# # pass
|
||||
# else:
|
||||
# super().handle_tag(tag, attrs, start)
|
||||
|
||||
# def handle_data(self, data, entity_char=False):
|
||||
# """Override handle_data to capture content within preserved tags."""
|
||||
# if self.preserve_depth > 0:
|
||||
# self.preserved_content.append(data)
|
||||
# return
|
||||
# super().handle_data(data, entity_char)
|
||||
|
||||
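A brief usage sketch of the new CustomHTML2Text: selected tags are preserved as raw HTML while the rest is converted to markdown. The sample HTML is illustrative only.

# Sketch: keep <table> markup verbatim, convert the rest to markdown
h = CustomHTML2Text()
h.update_params(preserve_tags={"table"}, handle_code_in_pre=True)
markdown = h.handle(
    "<h1>Title</h1>"
    "<table><tr><td>kept as raw HTML</td></tr></table>"
    "<pre><code>x = 1</code></pre>"
)
print(markdown)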
@@ -1,2 +1,3 @@
|
||||
class OutCallback:
|
||||
def __call__(self, s: str) -> None: ...
|
||||
def __call__(self, s: str) -> None:
|
||||
...
|
||||
|
||||
@@ -210,7 +210,7 @@ def escape_md_section(
|
||||
snob: bool = False,
|
||||
escape_dot: bool = True,
|
||||
escape_plus: bool = True,
|
||||
escape_dash: bool = True
|
||||
escape_dash: bool = True,
|
||||
) -> str:
|
||||
"""
|
||||
Escapes markdown-sensitive characters across whole document sections.
|
||||
@@ -233,6 +233,7 @@ def escape_md_section(
|
||||
|
||||
return text
|
||||
|
||||
|
||||
def reformat_table(lines: List[str], right_margin: int) -> List[str]:
|
||||
"""
|
||||
Given the lines of a table
|
||||
|
||||
109
crawl4ai/install.py
Normal file
@@ -0,0 +1,109 @@
|
||||
import subprocess
|
||||
import sys
|
||||
import asyncio
|
||||
from .async_logger import AsyncLogger, LogLevel
|
||||
|
||||
# Initialize logger
|
||||
logger = AsyncLogger(log_level=LogLevel.DEBUG, verbose=True)
|
||||
|
||||
|
||||
def post_install():
|
||||
"""Run all post-installation tasks"""
|
||||
logger.info("Running post-installation setup...", tag="INIT")
|
||||
install_playwright()
|
||||
run_migration()
|
||||
logger.success("Post-installation setup completed!", tag="COMPLETE")
|
||||
|
||||
|
||||
def install_playwright():
|
||||
logger.info("Installing Playwright browsers...", tag="INIT")
|
||||
try:
|
||||
# subprocess.check_call([sys.executable, "-m", "playwright", "install", "--with-deps", "--force", "chrome"])
|
||||
subprocess.check_call(
|
||||
[
|
||||
sys.executable,
|
||||
"-m",
|
||||
"playwright",
|
||||
"install",
|
||||
"--with-deps",
|
||||
"--force",
|
||||
"chromium",
|
||||
]
|
||||
)
|
||||
logger.success(
|
||||
"Playwright installation completed successfully.", tag="COMPLETE"
|
||||
)
|
||||
except subprocess.CalledProcessError:
|
||||
# logger.error(f"Error during Playwright installation: {e}", tag="ERROR")
|
||||
logger.warning(
|
||||
f"Please run '{sys.executable} -m playwright install --with-deps' manually after the installation."
|
||||
)
|
||||
except Exception:
|
||||
# logger.error(f"Unexpected error during Playwright installation: {e}", tag="ERROR")
|
||||
logger.warning(
|
||||
f"Please run '{sys.executable} -m playwright install --with-deps' manually after the installation."
|
||||
)
|
||||
|
||||
|
||||
def run_migration():
|
||||
"""Initialize database during installation"""
|
||||
try:
|
||||
logger.info("Starting database initialization...", tag="INIT")
|
||||
from crawl4ai.async_database import async_db_manager
|
||||
|
||||
asyncio.run(async_db_manager.initialize())
|
||||
logger.success(
|
||||
"Database initialization completed successfully.", tag="COMPLETE"
|
||||
)
|
||||
except ImportError:
|
||||
logger.warning("Database module not found. Will initialize on first use.")
|
||||
except Exception as e:
|
||||
logger.warning(f"Database initialization failed: {e}")
|
||||
logger.warning("Database will be initialized on first use")
|
||||
|
||||
|
||||
async def run_doctor():
|
||||
"""Test if Crawl4AI is working properly"""
|
||||
logger.info("Running Crawl4AI health check...", tag="INIT")
|
||||
try:
|
||||
from .async_webcrawler import (
|
||||
AsyncWebCrawler,
|
||||
BrowserConfig,
|
||||
CrawlerRunConfig,
|
||||
CacheMode,
|
||||
)
|
||||
|
||||
browser_config = BrowserConfig(
|
||||
headless=True,
|
||||
browser_type="chromium",
|
||||
ignore_https_errors=True,
|
||||
light_mode=True,
|
||||
viewport_width=1280,
|
||||
viewport_height=720,
|
||||
)
|
||||
|
||||
run_config = CrawlerRunConfig(
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
screenshot=True,
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
logger.info("Testing crawling capabilities...", tag="TEST")
|
||||
result = await crawler.arun(url="https://crawl4ai.com", config=run_config)
|
||||
|
||||
if result and result.markdown:
|
||||
logger.success("✅ Crawling test passed!", tag="COMPLETE")
|
||||
return True
|
||||
else:
|
||||
raise Exception("Failed to get content")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"❌ Test failed: {e}", tag="ERROR")
|
||||
return False
|
||||
|
||||
|
||||
def doctor():
|
||||
"""Entry point for the doctor command"""
|
||||
import asyncio
|
||||
|
||||
return asyncio.run(run_doctor())
|
||||
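These helpers are intended to be wired to packaging hooks or a CLI; a minimal sketch of invoking them directly (the import path is an assumption):

# Sketch only: run post-install setup and the health check by hand
from crawl4ai.install import post_install, doctor

post_install()  # installs Playwright chromium and initializes the database
print("healthy" if doctor() else "health check failed")  # test crawl of https://crawl4ai.com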
18
crawl4ai/js_snippet/__init__.py
Normal file
@@ -0,0 +1,18 @@
|
||||
import os
|
||||
|
||||
|
||||
# Load the named JS script from the folder containing this module and return its content as a string
|
||||
def load_js_script(script_name):
|
||||
# Get the path of the current script
|
||||
current_script_path = os.path.dirname(os.path.realpath(__file__))
|
||||
# Get the path of the script to load
|
||||
script_path = os.path.join(current_script_path, script_name + ".js")
|
||||
# Check if the script exists
|
||||
if not os.path.exists(script_path):
|
||||
raise ValueError(
|
||||
f"Script {script_name} not found in the folder {current_script_path}"
|
||||
)
|
||||
# Load the content of the script
|
||||
with open(script_path, "r") as f:
|
||||
script_content = f.read()
|
||||
return script_content
|
||||
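A quick sketch of how load_js_script pairs with the snippet files added below; the Playwright/Selenium calls in the comments are illustrative assumptions, not part of this diff.

# Sketch: load a bundled snippet by name (without the .js extension)
from crawl4ai.js_snippet import load_js_script

override_js = load_js_script("navigator_overrider")
# e.g. (assumed usage) with Playwright: await page.add_init_script(override_js)
# or with Selenium: driver.execute_script(load_js_script("remove_overlay_elements"))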
25
crawl4ai/js_snippet/navigator_overrider.js
Normal file
@@ -0,0 +1,25 @@
|
||||
// Pass the Permissions Test.
|
||||
const originalQuery = window.navigator.permissions.query;
|
||||
window.navigator.permissions.query = (parameters) =>
|
||||
parameters.name === "notifications"
|
||||
? Promise.resolve({ state: Notification.permission })
|
||||
: originalQuery(parameters);
|
||||
Object.defineProperty(navigator, "webdriver", {
|
||||
get: () => undefined,
|
||||
});
|
||||
window.navigator.chrome = {
|
||||
runtime: {},
|
||||
// Add other properties if necessary
|
||||
};
|
||||
Object.defineProperty(navigator, "plugins", {
|
||||
get: () => [1, 2, 3, 4, 5],
|
||||
});
|
||||
Object.defineProperty(navigator, "languages", {
|
||||
get: () => ["en-US", "en"],
|
||||
});
|
||||
Object.defineProperty(document, "hidden", {
|
||||
get: () => false,
|
||||
});
|
||||
Object.defineProperty(document, "visibilityState", {
|
||||
get: () => "visible",
|
||||
});
|
||||
119
crawl4ai/js_snippet/remove_overlay_elements.js
Normal file
@@ -0,0 +1,119 @@
|
||||
async () => {
|
||||
// Function to check if element is visible
|
||||
const isVisible = (elem) => {
|
||||
const style = window.getComputedStyle(elem);
|
||||
return style.display !== "none" && style.visibility !== "hidden" && style.opacity !== "0";
|
||||
};
|
||||
|
||||
// Common selectors for popups and overlays
|
||||
const commonSelectors = [
|
||||
// Close buttons first
|
||||
'button[class*="close" i]',
|
||||
'button[class*="dismiss" i]',
|
||||
'button[aria-label*="close" i]',
|
||||
'button[title*="close" i]',
|
||||
'a[class*="close" i]',
|
||||
'span[class*="close" i]',
|
||||
|
||||
// Cookie notices
|
||||
'[class*="cookie-banner" i]',
|
||||
'[id*="cookie-banner" i]',
|
||||
'[class*="cookie-consent" i]',
|
||||
'[id*="cookie-consent" i]',
|
||||
|
||||
// Newsletter/subscription dialogs
|
||||
'[class*="newsletter" i]',
|
||||
'[class*="subscribe" i]',
|
||||
|
||||
// Generic popups/modals
|
||||
'[class*="popup" i]',
|
||||
'[class*="modal" i]',
|
||||
'[class*="overlay" i]',
|
||||
'[class*="dialog" i]',
|
||||
'[role="dialog"]',
|
||||
'[role="alertdialog"]',
|
||||
];
|
||||
|
||||
// Try to click close buttons first
|
||||
for (const selector of commonSelectors.slice(0, 6)) {
|
||||
const closeButtons = document.querySelectorAll(selector);
|
||||
for (const button of closeButtons) {
|
||||
if (isVisible(button)) {
|
||||
try {
|
||||
button.click();
|
||||
await new Promise((resolve) => setTimeout(resolve, 100));
|
||||
} catch (e) {
|
||||
console.log("Error clicking button:", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Remove remaining overlay elements
|
||||
const removeOverlays = () => {
|
||||
// Find elements with high z-index
|
||||
const allElements = document.querySelectorAll("*");
|
||||
for (const elem of allElements) {
|
||||
const style = window.getComputedStyle(elem);
|
||||
const zIndex = parseInt(style.zIndex);
|
||||
const position = style.position;
|
||||
|
||||
if (
|
||||
isVisible(elem) &&
|
||||
(zIndex > 999 || position === "fixed" || position === "absolute") &&
|
||||
(elem.offsetWidth > window.innerWidth * 0.5 ||
|
||||
elem.offsetHeight > window.innerHeight * 0.5 ||
|
||||
style.backgroundColor.includes("rgba") ||
|
||||
parseFloat(style.opacity) < 1)
|
||||
) {
|
||||
elem.remove();
|
||||
}
|
||||
}
|
||||
|
||||
// Remove elements matching common selectors
|
||||
for (const selector of commonSelectors) {
|
||||
const elements = document.querySelectorAll(selector);
|
||||
elements.forEach((elem) => {
|
||||
if (isVisible(elem)) {
|
||||
elem.remove();
|
||||
}
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
// Remove overlay elements
|
||||
removeOverlays();
|
||||
|
||||
// Remove any fixed/sticky position elements at the top/bottom
|
||||
const removeFixedElements = () => {
|
||||
const elements = document.querySelectorAll("*");
|
||||
elements.forEach((elem) => {
|
||||
const style = window.getComputedStyle(elem);
|
||||
if ((style.position === "fixed" || style.position === "sticky") && isVisible(elem)) {
|
||||
elem.remove();
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
removeFixedElements();
|
||||
|
||||
// Remove empty block elements as: div, p, span, etc.
|
||||
const removeEmptyBlockElements = () => {
|
||||
const blockElements = document.querySelectorAll(
|
||||
"div, p, span, section, article, header, footer, aside, nav, main, ul, ol, li, dl, dt, dd, h1, h2, h3, h4, h5, h6"
|
||||
);
|
||||
blockElements.forEach((elem) => {
|
||||
if (elem.innerText.trim() === "") {
|
||||
elem.remove();
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
// Remove margin-right and padding-right from body (often added by modal scripts)
|
||||
document.body.style.marginRight = "0px";
|
||||
document.body.style.paddingRight = "0px";
|
||||
document.body.style.overflow = "auto";
|
||||
|
||||
// Wait a bit for any animations to complete
|
||||
await new Promise((resolve) => setTimeout(resolve, 100));
|
||||
};
|
||||
54
crawl4ai/js_snippet/update_image_dimensions.js
Normal file
@@ -0,0 +1,54 @@
|
||||
() => {
|
||||
return new Promise((resolve) => {
|
||||
const filterImage = (img) => {
|
||||
// Filter out images that are too small
|
||||
if (img.width < 100 && img.height < 100) return false;
|
||||
|
||||
// Filter out images that are not visible
|
||||
const rect = img.getBoundingClientRect();
|
||||
if (rect.width === 0 || rect.height === 0) return false;
|
||||
|
||||
// Filter out images with certain class names (e.g., icons, thumbnails)
|
||||
if (img.classList.contains("icon") || img.classList.contains("thumbnail")) return false;
|
||||
|
||||
// Filter out images with certain patterns in their src (e.g., placeholder images)
|
||||
if (img.src.includes("placeholder") || img.src.includes("icon")) return false;
|
||||
|
||||
return true;
|
||||
};
|
||||
|
||||
const images = Array.from(document.querySelectorAll("img")).filter(filterImage);
|
||||
let imagesLeft = images.length;
|
||||
|
||||
if (imagesLeft === 0) {
|
||||
resolve();
|
||||
return;
|
||||
}
|
||||
|
||||
const checkImage = (img) => {
|
||||
if (img.complete && img.naturalWidth !== 0) {
|
||||
img.setAttribute("width", img.naturalWidth);
|
||||
img.setAttribute("height", img.naturalHeight);
|
||||
imagesLeft--;
|
||||
if (imagesLeft === 0) resolve();
|
||||
}
|
||||
};
|
||||
|
||||
images.forEach((img) => {
|
||||
checkImage(img);
|
||||
if (!img.complete) {
|
||||
img.onload = () => {
|
||||
checkImage(img);
|
||||
};
|
||||
img.onerror = () => {
|
||||
imagesLeft--;
|
||||
if (imagesLeft === 0) resolve();
|
||||
};
|
||||
}
|
||||
});
|
||||
|
||||
// Fallback timeout of 5 seconds
|
||||
// setTimeout(() => resolve(), 5000);
|
||||
resolve();
|
||||
});
|
||||
};
|
||||
546
crawl4ai/llmtxt.py
Normal file
@@ -0,0 +1,546 @@
|
||||
import os
|
||||
from pathlib import Path
|
||||
import re
|
||||
from typing import Dict, List, Tuple, Optional, Any
|
||||
import json
|
||||
from tqdm import tqdm
|
||||
import time
|
||||
import psutil
|
||||
import numpy as np
|
||||
from rank_bm25 import BM25Okapi
|
||||
from nltk.tokenize import word_tokenize
|
||||
from nltk.corpus import stopwords
|
||||
from nltk.stem import WordNetLemmatizer
|
||||
from litellm import batch_completion
|
||||
from .async_logger import AsyncLogger
|
||||
import litellm
|
||||
import pickle
|
||||
import hashlib # <--- ADDED for file-hash
|
||||
import glob
|
||||
|
||||
litellm.set_verbose = False
|
||||
|
||||
|
||||
def _compute_file_hash(file_path: Path) -> str:
|
||||
"""Compute MD5 hash for the file's entire content."""
|
||||
hash_md5 = hashlib.md5()
|
||||
with file_path.open("rb") as f:
|
||||
for chunk in iter(lambda: f.read(4096), b""):
|
||||
hash_md5.update(chunk)
|
||||
return hash_md5.hexdigest()
|
||||
|
||||
|
||||
class AsyncLLMTextManager:
|
||||
def __init__(
|
||||
self,
|
||||
docs_dir: Path,
|
||||
logger: Optional[AsyncLogger] = None,
|
||||
max_concurrent_calls: int = 5,
|
||||
batch_size: int = 3,
|
||||
) -> None:
|
||||
self.docs_dir = docs_dir
|
||||
self.logger = logger
|
||||
self.max_concurrent_calls = max_concurrent_calls
|
||||
self.batch_size = batch_size
|
||||
self.bm25_index = None
|
||||
self.document_map: Dict[str, Any] = {}
|
||||
self.tokenized_facts: List[str] = []
|
||||
self.bm25_index_file = self.docs_dir / "bm25_index.pkl"
|
||||
|
||||
async def _process_document_batch(self, doc_batch: List[Path]) -> None:
|
||||
"""Process a batch of documents in parallel"""
|
||||
contents = []
|
||||
for file_path in doc_batch:
|
||||
try:
|
||||
with open(file_path, "r", encoding="utf-8") as f:
|
||||
contents.append(f.read())
|
||||
except Exception as e:
|
||||
self.logger.error(f"Error reading {file_path}: {str(e)}")
|
||||
contents.append("") # Add empty content to maintain batch alignment
|
||||
|
||||
prompt = """Given a documentation file, generate a list of atomic facts where each fact:
|
||||
1. Represents a single piece of knowledge
|
||||
2. Contains variations in terminology for the same concept
|
||||
3. References relevant code patterns if they exist
|
||||
4. Is written in a way that would match natural language queries
|
||||
|
||||
Each fact should follow this format:
|
||||
<main_concept>: <fact_statement> | <related_terms> | <code_reference>
|
||||
|
||||
Example Facts:
|
||||
browser_config: Configure headless mode and browser type for AsyncWebCrawler | headless, browser_type, chromium, firefox | BrowserConfig(browser_type="chromium", headless=True)
|
||||
redis_connection: Redis client connection requires host and port configuration | redis setup, redis client, connection params | Redis(host='localhost', port=6379, db=0)
|
||||
pandas_filtering: Filter DataFrame rows using boolean conditions | dataframe filter, query, boolean indexing | df[df['column'] > 5]
|
||||
|
||||
Wrap your response in <index>...</index> tags.
|
||||
"""
|
||||
|
||||
# Prepare messages for batch processing
|
||||
messages_list = [
|
||||
[
|
||||
{
|
||||
"role": "user",
|
||||
"content": f"{prompt}\n\nGenerate index for this documentation:\n\n{content}",
|
||||
}
|
||||
]
|
||||
for content in contents
|
||||
if content
|
||||
]
|
||||
|
||||
try:
|
||||
responses = batch_completion(
|
||||
model="anthropic/claude-3-5-sonnet-latest",
|
||||
messages=messages_list,
|
||||
logger_fn=None,
|
||||
)
|
||||
|
||||
# Process responses and save index files
|
||||
for response, file_path in zip(responses, doc_batch):
|
||||
try:
|
||||
index_content_match = re.search(
|
||||
r"<index>(.*?)</index>",
|
||||
response.choices[0].message.content,
|
||||
re.DOTALL,
|
||||
)
|
||||
if not index_content_match:
|
||||
self.logger.warning(
|
||||
f"No <index>...</index> content found for {file_path}"
|
||||
)
|
||||
continue
|
||||
|
||||
index_content = re.sub(
|
||||
r"\n\s*\n", "\n", index_content_match.group(1)
|
||||
).strip()
|
||||
if index_content:
|
||||
index_file = file_path.with_suffix(".q.md")
|
||||
with open(index_file, "w", encoding="utf-8") as f:
|
||||
f.write(index_content)
|
||||
self.logger.info(f"Created index file: {index_file}")
|
||||
else:
|
||||
self.logger.warning(
|
||||
f"No index content found in response for {file_path}"
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(
|
||||
f"Error processing response for {file_path}: {str(e)}"
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f"Error in batch completion: {str(e)}")
|
||||
|
||||
def _validate_fact_line(self, line: str) -> Tuple[bool, Optional[str]]:
|
||||
if "|" not in line:
|
||||
return False, "Missing separator '|'"
|
||||
|
||||
parts = [p.strip() for p in line.split("|")]
|
||||
if len(parts) != 3:
|
||||
return False, f"Expected 3 parts, got {len(parts)}"
|
||||
|
||||
concept_part = parts[0]
|
||||
if ":" not in concept_part:
|
||||
return False, "Missing ':' in concept definition"
|
||||
|
||||
return True, None
|
||||
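To make the three-part fact format concrete, a small check against _validate_fact_line; manager is assumed to be an AsyncLLMTextManager instance and the sample lines are illustrative.

# Sketch: "concept: statement | related terms | code reference"
ok, err = manager._validate_fact_line(
    "browser_config: Configure headless mode for AsyncWebCrawler | headless, chromium | BrowserConfig(headless=True)"
)
assert ok and err is None

ok, err = manager._validate_fact_line("just a sentence without separators")
assert not ok and err == "Missing separator '|'"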
|
||||
def _load_or_create_token_cache(self, fact_file: Path) -> Dict:
|
||||
"""
|
||||
Load token cache from .q.tokens if present and matching file hash.
|
||||
Otherwise return a new structure with updated file-hash.
|
||||
"""
|
||||
cache_file = fact_file.with_suffix(".q.tokens")
|
||||
current_hash = _compute_file_hash(fact_file)
|
||||
|
||||
if cache_file.exists():
|
||||
try:
|
||||
with open(cache_file, "r") as f:
|
||||
cache = json.load(f)
|
||||
# If the hash matches, return it directly
|
||||
if cache.get("content_hash") == current_hash:
|
||||
return cache
|
||||
# Otherwise, we signal that it's changed
|
||||
self.logger.info(f"Hash changed for {fact_file}, reindex needed.")
|
||||
except json.JSONDecodeError:
|
||||
self.logger.warning(f"Corrupt token cache for {fact_file}, rebuilding.")
|
||||
except Exception as e:
|
||||
self.logger.warning(f"Error reading cache for {fact_file}: {str(e)}")
|
||||
|
||||
# Return a fresh cache
|
||||
return {"facts": {}, "content_hash": current_hash}
|
||||
|
||||
def _save_token_cache(self, fact_file: Path, cache: Dict) -> None:
|
||||
cache_file = fact_file.with_suffix(".q.tokens")
|
||||
# Always ensure we're saving the correct file-hash
|
||||
cache["content_hash"] = _compute_file_hash(fact_file)
|
||||
with open(cache_file, "w") as f:
|
||||
json.dump(cache, f)
|
||||
|
||||
def preprocess_text(self, text: str) -> List[str]:
|
||||
parts = [x.strip() for x in text.split("|")] if "|" in text else [text]
|
||||
# Remove : after the first word of parts[0]
|
||||
parts[0] = re.sub(r"^(.*?):", r"\1", parts[0])
|
||||
|
||||
lemmatizer = WordNetLemmatizer()
|
||||
stop_words = set(stopwords.words("english")) - {
|
||||
"how",
|
||||
"what",
|
||||
"when",
|
||||
"where",
|
||||
"why",
|
||||
"which",
|
||||
}
|
||||
|
||||
tokens = []
|
||||
for part in parts:
|
||||
if "(" in part and ")" in part:
|
||||
code_tokens = re.findall(
|
||||
r'[\w_]+(?=\()|[\w_]+(?==[\'"]{1}[\w_]+[\'"]{1})', part
|
||||
)
|
||||
tokens.extend(code_tokens)
|
||||
|
||||
words = word_tokenize(part.lower())
|
||||
tokens.extend(
|
||||
[
|
||||
lemmatizer.lemmatize(token)
|
||||
for token in words
|
||||
if token not in stop_words
|
||||
]
|
||||
)
|
||||
|
||||
return tokens
|
||||
|
||||
def maybe_load_bm25_index(self, clear_cache=False) -> bool:
|
||||
"""
|
||||
Load existing BM25 index from disk, if present and clear_cache=False.
|
||||
"""
|
||||
if not clear_cache and os.path.exists(self.bm25_index_file):
|
||||
self.logger.info("Loading existing BM25 index from disk.")
|
||||
with open(self.bm25_index_file, "rb") as f:
|
||||
data = pickle.load(f)
|
||||
self.tokenized_facts = data["tokenized_facts"]
|
||||
self.bm25_index = data["bm25_index"]
|
||||
return True
|
||||
return False
|
||||
|
||||
def build_search_index(self, clear_cache=False) -> None:
|
||||
"""
|
||||
Checks for new or modified .q.md files by comparing file-hash.
|
||||
If none need reindexing and clear_cache is False, loads existing index if available.
|
||||
Otherwise, reindexes only changed/new files and merges or creates a new index.
|
||||
"""
|
||||
# If clear_cache is True, we skip partial logic: rebuild everything from scratch
|
||||
if clear_cache:
|
||||
self.logger.info("Clearing cache and rebuilding full search index.")
|
||||
if self.bm25_index_file.exists():
|
||||
self.bm25_index_file.unlink()
|
||||
|
||||
process = psutil.Process()
|
||||
self.logger.info("Checking which .q.md files need (re)indexing...")
|
||||
|
||||
# Gather all .q.md files
|
||||
q_files = [
|
||||
self.docs_dir / f for f in os.listdir(self.docs_dir) if f.endswith(".q.md")
|
||||
]
|
||||
|
||||
# We'll store known (unchanged) facts in these lists
|
||||
existing_facts: List[str] = []
|
||||
existing_tokens: List[List[str]] = []
|
||||
|
||||
# Keep track of invalid lines for logging
|
||||
invalid_lines = []
|
||||
needSet = [] # files that must be (re)indexed
|
||||
|
||||
for qf in q_files:
|
||||
token_cache_file = qf.with_suffix(".q.tokens")
|
||||
|
||||
# If no .q.tokens or clear_cache is True → definitely reindex
|
||||
if clear_cache or not token_cache_file.exists():
|
||||
needSet.append(qf)
|
||||
continue
|
||||
|
||||
# Otherwise, load the existing cache and compare hash
|
||||
cache = self._load_or_create_token_cache(qf)
|
||||
# If the .q.tokens was out of date (i.e. changed hash), we reindex
|
||||
if len(cache["facts"]) == 0 or cache.get(
|
||||
"content_hash"
|
||||
) != _compute_file_hash(qf):
|
||||
needSet.append(qf)
|
||||
else:
|
||||
# File is unchanged → retrieve cached token data
|
||||
for line, cache_data in cache["facts"].items():
|
||||
existing_facts.append(line)
|
||||
existing_tokens.append(cache_data["tokens"])
|
||||
self.document_map[line] = qf # track the doc for that fact
|
||||
|
||||
if not needSet and not clear_cache:
|
||||
# If no file needs reindexing, try loading existing index
|
||||
if self.maybe_load_bm25_index(clear_cache=False):
|
||||
self.logger.info(
|
||||
"No new/changed .q.md files found. Using existing BM25 index."
|
||||
)
|
||||
return
|
||||
else:
|
||||
# If there's no existing index, we must build a fresh index from the old caches
|
||||
self.logger.info(
|
||||
"No existing BM25 index found. Building from cached facts."
|
||||
)
|
||||
if existing_facts:
|
||||
self.logger.info(
|
||||
f"Building BM25 index with {len(existing_facts)} cached facts."
|
||||
)
|
||||
self.bm25_index = BM25Okapi(existing_tokens)
|
||||
self.tokenized_facts = existing_facts
|
||||
with open(self.bm25_index_file, "wb") as f:
|
||||
pickle.dump(
|
||||
{
|
||||
"bm25_index": self.bm25_index,
|
||||
"tokenized_facts": self.tokenized_facts,
|
||||
},
|
||||
f,
|
||||
)
|
||||
else:
|
||||
self.logger.warning("No facts found at all. Index remains empty.")
|
||||
return
|
||||
|
||||
# -----------------------------------------------------
|
||||
# If we reach here, we have new or changed .q.md files
|
||||
# We'll parse them, reindex them, and then combine with existing_facts
|
||||
# -----------------------------------------------------
|
||||
|
||||
self.logger.info(f"{len(needSet)} file(s) need reindexing. Parsing now...")
|
||||
|
||||
# 1) Parse the new or changed .q.md files
|
||||
new_facts = []
|
||||
new_tokens = []
|
||||
with tqdm(total=len(needSet), desc="Indexing changed files") as file_pbar:
|
||||
for file in needSet:
|
||||
# We'll build up a fresh cache
|
||||
fresh_cache = {"facts": {}, "content_hash": _compute_file_hash(file)}
|
||||
try:
|
||||
with open(file, "r", encoding="utf-8") as f_obj:
|
||||
content = f_obj.read().strip()
|
||||
lines = [l.strip() for l in content.split("\n") if l.strip()]
|
||||
|
||||
for line in lines:
|
||||
is_valid, error = self._validate_fact_line(line)
|
||||
if not is_valid:
|
||||
invalid_lines.append((file, line, error))
|
||||
continue
|
||||
|
||||
tokens = self.preprocess_text(line)
|
||||
fresh_cache["facts"][line] = {
|
||||
"tokens": tokens,
|
||||
"added": time.time(),
|
||||
}
|
||||
new_facts.append(line)
|
||||
new_tokens.append(tokens)
|
||||
self.document_map[line] = file
|
||||
|
||||
# Save the new .q.tokens with updated hash
|
||||
self._save_token_cache(file, fresh_cache)
|
||||
|
||||
mem_usage = process.memory_info().rss / 1024 / 1024
|
||||
self.logger.debug(
|
||||
f"Memory usage after {file.name}: {mem_usage:.2f}MB"
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f"Error processing {file}: {str(e)}")
|
||||
|
||||
file_pbar.update(1)
|
||||
|
||||
if invalid_lines:
|
||||
self.logger.warning(f"Found {len(invalid_lines)} invalid fact lines:")
|
||||
for file, line, error in invalid_lines:
|
||||
self.logger.warning(f"{file}: {error} in line: {line[:50]}...")
|
||||
|
||||
# 2) Merge newly tokenized facts with the existing ones
|
||||
all_facts = existing_facts + new_facts
|
||||
all_tokens = existing_tokens + new_tokens
|
||||
|
||||
# 3) Build BM25 index from combined facts
|
||||
self.logger.info(
|
||||
f"Building BM25 index with {len(all_facts)} total facts (old + new)."
|
||||
)
|
||||
self.bm25_index = BM25Okapi(all_tokens)
|
||||
self.tokenized_facts = all_facts
|
||||
|
||||
# 4) Save the updated BM25 index to disk
|
||||
with open(self.bm25_index_file, "wb") as f:
|
||||
pickle.dump(
|
||||
{
|
||||
"bm25_index": self.bm25_index,
|
||||
"tokenized_facts": self.tokenized_facts,
|
||||
},
|
||||
f,
|
||||
)
|
||||
|
||||
final_mem = process.memory_info().rss / 1024 / 1024
|
||||
self.logger.info(f"Search index updated. Final memory usage: {final_mem:.2f}MB")
|
||||
|
||||
async def generate_index_files(
|
||||
self, force_generate_facts: bool = False, clear_bm25_cache: bool = False
|
||||
) -> None:
|
||||
"""
|
||||
Generate index files for all documents in parallel batches
|
||||
|
||||
Args:
|
||||
force_generate_facts (bool): If True, regenerate indexes even if they exist
|
||||
clear_bm25_cache (bool): If True, clear existing BM25 index cache
|
||||
"""
|
||||
self.logger.info("Starting index generation for documentation files.")
|
||||
|
||||
md_files = [
|
||||
self.docs_dir / f
|
||||
for f in os.listdir(self.docs_dir)
|
||||
if f.endswith(".md") and not any(f.endswith(x) for x in [".q.md", ".xs.md"])
|
||||
]
|
||||
|
||||
# Filter out files that already have .q files unless force=True
|
||||
if not force_generate_facts:
|
||||
md_files = [
|
||||
f
|
||||
for f in md_files
|
||||
if not (self.docs_dir / f.name.replace(".md", ".q.md")).exists()
|
||||
]
|
||||
|
||||
if not md_files:
|
||||
self.logger.info("All index files exist. Use force=True to regenerate.")
|
||||
else:
|
||||
# Process documents in batches
|
||||
for i in range(0, len(md_files), self.batch_size):
|
||||
batch = md_files[i : i + self.batch_size]
|
||||
self.logger.info(
|
||||
f"Processing batch {i//self.batch_size + 1}/{(len(md_files)//self.batch_size) + 1}"
|
||||
)
|
||||
await self._process_document_batch(batch)
|
||||
|
||||
self.logger.info("Index generation complete, building/updating search index.")
|
||||
self.build_search_index(clear_cache=clear_bm25_cache)
|
||||
|
||||
def generate(self, sections: List[str], mode: str = "extended") -> str:
|
||||
# Get all markdown files
|
||||
all_files = glob.glob(str(self.docs_dir / "[0-9]*.md")) + glob.glob(
|
||||
str(self.docs_dir / "[0-9]*.xs.md")
|
||||
)
|
||||
|
||||
# Extract base names without extensions
|
||||
base_docs = {
|
||||
Path(f).name.split(".")[0]
|
||||
for f in all_files
|
||||
if not Path(f).name.endswith(".q.md")
|
||||
}
|
||||
|
||||
# Filter by sections if provided
|
||||
if sections:
|
||||
base_docs = {
|
||||
doc
|
||||
for doc in base_docs
|
||||
if any(section.lower() in doc.lower() for section in sections)
|
||||
}
|
||||
|
||||
# Get file paths based on mode
|
||||
files = []
|
||||
for doc in sorted(
|
||||
base_docs,
|
||||
key=lambda x: int(x.split("_")[0]) if x.split("_")[0].isdigit() else 999999,
|
||||
):
|
||||
if mode == "condensed":
|
||||
xs_file = self.docs_dir / f"{doc}.xs.md"
|
||||
regular_file = self.docs_dir / f"{doc}.md"
|
||||
files.append(str(xs_file if xs_file.exists() else regular_file))
|
||||
else:
|
||||
files.append(str(self.docs_dir / f"{doc}.md"))
|
||||
|
||||
# Read and format content
|
||||
content = []
|
||||
for file in files:
|
||||
try:
|
||||
with open(file, "r", encoding="utf-8") as f:
|
||||
fname = Path(file).name
|
||||
content.append(f"{'#'*20}\n# {fname}\n{'#'*20}\n\n{f.read()}")
|
||||
except Exception as e:
|
||||
self.logger.error(f"Error reading {file}: {str(e)}")
|
||||
|
||||
return "\n\n---\n\n".join(content) if content else ""
|
||||
|
||||
def search(self, query: str, top_k: int = 5) -> str:
|
||||
if not self.bm25_index:
|
||||
return "No search index available. Call build_search_index() first."
|
||||
|
||||
query_tokens = self.preprocess_text(query)
|
||||
doc_scores = self.bm25_index.get_scores(query_tokens)
|
||||
|
||||
mean_score = np.mean(doc_scores)
|
||||
std_score = np.std(doc_scores)
|
||||
score_threshold = mean_score + (0.25 * std_score)
|
||||
|
||||
file_data = self._aggregate_search_scores(
|
||||
doc_scores=doc_scores,
|
||||
score_threshold=score_threshold,
|
||||
query_tokens=query_tokens,
|
||||
)
|
||||
|
||||
ranked_files = sorted(
|
||||
file_data.items(),
|
||||
key=lambda x: (
|
||||
x[1]["code_match_score"] * 2.0
|
||||
+ x[1]["match_count"] * 1.5
|
||||
+ x[1]["total_score"]
|
||||
),
|
||||
reverse=True,
|
||||
)[:top_k]
|
||||
|
||||
results = []
|
||||
for file, _ in ranked_files:
|
||||
main_doc = str(file).replace(".q.md", ".md")
|
||||
if os.path.exists(self.docs_dir / main_doc):
|
||||
with open(self.docs_dir / main_doc, "r", encoding="utf-8") as f:
|
||||
only_file_name = main_doc.split("/")[-1]
|
||||
content = ["#" * 20, f"# {only_file_name}", "#" * 20, "", f.read()]
|
||||
results.append("\n".join(content))
|
||||
|
||||
return "\n\n---\n\n".join(results)
|
||||
|
||||
def _aggregate_search_scores(
|
||||
self, doc_scores: List[float], score_threshold: float, query_tokens: List[str]
|
||||
) -> Dict:
|
||||
file_data = {}
|
||||
|
||||
for idx, score in enumerate(doc_scores):
|
||||
if score <= score_threshold:
|
||||
continue
|
||||
|
||||
fact = self.tokenized_facts[idx]
|
||||
file_path = self.document_map[fact]
|
||||
|
||||
if file_path not in file_data:
|
||||
file_data[file_path] = {
|
||||
"total_score": 0,
|
||||
"match_count": 0,
|
||||
"code_match_score": 0,
|
||||
"matched_facts": [],
|
||||
}
|
||||
|
||||
components = fact.split("|") if "|" in fact else [fact]
|
||||
|
||||
code_match_score = 0
|
||||
if len(components) == 3:
|
||||
code_ref = components[2].strip()
|
||||
code_tokens = self.preprocess_text(code_ref)
|
||||
code_match_score = len(set(query_tokens) & set(code_tokens)) / len(
|
||||
query_tokens
|
||||
)
|
||||
|
||||
file_data[file_path]["total_score"] += score
|
||||
file_data[file_path]["match_count"] += 1
|
||||
file_data[file_path]["code_match_score"] = max(
|
||||
file_data[file_path]["code_match_score"], code_match_score
|
||||
)
|
||||
file_data[file_path]["matched_facts"].append(fact)
|
||||
|
||||
return file_data
|
||||
|
||||
def refresh_index(self) -> None:
|
||||
"""Convenience method for a full rebuild."""
|
||||
self.build_search_index(clear_cache=True)
|
||||
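End to end, the manager above is used roughly as follows; the docs directory and logger wiring are assumptions for illustration.

# Sketch: build the fact index for ~/.crawl4ai/docs, then query it
import asyncio
from pathlib import Path
from crawl4ai.async_logger import AsyncLogger
from crawl4ai.llmtxt import AsyncLLMTextManager

docs_dir = Path.home() / ".crawl4ai" / "docs"
manager = AsyncLLMTextManager(docs_dir, AsyncLogger(verbose=True))

asyncio.run(manager.generate_index_files())      # LLM-generated .q.md facts + BM25 index
print(manager.search("how do I set a proxy?", top_k=3))
print(manager.generate(["browser"], mode="condensed"))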
253
crawl4ai/markdown_generation_strategy.py
Normal file
@@ -0,0 +1,253 @@
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Optional, Dict, Any, Tuple
|
||||
from .models import MarkdownGenerationResult
|
||||
from .html2text import CustomHTML2Text
|
||||
from .content_filter_strategy import RelevantContentFilter
|
||||
import re
|
||||
from urllib.parse import urljoin
|
||||
|
||||
# Pre-compile the regex pattern
|
||||
LINK_PATTERN = re.compile(r'!?\[([^\]]+)\]\(([^)]+?)(?:\s+"([^"]*)")?\)')
|
||||
|
||||
|
||||
def fast_urljoin(base: str, url: str) -> str:
|
||||
"""Fast URL joining for common cases."""
|
||||
if url.startswith(("http://", "https://", "mailto:", "//")):
|
||||
return url
|
||||
if url.startswith("/"):
|
||||
# Handle absolute paths
|
||||
if base.endswith("/"):
|
||||
return base[:-1] + url
|
||||
return base + url
|
||||
return urljoin(base, url)
|
||||
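A few illustrative calls showing the fast paths versus the urljoin fallback:

# Sketch: absolute URLs pass through, leading-slash paths are string-joined onto the base,
# and everything else falls back to urllib.parse.urljoin
assert fast_urljoin("https://site.com/docs/", "https://other.com/x") == "https://other.com/x"
assert fast_urljoin("https://site.com", "/img/logo.png") == "https://site.com/img/logo.png"
assert fast_urljoin("https://site.com/docs/", "page.html") == "https://site.com/docs/page.html"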
|
||||
|
||||
class MarkdownGenerationStrategy(ABC):
|
||||
"""Abstract base class for markdown generation strategies."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
content_filter: Optional[RelevantContentFilter] = None,
|
||||
options: Optional[Dict[str, Any]] = None,
|
||||
):
|
||||
self.content_filter = content_filter
|
||||
self.options = options or {}
|
||||
|
||||
@abstractmethod
|
||||
def generate_markdown(
|
||||
self,
|
||||
cleaned_html: str,
|
||||
base_url: str = "",
|
||||
html2text_options: Optional[Dict[str, Any]] = None,
|
||||
content_filter: Optional[RelevantContentFilter] = None,
|
||||
citations: bool = True,
|
||||
**kwargs,
|
||||
) -> MarkdownGenerationResult:
|
||||
"""Generate markdown from cleaned HTML."""
|
||||
pass
|
||||
|
||||
|
||||
class DefaultMarkdownGenerator(MarkdownGenerationStrategy):
|
||||
"""
|
||||
Default implementation of markdown generation strategy.
|
||||
|
||||
How it works:
|
||||
1. Generate raw markdown from cleaned HTML.
|
||||
2. Convert links to citations.
|
||||
3. Generate fit markdown if content filter is provided.
|
||||
4. Return MarkdownGenerationResult.
|
||||
|
||||
Args:
|
||||
content_filter (Optional[RelevantContentFilter]): Content filter for generating fit markdown.
|
||||
options (Optional[Dict[str, Any]]): Additional options for markdown generation. Defaults to None.
|
||||
|
||||
Returns:
|
||||
MarkdownGenerationResult: Result containing raw markdown, fit markdown, fit HTML, and references markdown.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
content_filter: Optional[RelevantContentFilter] = None,
|
||||
options: Optional[Dict[str, Any]] = None,
|
||||
):
|
||||
super().__init__(content_filter, options)
|
||||
|
||||
def convert_links_to_citations(
|
||||
self, markdown: str, base_url: str = ""
|
||||
) -> Tuple[str, str]:
|
||||
"""
|
||||
Convert links in markdown to citations.
|
||||
|
||||
How it works:
|
||||
1. Find all links in the markdown.
|
||||
2. Convert links to citations.
|
||||
3. Return converted markdown and references markdown.
|
||||
|
||||
Note:
|
||||
This function uses a regex pattern to find links in markdown.
|
||||
|
||||
Args:
|
||||
markdown (str): Markdown text.
|
||||
base_url (str): Base URL for URL joins.
|
||||
|
||||
Returns:
|
||||
Tuple[str, str]: Converted markdown and references markdown.
|
||||
"""
|
||||
link_map = {}
|
||||
url_cache = {} # Cache for URL joins
|
||||
parts = []
|
||||
last_end = 0
|
||||
counter = 1
|
||||
|
||||
for match in LINK_PATTERN.finditer(markdown):
|
||||
parts.append(markdown[last_end : match.start()])
|
||||
text, url, title = match.groups()
|
||||
|
||||
# Use cached URL if available, otherwise compute and cache
|
||||
if base_url and not url.startswith(("http://", "https://", "mailto:")):
|
||||
if url not in url_cache:
|
||||
url_cache[url] = fast_urljoin(base_url, url)
|
||||
url = url_cache[url]
|
||||
|
||||
if url not in link_map:
|
||||
desc = []
|
||||
if title:
|
||||
desc.append(title)
|
||||
if text and text != title:
|
||||
desc.append(text)
|
||||
link_map[url] = (counter, ": " + " - ".join(desc) if desc else "")
|
||||
counter += 1
|
||||
|
||||
num = link_map[url][0]
|
||||
parts.append(
|
||||
f"{text}⟨{num}⟩"
|
||||
if not match.group(0).startswith("!")
|
||||
else f"![{text}⟨{num}⟩]"
|
||||
)
|
||||
last_end = match.end()
|
||||
|
||||
parts.append(markdown[last_end:])
|
||||
converted_text = "".join(parts)
|
||||
|
||||
# Pre-build reference strings
|
||||
references = ["\n\n## References\n\n"]
|
||||
references.extend(
|
||||
f"⟨{num}⟩ {url}{desc}\n"
|
||||
for url, (num, desc) in sorted(link_map.items(), key=lambda x: x[1][0])
|
||||
)
|
||||
|
||||
return converted_text, "".join(references)
|
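A rough sketch of what this method produces, assuming a DefaultMarkdownGenerator instance (the sample strings are illustrative, not taken from the diff):

gen = DefaultMarkdownGenerator()
text = 'See [the docs](/guide "Guide") and ![logo](/logo.png).'
body, refs = gen.convert_links_to_citations(text, base_url="https://example.com")
# body -> 'See the docs⟨1⟩ and ![logo⟨2⟩].'
# refs -> '\n\n## References\n\n⟨1⟩ https://example.com/guide: Guide - the docs\n⟨2⟩ https://example.com/logo.png: logo\n'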
||||
|
||||
def generate_markdown(
|
||||
self,
|
||||
cleaned_html: str,
|
||||
base_url: str = "",
|
||||
html2text_options: Optional[Dict[str, Any]] = None,
|
||||
options: Optional[Dict[str, Any]] = None,
|
||||
content_filter: Optional[RelevantContentFilter] = None,
|
||||
citations: bool = True,
|
||||
**kwargs,
|
||||
) -> MarkdownGenerationResult:
|
||||
"""
|
||||
Generate markdown with citations from cleaned HTML.
|
||||
|
||||
How it works:
|
||||
1. Generate raw markdown from cleaned HTML.
|
||||
2. Convert links to citations.
|
||||
3. Generate fit markdown if content filter is provided.
|
||||
4. Return MarkdownGenerationResult.
|
||||
|
||||
Args:
|
||||
cleaned_html (str): Cleaned HTML content.
|
||||
base_url (str): Base URL for URL joins.
|
||||
html2text_options (Optional[Dict[str, Any]]): HTML2Text options.
|
||||
options (Optional[Dict[str, Any]]): Additional options for markdown generation.
|
||||
content_filter (Optional[RelevantContentFilter]): Content filter for generating fit markdown.
|
||||
citations (bool): Whether to generate citations.
|
||||
|
||||
Returns:
|
||||
MarkdownGenerationResult: Result containing raw markdown, fit markdown, fit HTML, and references markdown.
|
||||
"""
|
||||
try:
|
||||
# Initialize HTML2Text with default options for better conversion
|
||||
h = CustomHTML2Text(baseurl=base_url)
|
||||
default_options = {
|
||||
"body_width": 0, # Disable text wrapping
|
||||
"ignore_emphasis": False,
|
||||
"ignore_links": False,
|
||||
"ignore_images": False,
|
||||
"protect_links": True,
|
||||
"single_line_break": True,
|
||||
"mark_code": True,
|
||||
"escape_snob": False,
|
||||
}
|
||||
|
||||
# Update with custom options if provided
|
||||
if html2text_options:
|
||||
default_options.update(html2text_options)
|
||||
elif options:
|
||||
default_options.update(options)
|
||||
elif self.options:
|
||||
default_options.update(self.options)
|
||||
|
||||
h.update_params(**default_options)
|
||||
|
||||
# Ensure we have valid input
|
||||
if not cleaned_html:
|
||||
cleaned_html = ""
|
||||
elif not isinstance(cleaned_html, str):
|
||||
cleaned_html = str(cleaned_html)
|
||||
|
||||
# Generate raw markdown
|
||||
try:
|
||||
raw_markdown = h.handle(cleaned_html)
|
||||
except Exception as e:
|
||||
raw_markdown = f"Error converting HTML to markdown: {str(e)}"
|
||||
|
||||
raw_markdown = raw_markdown.replace(" ```", "```")
|
||||
|
||||
# Convert links to citations
|
||||
markdown_with_citations: str = raw_markdown
|
||||
references_markdown: str = ""
|
||||
if citations:
|
||||
try:
|
||||
(
|
||||
markdown_with_citations,
|
||||
references_markdown,
|
||||
) = self.convert_links_to_citations(raw_markdown, base_url)
|
||||
except Exception as e:
|
||||
markdown_with_citations = raw_markdown
|
||||
references_markdown = f"Error generating citations: {str(e)}"
|
||||
|
||||
# Generate fit markdown if content filter is provided
|
||||
fit_markdown: Optional[str] = ""
|
||||
filtered_html: Optional[str] = ""
|
||||
if content_filter or self.content_filter:
|
||||
try:
|
||||
content_filter = content_filter or self.content_filter
|
||||
filtered_html = content_filter.filter_content(cleaned_html)
|
||||
filtered_html = "\n".join(
|
||||
"<div>{}</div>".format(s) for s in filtered_html
|
||||
)
|
||||
fit_markdown = h.handle(filtered_html)
|
||||
except Exception as e:
|
||||
fit_markdown = f"Error generating fit markdown: {str(e)}"
|
||||
filtered_html = ""
|
||||
|
||||
return MarkdownGenerationResult(
|
||||
raw_markdown=raw_markdown or "",
|
||||
markdown_with_citations=markdown_with_citations or "",
|
||||
references_markdown=references_markdown or "",
|
||||
fit_markdown=fit_markdown or "",
|
||||
fit_html=filtered_html or "",
|
||||
)
|
||||
except Exception as e:
|
||||
# If anything fails, return empty strings with error message
|
||||
error_msg = f"Error in markdown generation: {str(e)}"
|
||||
return MarkdownGenerationResult(
|
||||
raw_markdown=error_msg,
|
||||
markdown_with_citations=error_msg,
|
||||
references_markdown="",
|
||||
fit_markdown="",
|
||||
fit_html="",
|
||||
)
|
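A minimal usage sketch for the generator added above (illustrative; the import path follows the new module name in this diff):

from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator

generator = DefaultMarkdownGenerator(options={"ignore_images": True})
result = generator.generate_markdown(
    cleaned_html="<h1>Hello</h1><p>See <a href='/docs'>the docs</a>.</p>",
    base_url="https://example.com",
    citations=True,
)
print(result.raw_markdown)             # plain markdown conversion
print(result.markdown_with_citations)  # links replaced by ⟨n⟩ markers
print(result.references_markdown)      # "## References" section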
||||
194
crawl4ai/migrations.py
Normal file
@@ -0,0 +1,194 @@
|
||||
import os
|
||||
import asyncio
|
||||
from pathlib import Path
|
||||
import aiosqlite
|
||||
from typing import Optional
|
||||
import xxhash
|
||||
import aiofiles
|
||||
import shutil
|
||||
from datetime import datetime
|
||||
from .async_logger import AsyncLogger, LogLevel
|
||||
|
||||
# Initialize logger
|
||||
logger = AsyncLogger(log_level=LogLevel.DEBUG, verbose=True)
|
||||
|
||||
# logging.basicConfig(level=logging.INFO)
|
||||
# logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class DatabaseMigration:
|
||||
def __init__(self, db_path: str):
|
||||
self.db_path = db_path
|
||||
self.content_paths = self._ensure_content_dirs(os.path.dirname(db_path))
|
||||
|
||||
def _ensure_content_dirs(self, base_path: str) -> dict:
|
||||
dirs = {
|
||||
"html": "html_content",
|
||||
"cleaned": "cleaned_html",
|
||||
"markdown": "markdown_content",
|
||||
"extracted": "extracted_content",
|
||||
"screenshots": "screenshots",
|
||||
}
|
||||
content_paths = {}
|
||||
for key, dirname in dirs.items():
|
||||
path = os.path.join(base_path, dirname)
|
||||
os.makedirs(path, exist_ok=True)
|
||||
content_paths[key] = path
|
||||
return content_paths
|
||||
|
||||
def _generate_content_hash(self, content: str) -> str:
|
||||
x = xxhash.xxh64()
|
||||
x.update(content.encode())
|
||||
content_hash = x.hexdigest()
|
||||
return content_hash
|
||||
# return hashlib.sha256(content.encode()).hexdigest()
|
||||
|
||||
async def _store_content(self, content: str, content_type: str) -> str:
|
||||
if not content:
|
||||
return ""
|
||||
|
||||
content_hash = self._generate_content_hash(content)
|
||||
file_path = os.path.join(self.content_paths[content_type], content_hash)
|
||||
|
||||
if not os.path.exists(file_path):
|
||||
async with aiofiles.open(file_path, "w", encoding="utf-8") as f:
|
||||
await f.write(content)
|
||||
|
||||
return content_hash
|
||||
|
||||
async def migrate_database(self):
|
||||
"""Migrate existing database to file-based storage"""
|
||||
# logger.info("Starting database migration...")
|
||||
logger.info("Starting database migration...", tag="INIT")
|
||||
|
||||
try:
|
||||
async with aiosqlite.connect(self.db_path) as db:
|
||||
# Get all rows
|
||||
async with db.execute(
|
||||
"""SELECT url, html, cleaned_html, markdown,
|
||||
extracted_content, screenshot FROM crawled_data"""
|
||||
) as cursor:
|
||||
rows = await cursor.fetchall()
|
||||
|
||||
migrated_count = 0
|
||||
for row in rows:
|
||||
(
|
||||
url,
|
||||
html,
|
||||
cleaned_html,
|
||||
markdown,
|
||||
extracted_content,
|
||||
screenshot,
|
||||
) = row
|
||||
|
||||
# Store content in files and get hashes
|
||||
html_hash = await self._store_content(html, "html")
|
||||
cleaned_hash = await self._store_content(cleaned_html, "cleaned")
|
||||
markdown_hash = await self._store_content(markdown, "markdown")
|
||||
extracted_hash = await self._store_content(
|
||||
extracted_content, "extracted"
|
||||
)
|
||||
screenshot_hash = await self._store_content(
|
||||
screenshot, "screenshots"
|
||||
)
|
||||
|
||||
# Update database with hashes
|
||||
await db.execute(
|
||||
"""
|
||||
UPDATE crawled_data
|
||||
SET html = ?,
|
||||
cleaned_html = ?,
|
||||
markdown = ?,
|
||||
extracted_content = ?,
|
||||
screenshot = ?
|
||||
WHERE url = ?
|
||||
""",
|
||||
(
|
||||
html_hash,
|
||||
cleaned_hash,
|
||||
markdown_hash,
|
||||
extracted_hash,
|
||||
screenshot_hash,
|
||||
url,
|
||||
),
|
||||
)
|
||||
|
||||
migrated_count += 1
|
||||
if migrated_count % 100 == 0:
|
||||
logger.info(f"Migrated {migrated_count} records...", tag="INIT")
|
||||
|
||||
await db.commit()
|
||||
logger.success(
|
||||
f"Migration completed. {migrated_count} records processed.",
|
||||
tag="COMPLETE",
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
# logger.error(f"Migration failed: {e}")
|
||||
logger.error(
|
||||
message="Migration failed: {error}",
|
||||
tag="ERROR",
|
||||
params={"error": str(e)},
|
||||
)
|
||||
raise e
|
||||
|
||||
|
||||
async def backup_database(db_path: str) -> str:
|
||||
"""Create backup of existing database"""
|
||||
if not os.path.exists(db_path):
|
||||
logger.info("No existing database found. Skipping backup.", tag="INIT")
|
||||
return None
|
||||
|
||||
# Create backup with timestamp
|
||||
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
|
||||
backup_path = f"{db_path}.backup_{timestamp}"
|
||||
|
||||
try:
|
||||
# Wait for any potential write operations to finish
|
||||
await asyncio.sleep(1)
|
||||
|
||||
# Create backup
|
||||
shutil.copy2(db_path, backup_path)
|
||||
logger.info(f"Database backup created at: {backup_path}", tag="COMPLETE")
|
||||
return backup_path
|
||||
except Exception as e:
|
||||
# logger.error(f"Backup failed: {e}")
|
||||
logger.error(
|
||||
message="Migration failed: {error}", tag="ERROR", params={"error": str(e)}
|
||||
)
|
||||
raise e
|
||||
|
||||
|
||||
async def run_migration(db_path: Optional[str] = None):
|
||||
"""Run database migration"""
|
||||
if db_path is None:
|
||||
db_path = os.path.join(Path.home(), ".crawl4ai", "crawl4ai.db")
|
||||
|
||||
if not os.path.exists(db_path):
|
||||
logger.info("No existing database found. Skipping migration.", tag="INIT")
|
||||
return
|
||||
|
||||
# Create backup first
|
||||
backup_path = await backup_database(db_path)
|
||||
if not backup_path:
|
||||
return
|
||||
|
||||
migration = DatabaseMigration(db_path)
|
||||
await migration.migrate_database()
|
||||
|
||||
|
||||
def main():
|
||||
"""CLI entry point for migration"""
|
||||
import argparse
|
||||
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Migrate Crawl4AI database to file-based storage"
|
||||
)
|
||||
parser.add_argument("--db-path", help="Custom database path")
|
||||
args = parser.parse_args()
|
||||
|
||||
asyncio.run(run_migration(args.db_path))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
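The migration can be run through the CLI entry point above or directly from async code; a brief sketch (the custom path is illustrative):

import asyncio
from crawl4ai.migrations import run_migration

# Uses ~/.crawl4ai/crawl4ai.db by default: a timestamped backup is created first,
# then the html/cleaned_html/markdown/extracted_content/screenshot columns are
# replaced with content hashes pointing at files under the content directories.
asyncio.run(run_migration())

# Or against a custom database path:
asyncio.run(run_migration(db_path="/tmp/crawl4ai_test/crawl4ai.db"))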
||||
@@ -2,109 +2,125 @@ from functools import lru_cache
|
||||
from pathlib import Path
|
||||
import subprocess, os
|
||||
import shutil
|
||||
import tarfile
|
||||
from .model_loader import *
|
||||
import argparse
|
||||
import urllib.request
|
||||
from crawl4ai.config import MODEL_REPO_BRANCH
|
||||
|
||||
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
|
||||
|
||||
|
||||
@lru_cache()
|
||||
def get_available_memory(device):
|
||||
import torch
|
||||
if device.type == 'cuda':
|
||||
|
||||
if device.type == "cuda":
|
||||
return torch.cuda.get_device_properties(device).total_memory
|
||||
elif device.type == 'mps':
|
||||
return 48 * 1024 ** 3 # Assume up to 48GB of unified memory on MPS
|
||||
elif device.type == "mps":
|
||||
return 48 * 1024**3  # Assume up to 48GB of unified memory on MPS
|
||||
else:
|
||||
return 0
|
||||
|
||||
|
||||
@lru_cache()
|
||||
def calculate_batch_size(device):
|
||||
available_memory = get_available_memory(device)
|
||||
|
||||
if device.type == 'cpu':
|
||||
|
||||
if device.type == "cpu":
|
||||
return 16
|
||||
elif device.type in ['cuda', 'mps']:
|
||||
elif device.type in ["cuda", "mps"]:
|
||||
# Adjust these thresholds based on your model size and available memory
|
||||
if available_memory >= 31 * 1024 ** 3: # > 32GB
|
||||
if available_memory >= 31 * 1024**3: # > 32GB
|
||||
return 256
|
||||
elif available_memory >= 15 * 1024 ** 3: # > 16GB to 32GB
|
||||
elif available_memory >= 15 * 1024**3: # > 16GB to 32GB
|
||||
return 128
|
||||
elif available_memory >= 8 * 1024 ** 3: # 8GB to 16GB
|
||||
elif available_memory >= 8 * 1024**3: # 8GB to 16GB
|
||||
return 64
|
||||
else:
|
||||
return 32
|
||||
else:
|
||||
return 16 # Default batch size
|
||||
|
||||
return 16 # Default batch size
|
||||
|
||||
|
||||
@lru_cache()
|
||||
def get_device():
|
||||
import torch
|
||||
|
||||
if torch.cuda.is_available():
|
||||
device = torch.device('cuda')
|
||||
device = torch.device("cuda")
|
||||
elif torch.backends.mps.is_available():
|
||||
device = torch.device('mps')
|
||||
device = torch.device("mps")
|
||||
else:
|
||||
device = torch.device('cpu')
|
||||
return device
|
||||
|
||||
device = torch.device("cpu")
|
||||
return device
|
||||
|
||||
|
||||
def set_model_device(model):
|
||||
device = get_device()
|
||||
model.to(device)
|
||||
model.to(device)
|
||||
return model, device
|
||||
|
||||
|
||||
@lru_cache()
|
||||
def get_home_folder():
|
||||
home_folder = os.path.join(Path.home(), ".crawl4ai")
|
||||
home_folder = os.path.join(
|
||||
os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".crawl4ai"
|
||||
)
|
||||
os.makedirs(home_folder, exist_ok=True)
|
||||
os.makedirs(f"{home_folder}/cache", exist_ok=True)
|
||||
os.makedirs(f"{home_folder}/models", exist_ok=True)
|
||||
return home_folder
|
||||
return home_folder
|
||||
|
||||
|
||||
@lru_cache()
|
||||
def load_bert_base_uncased():
|
||||
from transformers import BertTokenizer, BertModel, AutoTokenizer, AutoModel
|
||||
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', resume_download=None)
|
||||
model = BertModel.from_pretrained('bert-base-uncased', resume_download=None)
|
||||
from transformers import BertTokenizer, BertModel
|
||||
|
||||
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", resume_download=None)
|
||||
model = BertModel.from_pretrained("bert-base-uncased", resume_download=None)
|
||||
model.eval()
|
||||
model, device = set_model_device(model)
|
||||
return tokenizer, model
|
||||
|
||||
|
||||
@lru_cache()
|
||||
def load_HF_embedding_model(model_name="BAAI/bge-small-en-v1.5") -> tuple:
|
||||
"""Load the Hugging Face model for embedding.
|
||||
|
||||
|
||||
Args:
|
||||
model_name (str, optional): The model name to load. Defaults to "BAAI/bge-small-en-v1.5".
|
||||
|
||||
|
||||
Returns:
|
||||
tuple: The tokenizer and model.
|
||||
"""
|
||||
from transformers import BertTokenizer, BertModel, AutoTokenizer, AutoModel
|
||||
from transformers import AutoTokenizer, AutoModel
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained(model_name, resume_download=None)
|
||||
model = AutoModel.from_pretrained(model_name, resume_download=None)
|
||||
model.eval()
|
||||
model, device = set_model_device(model)
|
||||
return tokenizer, model
|
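A sketch of how the returned tokenizer/model pair might be used to embed a few sentences; the mean pooling here is this example's choice, not something the loader prescribes:

import torch

tokenizer, model = load_HF_embedding_model("BAAI/bge-small-en-v1.5")
texts = ["Crawl4AI extracts markdown.", "SSL certificates can be exported."]
inputs = tokenizer(texts, padding=True, truncation=True, return_tensors="pt")
inputs = {k: v.to(next(model.parameters()).device) for k, v in inputs.items()}
with torch.no_grad():
    hidden = model(**inputs).last_hidden_state     # (batch, seq_len, hidden_dim)
mask = inputs["attention_mask"].unsqueeze(-1)      # (batch, seq_len, 1)
embeddings = (hidden * mask).sum(1) / mask.sum(1)  # mean-pooled sentence vectors
print(embeddings.shape)                            # torch.Size([2, 384]) for bge-small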
||||
|
||||
|
||||
@lru_cache()
|
||||
def load_text_classifier():
|
||||
from transformers import AutoTokenizer, AutoModelForSequenceClassification
|
||||
from transformers import pipeline
|
||||
import torch
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained("dstefa/roberta-base_topic_classification_nyt_news")
|
||||
model = AutoModelForSequenceClassification.from_pretrained("dstefa/roberta-base_topic_classification_nyt_news")
|
||||
tokenizer = AutoTokenizer.from_pretrained(
|
||||
"dstefa/roberta-base_topic_classification_nyt_news"
|
||||
)
|
||||
model = AutoModelForSequenceClassification.from_pretrained(
|
||||
"dstefa/roberta-base_topic_classification_nyt_news"
|
||||
)
|
||||
model.eval()
|
||||
model, device = set_model_device(model)
|
||||
pipe = pipeline("text-classification", model=model, tokenizer=tokenizer)
|
||||
return pipe
|
||||
|
||||
|
||||
@lru_cache()
|
||||
def load_text_multilabel_classifier():
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer
|
||||
import numpy as np
|
||||
from scipy.special import expit
|
||||
import torch
|
||||
|
||||
@@ -116,18 +132,27 @@ def load_text_multilabel_classifier():
|
||||
# else:
|
||||
# device = torch.device("cpu")
|
||||
# # return load_spacy_model(), torch.device("cpu")
|
||||
|
||||
|
||||
MODEL = "cardiffnlp/tweet-topic-21-multi"
|
||||
tokenizer = AutoTokenizer.from_pretrained(MODEL, resume_download=None)
|
||||
model = AutoModelForSequenceClassification.from_pretrained(MODEL, resume_download=None)
|
||||
model = AutoModelForSequenceClassification.from_pretrained(
|
||||
MODEL, resume_download=None
|
||||
)
|
||||
model.eval()
|
||||
model, device = set_model_device(model)
|
||||
class_mapping = model.config.id2label
|
||||
|
||||
def _classifier(texts, threshold=0.5, max_length=64):
|
||||
tokens = tokenizer(texts, return_tensors='pt', padding=True, truncation=True, max_length=max_length)
|
||||
tokens = {key: val.to(device) for key, val in tokens.items()} # Move tokens to the selected device
|
||||
tokens = tokenizer(
|
||||
texts,
|
||||
return_tensors="pt",
|
||||
padding=True,
|
||||
truncation=True,
|
||||
max_length=max_length,
|
||||
)
|
||||
tokens = {
|
||||
key: val.to(device) for key, val in tokens.items()
|
||||
} # Move tokens to the selected device
|
||||
|
||||
with torch.no_grad():
|
||||
output = model(**tokens)
|
||||
@@ -138,35 +163,41 @@ def load_text_multilabel_classifier():
|
||||
|
||||
batch_labels = []
|
||||
for prediction in predictions:
|
||||
labels = [class_mapping[i] for i, value in enumerate(prediction) if value == 1]
|
||||
labels = [
|
||||
class_mapping[i] for i, value in enumerate(prediction) if value == 1
|
||||
]
|
||||
batch_labels.append(labels)
|
||||
|
||||
return batch_labels
|
||||
|
||||
return _classifier, device
|
||||
|
||||
|
||||
@lru_cache()
|
||||
def load_nltk_punkt():
|
||||
import nltk
|
||||
|
||||
try:
|
||||
nltk.data.find('tokenizers/punkt')
|
||||
nltk.data.find("tokenizers/punkt")
|
||||
except LookupError:
|
||||
nltk.download('punkt')
|
||||
return nltk.data.find('tokenizers/punkt')
|
||||
nltk.download("punkt")
|
||||
return nltk.data.find("tokenizers/punkt")
|
||||
|
||||
|
||||
@lru_cache()
|
||||
def load_spacy_model():
|
||||
import spacy
|
||||
|
||||
name = "models/reuters"
|
||||
home_folder = get_home_folder()
|
||||
model_folder = Path(home_folder) / name
|
||||
|
||||
|
||||
# Check if the model directory already exists
|
||||
if not (model_folder.exists() and any(model_folder.iterdir())):
|
||||
repo_url = "https://github.com/unclecode/crawl4ai.git"
|
||||
branch = MODEL_REPO_BRANCH
|
||||
branch = MODEL_REPO_BRANCH
|
||||
repo_folder = Path(home_folder) / "crawl4ai"
|
||||
|
||||
|
||||
print("[LOG] ⏬ Downloading Spacy model for the first time...")
|
||||
|
||||
# Remove existing repo folder if it exists
|
||||
@@ -176,7 +207,9 @@ def load_spacy_model():
|
||||
if model_folder.exists():
|
||||
shutil.rmtree(model_folder)
|
||||
except PermissionError:
|
||||
print("[WARNING] Unable to remove existing folders. Please manually delete the following folders and try again:")
|
||||
print(
|
||||
"[WARNING] Unable to remove existing folders. Please manually delete the following folders and try again:"
|
||||
)
|
||||
print(f"- {repo_folder}")
|
||||
print(f"- {model_folder}")
|
||||
return None
|
||||
@@ -187,7 +220,7 @@ def load_spacy_model():
|
||||
["git", "clone", "-b", branch, repo_url, str(repo_folder)],
|
||||
stdout=subprocess.DEVNULL,
|
||||
stderr=subprocess.DEVNULL,
|
||||
check=True
|
||||
check=True,
|
||||
)
|
||||
|
||||
# Create the models directory if it doesn't exist
|
||||
@@ -215,6 +248,7 @@ def load_spacy_model():
|
||||
print(f"Error loading spacy model: {e}")
|
||||
return None
|
||||
|
||||
|
||||
def download_all_models(remove_existing=False):
|
||||
"""Download all models required for Crawl4AI."""
|
||||
if remove_existing:
|
||||
@@ -243,14 +277,20 @@ def download_all_models(remove_existing=False):
|
||||
load_nltk_punkt()
|
||||
print("[LOG] ✅ All models downloaded successfully.")
|
||||
|
||||
|
||||
def main():
|
||||
print("[LOG] Welcome to the Crawl4AI Model Downloader!")
|
||||
print("[LOG] This script will download all the models required for Crawl4AI.")
|
||||
parser = argparse.ArgumentParser(description="Crawl4AI Model Downloader")
|
||||
parser.add_argument('--remove-existing', action='store_true', help="Remove existing models before downloading")
|
||||
parser.add_argument(
|
||||
"--remove-existing",
|
||||
action="store_true",
|
||||
help="Remove existing models before downloading",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
|
||||
download_all_models(remove_existing=args.remove_existing)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
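For context, a small sketch of how the device and batch-size helpers above combine; the values in the comments follow the thresholds in calculate_batch_size:

device = get_device()                    # cuda, mps, or cpu
batch_size = calculate_batch_size(device)
# cpu                       -> 16
# cuda/mps with >= 31 GB    -> 256
# cuda/mps with >= 15 GB    -> 128
# cuda/mps with >=  8 GB    -> 64
# cuda/mps with  <  8 GB    -> 32
print(device, batch_size)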
||||
|
||||
@@ -1,10 +1,100 @@
|
||||
from pydantic import BaseModel, HttpUrl
|
||||
from typing import List, Dict, Optional
|
||||
from typing import List, Dict, Optional, Callable, Awaitable, Union, Any
|
||||
from enum import Enum
|
||||
from dataclasses import dataclass
|
||||
from .ssl_certificate import SSLCertificate
|
||||
from datetime import datetime
|
||||
from datetime import timedelta
|
||||
|
||||
|
||||
###############################
|
||||
# Dispatcher Models
|
||||
###############################
|
||||
@dataclass
|
||||
class DomainState:
|
||||
last_request_time: float = 0
|
||||
current_delay: float = 0
|
||||
fail_count: int = 0
|
||||
|
||||
|
||||
@dataclass
|
||||
class CrawlerTaskResult:
|
||||
task_id: str
|
||||
url: str
|
||||
result: "CrawlResult"
|
||||
memory_usage: float
|
||||
peak_memory: float
|
||||
start_time: datetime
|
||||
end_time: datetime
|
||||
error_message: str = ""
|
||||
|
||||
|
||||
class CrawlStatus(Enum):
|
||||
QUEUED = "QUEUED"
|
||||
IN_PROGRESS = "IN_PROGRESS"
|
||||
COMPLETED = "COMPLETED"
|
||||
FAILED = "FAILED"
|
||||
|
||||
|
||||
@dataclass
|
||||
class CrawlStats:
|
||||
task_id: str
|
||||
url: str
|
||||
status: CrawlStatus
|
||||
start_time: Optional[datetime] = None
|
||||
end_time: Optional[datetime] = None
|
||||
memory_usage: float = 0.0
|
||||
peak_memory: float = 0.0
|
||||
error_message: str = ""
|
||||
|
||||
@property
|
||||
def duration(self) -> str:
|
||||
if not self.start_time:
|
||||
return "0:00"
|
||||
end = self.end_time or datetime.now()
|
||||
duration = end - self.start_time
|
||||
return str(timedelta(seconds=int(duration.total_seconds())))
|
||||
|
||||
|
||||
class DisplayMode(Enum):
|
||||
DETAILED = "DETAILED"
|
||||
AGGREGATED = "AGGREGATED"
|
||||
|
||||
|
||||
###############################
|
||||
# Crawler Models
|
||||
###############################
|
||||
@dataclass
|
||||
class TokenUsage:
|
||||
completion_tokens: int = 0
|
||||
prompt_tokens: int = 0
|
||||
total_tokens: int = 0
|
||||
completion_tokens_details: Optional[dict] = None
|
||||
prompt_tokens_details: Optional[dict] = None
|
||||
|
||||
|
||||
class UrlModel(BaseModel):
|
||||
url: HttpUrl
|
||||
forced: bool = False
|
||||
|
||||
|
||||
class MarkdownGenerationResult(BaseModel):
|
||||
raw_markdown: str
|
||||
markdown_with_citations: str
|
||||
references_markdown: str
|
||||
fit_markdown: Optional[str] = None
|
||||
fit_html: Optional[str] = None
|
||||
|
||||
|
||||
class DispatchResult(BaseModel):
|
||||
task_id: str
|
||||
memory_usage: float
|
||||
peak_memory: float
|
||||
start_time: datetime
|
||||
end_time: datetime
|
||||
error_message: str = ""
|
||||
|
||||
|
||||
class CrawlResult(BaseModel):
|
||||
url: str
|
||||
html: str
|
||||
@@ -12,8 +102,11 @@ class CrawlResult(BaseModel):
|
||||
cleaned_html: Optional[str] = None
|
||||
media: Dict[str, List[Dict]] = {}
|
||||
links: Dict[str, List[Dict]] = {}
|
||||
downloaded_files: Optional[List[str]] = None
|
||||
screenshot: Optional[str] = None
|
||||
markdown: Optional[str] = None
|
||||
pdf: Optional[bytes] = None
|
||||
markdown: Optional[Union[str, MarkdownGenerationResult]] = None
|
||||
markdown_v2: Optional[MarkdownGenerationResult] = None
|
||||
fit_markdown: Optional[str] = None
|
||||
fit_html: Optional[str] = None
|
||||
extracted_content: Optional[str] = None
|
||||
@@ -21,4 +114,69 @@ class CrawlResult(BaseModel):
|
||||
error_message: Optional[str] = None
|
||||
session_id: Optional[str] = None
|
||||
response_headers: Optional[dict] = None
|
||||
status_code: Optional[int] = None
|
||||
status_code: Optional[int] = None
|
||||
ssl_certificate: Optional[SSLCertificate] = None
|
||||
dispatch_result: Optional[DispatchResult] = None
|
||||
redirected_url: Optional[str] = None
|
||||
|
||||
class Config:
|
||||
arbitrary_types_allowed = True
|
||||
|
||||
|
||||
class AsyncCrawlResponse(BaseModel):
|
||||
html: str
|
||||
response_headers: Dict[str, str]
|
||||
status_code: int
|
||||
screenshot: Optional[str] = None
|
||||
pdf_data: Optional[bytes] = None
|
||||
get_delayed_content: Optional[Callable[[Optional[float]], Awaitable[str]]] = None
|
||||
downloaded_files: Optional[List[str]] = None
|
||||
ssl_certificate: Optional[SSLCertificate] = None
|
||||
redirected_url: Optional[str] = None
|
||||
|
||||
class Config:
|
||||
arbitrary_types_allowed = True
|
||||
|
||||
|
||||
###############################
|
||||
# Scraping Models
|
||||
###############################
|
||||
class MediaItem(BaseModel):
|
||||
src: Optional[str] = ""
|
||||
alt: Optional[str] = ""
|
||||
desc: Optional[str] = ""
|
||||
score: Optional[int] = 0
|
||||
type: str = "image"
|
||||
group_id: Optional[int] = 0
|
||||
format: Optional[str] = None
|
||||
width: Optional[int] = None
|
||||
|
||||
|
||||
class Link(BaseModel):
|
||||
href: Optional[str] = ""
|
||||
text: Optional[str] = ""
|
||||
title: Optional[str] = ""
|
||||
base_domain: Optional[str] = ""
|
||||
|
||||
|
||||
class Media(BaseModel):
|
||||
images: List[MediaItem] = []
|
||||
videos: List[
|
||||
MediaItem
|
||||
] = [] # Using MediaItem model for now, can be extended with Video model if needed
|
||||
audios: List[
|
||||
MediaItem
|
||||
] = [] # Using MediaItem model for now, can be extended with Audio model if needed
|
||||
|
||||
|
||||
class Links(BaseModel):
|
||||
internal: List[Link] = []
|
||||
external: List[Link] = []
|
||||
|
||||
|
||||
class ScrapingResult(BaseModel):
|
||||
cleaned_html: str
|
||||
success: bool
|
||||
media: Media = Media()
|
||||
links: Links = Links()
|
||||
metadata: Dict[str, Any] = {}
|
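A short sketch of the new result shapes; field names come from the models above, values are illustrative, and success is assumed to be a required CrawlResult field defined outside this hunk:

md = MarkdownGenerationResult(
    raw_markdown="# Hello",
    markdown_with_citations="# Hello",
    references_markdown="",
)

result = CrawlResult(
    url="https://example.com",
    html="<h1>Hello</h1>",
    success=True,  # assumed required field, not shown in this hunk
    markdown=md,
)
# markdown now accepts either a plain string or a MarkdownGenerationResult;
# markdown_v2 is reserved for the structured result.
print(isinstance(result.markdown, MarkdownGenerationResult))  # True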
||||
|
||||
@@ -1,25 +0,0 @@
|
||||
{
|
||||
"_name_or_path": "sentence-transformers/all-MiniLM-L6-v2",
|
||||
"architectures": [
|
||||
"BertModel"
|
||||
],
|
||||
"attention_probs_dropout_prob": 0.1,
|
||||
"classifier_dropout": null,
|
||||
"gradient_checkpointing": false,
|
||||
"hidden_act": "gelu",
|
||||
"hidden_dropout_prob": 0.1,
|
||||
"hidden_size": 384,
|
||||
"initializer_range": 0.02,
|
||||
"intermediate_size": 1536,
|
||||
"layer_norm_eps": 1e-12,
|
||||
"max_position_embeddings": 512,
|
||||
"model_type": "bert",
|
||||
"num_attention_heads": 12,
|
||||
"num_hidden_layers": 6,
|
||||
"pad_token_id": 0,
|
||||
"position_embedding_type": "absolute",
|
||||
"transformers_version": "4.27.4",
|
||||
"type_vocab_size": 2,
|
||||
"use_cache": true,
|
||||
"vocab_size": 30522
|
||||
}
|
||||
Binary file not shown.
@@ -1,7 +0,0 @@
|
||||
{
|
||||
"cls_token": "[CLS]",
|
||||
"mask_token": "[MASK]",
|
||||
"pad_token": "[PAD]",
|
||||
"sep_token": "[SEP]",
|
||||
"unk_token": "[UNK]"
|
||||
}
|
||||
File diff suppressed because it is too large
@@ -1,15 +0,0 @@
|
||||
{
|
||||
"cls_token": "[CLS]",
|
||||
"do_basic_tokenize": true,
|
||||
"do_lower_case": true,
|
||||
"mask_token": "[MASK]",
|
||||
"model_max_length": 512,
|
||||
"never_split": null,
|
||||
"pad_token": "[PAD]",
|
||||
"sep_token": "[SEP]",
|
||||
"special_tokens_map_file": "/Users/hammad/.cache/huggingface/hub/models--sentence-transformers--all-MiniLM-L6-v2/snapshots/7dbbc90392e2f80f3d3c277d6e90027e55de9125/special_tokens_map.json",
|
||||
"strip_accents": null,
|
||||
"tokenize_chinese_chars": true,
|
||||
"tokenizer_class": "BertTokenizer",
|
||||
"unk_token": "[UNK]"
|
||||
}
|
||||
File diff suppressed because it is too large
@@ -202,3 +202,808 @@ Avoid Common Mistakes:
|
||||
|
||||
Result
|
||||
Output the final list of JSON objects, wrapped in <blocks>...</blocks> XML tags. Make sure to close the tag properly."""
|
||||
|
||||
|
||||
PROMPT_FILTER_CONTENT = """Your task is to filter and convert HTML content into clean, focused markdown that's optimized for use with LLMs and information retrieval systems.
|
||||
|
||||
INPUT HTML:
|
||||
<|HTML_CONTENT_START|>
|
||||
{HTML}
|
||||
<|HTML_CONTENT_END|>
|
||||
|
||||
|
||||
SPECIFIC INSTRUCTION:
|
||||
<|USER_INSTRUCTION_START|>
|
||||
{REQUEST}
|
||||
<|USER_INSTRUCTION_END|>
|
||||
|
||||
TASK DETAILS:
|
||||
1. Content Selection
|
||||
- DO: Keep essential information, main content, key details
|
||||
- DO: Preserve hierarchical structure using markdown headers
|
||||
- DO: Keep code blocks, tables, key lists
|
||||
- DON'T: Include navigation menus, ads, footers, cookie notices
|
||||
- DON'T: Keep social media widgets, sidebars, related content
|
||||
|
||||
2. Content Transformation
|
||||
- DO: Use proper markdown syntax (#, ##, **, `, etc)
|
||||
- DO: Convert tables to markdown tables
|
||||
- DO: Preserve code formatting with ```language blocks
|
||||
- DO: Maintain link texts but remove tracking parameters
|
||||
- DON'T: Include HTML tags in output
|
||||
- DON'T: Keep class names, ids, or other HTML attributes
|
||||
|
||||
3. Content Organization
|
||||
- DO: Maintain logical flow of information
|
||||
- DO: Group related content under appropriate headers
|
||||
- DO: Use consistent header levels
|
||||
- DON'T: Fragment related content
|
||||
- DON'T: Duplicate information
|
||||
|
||||
Example Input:
|
||||
<div class="main-content"><h1>Setup Guide</h1><p>Follow these steps...</p></div>
|
||||
<div class="sidebar">Related articles...</div>
|
||||
|
||||
Example Output:
|
||||
# Setup Guide
|
||||
Follow these steps...
|
||||
|
||||
IMPORTANT: If specific instruction is provided above, prioritize those requirements over these general guidelines.
|
||||
|
||||
OUTPUT FORMAT:
|
||||
Wrap your response in <content> tags. Use proper markdown throughout.
|
||||
<content>
|
||||
[Your markdown content here]
|
||||
</content>
|
||||
|
||||
Begin filtering now."""
|
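One plausible way to fill the placeholders above before sending the prompt to an LLM; the replace-based fill is an assumption of this sketch, not necessarily how the library wires it up:

html = "<div class='main-content'><h1>Setup</h1><p>Step one...</p></div>"
request = "Keep only the installation steps."

prompt = (
    PROMPT_FILTER_CONTENT
    .replace("{HTML}", html)
    .replace("{REQUEST}", request)
)
# `prompt` can now be sent to a chat/completion endpoint; per the instructions
# above, the model should answer with markdown wrapped in <content>...</content>.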
||||
|
||||
JSON_SCHEMA_BUILDER = """
|
||||
# HTML Schema Generation Instructions
|
||||
You are a specialized model designed to analyze HTML patterns and generate extraction schemas. Your primary job is to create structured JSON schemas that can be used to extract data from HTML in a consistent and reliable way. When presented with HTML content, you must analyze its structure and generate a schema that captures all relevant data points.
|
||||
|
||||
## Your Core Responsibilities:
|
||||
1. Analyze HTML structure to identify repeating patterns and important data points
|
||||
2. Generate valid JSON schemas following the specified format
|
||||
3. Create appropriate selectors that will work reliably for data extraction
|
||||
4. Name fields meaningfully based on their content and purpose
|
||||
5. Handle both specific user requests and autonomous pattern detection
|
||||
|
||||
## Available Schema Types You Can Generate:
|
||||
|
||||
<schema_types>
|
||||
1. Basic Single-Level Schema
|
||||
- Use for simple, flat data structures
|
||||
- Example: Product cards, user profiles
|
||||
- Direct field extractions
|
||||
|
||||
2. Nested Object Schema
|
||||
- Use for hierarchical data
|
||||
- Example: Articles with author details
|
||||
- Contains objects within objects
|
||||
|
||||
3. List Schema
|
||||
- Use for repeating elements
|
||||
- Example: Comment sections, product lists
|
||||
- Handles arrays of similar items
|
||||
|
||||
4. Complex Nested Lists
|
||||
- Use for multi-level data
|
||||
- Example: Categories with subcategories
|
||||
- Multiple levels of nesting
|
||||
|
||||
5. Transformation Schema
|
||||
- Use for data requiring processing
|
||||
- Supports regex and text transformations
|
||||
- Special attribute handling
|
||||
</schema_types>
|
||||
|
||||
<schema_structure>
|
||||
Your output must always be a JSON object with this structure:
|
||||
{
|
||||
"name": "Descriptive name of the pattern",
|
||||
"baseSelector": "CSS selector for the repeating element",
|
||||
"fields": [
|
||||
{
|
||||
"name": "field_name",
|
||||
"selector": "CSS selector",
|
||||
"type": "text|attribute|nested|list|regex",
|
||||
"attribute": "attribute_name", // Optional
|
||||
"transform": "transformation_type", // Optional
|
||||
"pattern": "regex_pattern", // Optional
|
||||
"fields": [] // For nested/list types
|
||||
}
|
||||
]
|
||||
}
|
||||
</schema_structure>
|
||||
|
||||
<type_definitions>
|
||||
Available field types:
|
||||
- text: Direct text extraction
|
||||
- attribute: HTML attribute extraction
|
||||
- nested: Object containing other fields
|
||||
- list: Array of similar items
|
||||
- regex: Pattern-based extraction
|
||||
</type_definitions>
|
||||
|
||||
<behavior_rules>
|
||||
1. When given a specific query:
|
||||
- Focus on extracting requested data points
|
||||
- Use most specific selectors possible
|
||||
- Include all fields mentioned in the query
|
||||
|
||||
2. When no query is provided:
|
||||
- Identify main content areas
|
||||
- Extract all meaningful data points
|
||||
- Use semantic structure to determine importance
|
||||
- Include prices, dates, titles, and other common data types
|
||||
|
||||
3. Always:
|
||||
- Use reliable CSS selectors
|
||||
- Handle dynamic class names appropriately
|
||||
- Create descriptive field names
|
||||
- Follow consistent naming conventions
|
||||
</behavior_rules>
|
||||
|
||||
<examples>
|
||||
1. Basic Product Card Example:
|
||||
<html>
|
||||
<div class="product-card" data-cat-id="electronics" data-subcat-id="laptops">
|
||||
<h2 class="product-title">Gaming Laptop</h2>
|
||||
<span class="price">$999.99</span>
|
||||
<img src="laptop.jpg" alt="Gaming Laptop">
|
||||
</div>
|
||||
</html>
|
||||
|
||||
Generated Schema:
|
||||
{
|
||||
"name": "Product Cards",
|
||||
"baseSelector": ".product-card",
|
||||
"baseFields": [
|
||||
{"name": "data_cat_id", "type": "attribute", "attribute": "data-cat-id"},
|
||||
{"name": "data_subcat_id", "type": "attribute", "attribute": "data-subcat-id"}
|
||||
],
|
||||
"fields": [
|
||||
{
|
||||
"name": "title",
|
||||
"selector": ".product-title",
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"name": "price",
|
||||
"selector": ".price",
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"name": "image_url",
|
||||
"selector": "img",
|
||||
"type": "attribute",
|
||||
"attribute": "src"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
2. Article with Author Details Example:
|
||||
<html>
|
||||
<article>
|
||||
<h1>The Future of AI</h1>
|
||||
<div class="author-info">
|
||||
<span class="author-name">Dr. Smith</span>
|
||||
<img src="author.jpg" alt="Dr. Smith">
|
||||
</div>
|
||||
</article>
|
||||
</html>
|
||||
|
||||
Generated Schema:
|
||||
{
|
||||
"name": "Article Details",
|
||||
"baseSelector": "article",
|
||||
"fields": [
|
||||
{
|
||||
"name": "title",
|
||||
"selector": "h1",
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"name": "author",
|
||||
"type": "nested",
|
||||
"selector": ".author-info",
|
||||
"fields": [
|
||||
{
|
||||
"name": "name",
|
||||
"selector": ".author-name",
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"name": "avatar",
|
||||
"selector": "img",
|
||||
"type": "attribute",
|
||||
"attribute": "src"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
3. Comments Section Example:
|
||||
<html>
|
||||
<div class="comments-container">
|
||||
<div class="comment" data-user-id="123">
|
||||
<div class="user-name">John123</div>
|
||||
<p class="comment-text">Great article!</p>
|
||||
</div>
|
||||
<div class="comment" data-user-id="456">
|
||||
<div class="user-name">Alice456</div>
|
||||
<p class="comment-text">Thanks for sharing.</p>
|
||||
</div>
|
||||
</div>
|
||||
</html>
|
||||
|
||||
Generated Schema:
|
||||
{
|
||||
"name": "Comment Section",
|
||||
"baseSelector": ".comments-container",
|
||||
"baseFields": [
|
||||
{"name": "data_user_id", "type": "attribute", "attribute": "data-user-id"}
|
||||
],
|
||||
"fields": [
|
||||
{
|
||||
"name": "comments",
|
||||
"type": "list",
|
||||
"selector": ".comment",
|
||||
"fields": [
|
||||
{
|
||||
"name": "user",
|
||||
"selector": ".user-name",
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"name": "content",
|
||||
"selector": ".comment-text",
|
||||
"type": "text"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
4. E-commerce Categories Example:
|
||||
<html>
|
||||
<div class="category-section" data-category="electronics">
|
||||
<h2>Electronics</h2>
|
||||
<div class="subcategory">
|
||||
<h3>Laptops</h3>
|
||||
<div class="product">
|
||||
<span class="product-name">MacBook Pro</span>
|
||||
<span class="price">$1299</span>
|
||||
</div>
|
||||
<div class="product">
|
||||
<span class="product-name">Dell XPS</span>
|
||||
<span class="price">$999</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</html>
|
||||
|
||||
Generated Schema:
|
||||
{
|
||||
"name": "E-commerce Categories",
|
||||
"baseSelector": ".category-section",
|
||||
"baseFields": [
|
||||
{"name": "data_category", "type": "attribute", "attribute": "data-category"}
|
||||
],
|
||||
"fields": [
|
||||
{
|
||||
"name": "category_name",
|
||||
"selector": "h2",
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"name": "subcategories",
|
||||
"type": "nested_list",
|
||||
"selector": ".subcategory",
|
||||
"fields": [
|
||||
{
|
||||
"name": "name",
|
||||
"selector": "h3",
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"name": "products",
|
||||
"type": "list",
|
||||
"selector": ".product",
|
||||
"fields": [
|
||||
{
|
||||
"name": "name",
|
||||
"selector": ".product-name",
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"name": "price",
|
||||
"selector": ".price",
|
||||
"type": "text"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
5. Job Listings with Transformations Example:
|
||||
<html>
|
||||
<div class="job-post">
|
||||
<h3 class="job-title">Senior Developer</h3>
|
||||
<span class="salary-text">Salary: $120,000/year</span>
|
||||
<span class="location"> New York, NY </span>
|
||||
</div>
|
||||
</html>
|
||||
|
||||
Generated Schema:
|
||||
{
|
||||
"name": "Job Listings",
|
||||
"baseSelector": ".job-post",
|
||||
"fields": [
|
||||
{
|
||||
"name": "title",
|
||||
"selector": ".job-title",
|
||||
"type": "text",
|
||||
"transform": "uppercase"
|
||||
},
|
||||
{
|
||||
"name": "salary",
|
||||
"selector": ".salary-text",
|
||||
"type": "regex",
|
||||
"pattern": "\\$([\\d,]+)"
|
||||
},
|
||||
{
|
||||
"name": "location",
|
||||
"selector": ".location",
|
||||
"type": "text",
|
||||
"transform": "strip"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
6. Skyscanner Place Card Example:
|
||||
<html>
|
||||
<div class="PlaceCard_descriptionContainer__M2NjN" data-testid="description-container">
|
||||
<div class="PlaceCard_nameContainer__ZjZmY" tabindex="0" role="link">
|
||||
<div class="PlaceCard_nameContent__ODUwZ">
|
||||
<span class="BpkText_bpk-text__MjhhY BpkText_bpk-text--heading-4__Y2FlY">Doha</span>
|
||||
</div>
|
||||
<span class="BpkText_bpk-text__MjhhY BpkText_bpk-text--heading-4__Y2FlY PlaceCard_subName__NTVkY">Qatar</span>
|
||||
</div>
|
||||
<span class="PlaceCard_advertLabel__YTM0N">Sunny days and the warmest welcome awaits</span>
|
||||
<a class="BpkLink_bpk-link__MmQwY PlaceCard_descriptionLink__NzYwN" href="/flights/del/doha/" data-testid="flights-link">
|
||||
<div class="PriceDescription_container__NjEzM">
|
||||
<span class="BpkText_bpk-text--heading-5__MTRjZ">₹17,559</span>
|
||||
</div>
|
||||
</a>
|
||||
</div>
|
||||
</html>
|
||||
|
||||
Generated Schema:
|
||||
{
|
||||
"name": "Skyscanner Place Cards",
|
||||
"baseSelector": "div[class^='PlaceCard_descriptionContainer__']",
|
||||
"baseFields": [
|
||||
{"name": "data_testid", "type": "attribute", "attribute": "data-testid"}
|
||||
],
|
||||
"fields": [
|
||||
{
|
||||
"name": "city_name",
|
||||
"selector": "div[class^='PlaceCard_nameContent__'] .BpkText_bpk-text--heading-4__",
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"name": "country_name",
|
||||
"selector": "span[class*='PlaceCard_subName__']",
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"name": "description",
|
||||
"selector": "span[class*='PlaceCard_advertLabel__']",
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"name": "flight_price",
|
||||
"selector": "a[data-testid='flights-link'] .BpkText_bpk-text--heading-5__",
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"name": "flight_url",
|
||||
"selector": "a[data-testid='flights-link']",
|
||||
"type": "attribute",
|
||||
"attribute": "href"
|
||||
}
|
||||
]
|
||||
}
|
||||
</examples>
|
||||
|
||||
|
||||
<output_requirements>
|
||||
Your output must:
|
||||
1. Be valid JSON only
|
||||
2. Include no explanatory text
|
||||
3. Follow the exact schema structure provided
|
||||
4. Use appropriate field types
|
||||
5. Include all required fields
|
||||
6. Use valid CSS selectors
|
||||
</output_requirements>
|
||||
|
||||
"""
|
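Because the builder prompt demands JSON-only output, a small sanity check on the model's reply is cheap insurance. A sketch; the required keys follow the <schema_structure> block above:

import json

def validate_schema(llm_output: str) -> dict:
    """Parse the builder's reply and check the top-level structure it promises."""
    schema = json.loads(llm_output)
    for key in ("name", "baseSelector", "fields"):
        if key not in schema:
            raise ValueError(f"Generated schema is missing required key: {key}")
    for field in schema["fields"]:
        if "name" not in field or "type" not in field:
            raise ValueError(f"Malformed field entry: {field}")
    return schema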
||||
|
||||
JSON_SCHEMA_BUILDER_XPATH = """
|
||||
# HTML Schema Generation Instructions
|
||||
You are a specialized model designed to analyze HTML patterns and generate extraction schemas. Your primary job is to create structured JSON schemas that can be used to extract data from HTML in a consistent and reliable way. When presented with HTML content, you must analyze its structure and generate a schema that captures all relevant data points.
|
||||
|
||||
## Your Core Responsibilities:
|
||||
1. Analyze HTML structure to identify repeating patterns and important data points
|
||||
2. Generate valid JSON schemas following the specified format
|
||||
3. Create appropriate XPath selectors that will work reliably for data extraction
|
||||
4. Name fields meaningfully based on their content and purpose
|
||||
5. Handle both specific user requests and autonomous pattern detection
|
||||
|
||||
## Available Schema Types You Can Generate:
|
||||
|
||||
<schema_types>
|
||||
1. Basic Single-Level Schema
|
||||
- Use for simple, flat data structures
|
||||
- Example: Product cards, user profiles
|
||||
- Direct field extractions
|
||||
|
||||
2. Nested Object Schema
|
||||
- Use for hierarchical data
|
||||
- Example: Articles with author details
|
||||
- Contains objects within objects
|
||||
|
||||
3. List Schema
|
||||
- Use for repeating elements
|
||||
- Example: Comment sections, product lists
|
||||
- Handles arrays of similar items
|
||||
|
||||
4. Complex Nested Lists
|
||||
- Use for multi-level data
|
||||
- Example: Categories with subcategories
|
||||
- Multiple levels of nesting
|
||||
|
||||
5. Transformation Schema
|
||||
- Use for data requiring processing
|
||||
- Supports regex and text transformations
|
||||
- Special attribute handling
|
||||
</schema_types>
|
||||
|
||||
<schema_structure>
|
||||
Your output must always be a JSON object with this structure:
|
||||
{
|
||||
"name": "Descriptive name of the pattern",
|
||||
"baseSelector": "XPath selector for the repeating element",
|
||||
"fields": [
|
||||
{
|
||||
"name": "field_name",
|
||||
"selector": "XPath selector",
|
||||
"type": "text|attribute|nested|list|regex",
|
||||
"attribute": "attribute_name", // Optional
|
||||
"transform": "transformation_type", // Optional
|
||||
"pattern": "regex_pattern", // Optional
|
||||
"fields": [] // For nested/list types
|
||||
}
|
||||
]
|
||||
}
|
||||
</schema_structure>
|
||||
|
||||
<type_definitions>
|
||||
Available field types:
|
||||
- text: Direct text extraction
|
||||
- attribute: HTML attribute extraction
|
||||
- nested: Object containing other fields
|
||||
- list: Array of similar items
|
||||
- regex: Pattern-based extraction
|
||||
</type_definitions>
|
||||
|
||||
<behavior_rules>
|
||||
1. When given a specific query:
|
||||
- Focus on extracting requested data points
|
||||
- Use most specific selectors possible
|
||||
- Include all fields mentioned in the query
|
||||
|
||||
2. When no query is provided:
|
||||
- Identify main content areas
|
||||
- Extract all meaningful data points
|
||||
- Use semantic structure to determine importance
|
||||
- Include prices, dates, titles, and other common data types
|
||||
|
||||
3. Always:
|
||||
- Use reliable XPath selectors
|
||||
- Handle dynamic element IDs appropriately
|
||||
- Create descriptive field names
|
||||
- Follow consistent naming conventions
|
||||
</behavior_rules>
|
||||
|
||||
<examples>
|
||||
1. Basic Product Card Example:
|
||||
<html>
|
||||
<div class="product-card" data-cat-id="electronics" data-subcat-id="laptops">
|
||||
<h2 class="product-title">Gaming Laptop</h2>
|
||||
<span class="price">$999.99</span>
|
||||
<img src="laptop.jpg" alt="Gaming Laptop">
|
||||
</div>
|
||||
</html>
|
||||
|
||||
Generated Schema:
|
||||
{
|
||||
"name": "Product Cards",
|
||||
"baseSelector": "//div[@class='product-card']",
|
||||
"baseFields": [
|
||||
{"name": "data_cat_id", "type": "attribute", "attribute": "data-cat-id"},
|
||||
{"name": "data_subcat_id", "type": "attribute", "attribute": "data-subcat-id"}
|
||||
],
|
||||
"fields": [
|
||||
{
|
||||
"name": "title",
|
||||
"selector": ".//h2[@class='product-title']",
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"name": "price",
|
||||
"selector": ".//span[@class='price']",
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"name": "image_url",
|
||||
"selector": ".//img",
|
||||
"type": "attribute",
|
||||
"attribute": "src"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
2. Article with Author Details Example:
|
||||
<html>
|
||||
<article>
|
||||
<h1>The Future of AI</h1>
|
||||
<div class="author-info">
|
||||
<span class="author-name">Dr. Smith</span>
|
||||
<img src="author.jpg" alt="Dr. Smith">
|
||||
</div>
|
||||
</article>
|
||||
</html>
|
||||
|
||||
Generated Schema:
|
||||
{
|
||||
"name": "Article Details",
|
||||
"baseSelector": "//article",
|
||||
"fields": [
|
||||
{
|
||||
"name": "title",
|
||||
"selector": ".//h1",
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"name": "author",
|
||||
"type": "nested",
|
||||
"selector": ".//div[@class='author-info']",
|
||||
"fields": [
|
||||
{
|
||||
"name": "name",
|
||||
"selector": ".//span[@class='author-name']",
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"name": "avatar",
|
||||
"selector": ".//img",
|
||||
"type": "attribute",
|
||||
"attribute": "src"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
3. Comments Section Example:
|
||||
<html>
|
||||
<div class="comments-container">
|
||||
<div class="comment" data-user-id="123">
|
||||
<div class="user-name">John123</div>
|
||||
<p class="comment-text">Great article!</p>
|
||||
</div>
|
||||
<div class="comment" data-user-id="456">
|
||||
<div class="user-name">Alice456</div>
|
||||
<p class="comment-text">Thanks for sharing.</p>
|
||||
</div>
|
||||
</div>
|
||||
</html>
|
||||
|
||||
Generated Schema:
|
||||
{
|
||||
"name": "Comment Section",
|
||||
"baseSelector": "//div[@class='comments-container']",
|
||||
"fields": [
|
||||
{
|
||||
"name": "comments",
|
||||
"type": "list",
|
||||
"selector": ".//div[@class='comment']",
|
||||
"baseFields": [
|
||||
{"name": "data_user_id", "type": "attribute", "attribute": "data-user-id"}
|
||||
],
|
||||
"fields": [
|
||||
{
|
||||
"name": "user",
|
||||
"selector": ".//div[@class='user-name']",
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"name": "content",
|
||||
"selector": ".//p[@class='comment-text']",
|
||||
"type": "text"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
4. E-commerce Categories Example:
|
||||
<html>
|
||||
<div class="category-section" data-category="electronics">
|
||||
<h2>Electronics</h2>
|
||||
<div class="subcategory">
|
||||
<h3>Laptops</h3>
|
||||
<div class="product">
|
||||
<span class="product-name">MacBook Pro</span>
|
||||
<span class="price">$1299</span>
|
||||
</div>
|
||||
<div class="product">
|
||||
<span class="product-name">Dell XPS</span>
|
||||
<span class="price">$999</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</html>
|
||||
|
||||
Generated Schema:
|
||||
{
|
||||
"name": "E-commerce Categories",
|
||||
"baseSelector": "//div[@class='category-section']",
|
||||
"baseFields": [
|
||||
{"name": "data_category", "type": "attribute", "attribute": "data-category"}
|
||||
],
|
||||
"fields": [
|
||||
{
|
||||
"name": "category_name",
|
||||
"selector": ".//h2",
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"name": "subcategories",
|
||||
"type": "nested_list",
|
||||
"selector": ".//div[@class='subcategory']",
|
||||
"fields": [
|
||||
{
|
||||
"name": "name",
|
||||
"selector": ".//h3",
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"name": "products",
|
||||
"type": "list",
|
||||
"selector": ".//div[@class='product']",
|
||||
"fields": [
|
||||
{
|
||||
"name": "name",
|
||||
"selector": ".//span[@class='product-name']",
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"name": "price",
|
||||
"selector": ".//span[@class='price']",
|
||||
"type": "text"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
5. Job Listings with Transformations Example:
|
||||
<html>
|
||||
<div class="job-post">
|
||||
<h3 class="job-title">Senior Developer</h3>
|
||||
<span class="salary-text">Salary: $120,000/year</span>
|
||||
<span class="location"> New York, NY </span>
|
||||
</div>
|
||||
</html>
|
||||
|
||||
Generated Schema:
|
||||
{
|
||||
"name": "Job Listings",
|
||||
"baseSelector": "//div[@class='job-post']",
|
||||
"fields": [
|
||||
{
|
||||
"name": "title",
|
||||
"selector": ".//h3[@class='job-title']",
|
||||
"type": "text",
|
||||
"transform": "uppercase"
|
||||
},
|
||||
{
|
||||
"name": "salary",
|
||||
"selector": ".//span[@class='salary-text']",
|
||||
"type": "regex",
|
||||
"pattern": "\\$([\\d,]+)"
|
||||
},
|
||||
{
|
||||
"name": "location",
|
||||
"selector": ".//span[@class='location']",
|
||||
"type": "text",
|
||||
"transform": "strip"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
6. Skyscanner Place Card Example:
|
||||
<html>
|
||||
<div class="PlaceCard_descriptionContainer__M2NjN" data-testid="description-container">
|
||||
<div class="PlaceCard_nameContainer__ZjZmY" tabindex="0" role="link">
|
||||
<div class="PlaceCard_nameContent__ODUwZ">
|
||||
<span class="BpkText_bpk-text__MjhhY BpkText_bpk-text--heading-4__Y2FlY">Doha</span>
|
||||
</div>
|
||||
<span class="BpkText_bpk-text__MjhhY BpkText_bpk-text--heading-4__Y2FlY PlaceCard_subName__NTVkY">Qatar</span>
|
||||
</div>
|
||||
<span class="PlaceCard_advertLabel__YTM0N">Sunny days and the warmest welcome awaits</span>
|
||||
<a class="BpkLink_bpk-link__MmQwY PlaceCard_descriptionLink__NzYwN" href="/flights/del/doha/" data-testid="flights-link">
|
||||
<div class="PriceDescription_container__NjEzM">
|
||||
<span class="BpkText_bpk-text--heading-5__MTRjZ">₹17,559</span>
|
||||
</div>
|
||||
</a>
|
||||
</div>
|
||||
</html>
|
||||
|
||||
Generated Schema:
|
||||
{
|
||||
"name": "Skyscanner Place Cards",
|
||||
"baseSelector": "//div[contains(@class, 'PlaceCard_descriptionContainer__')]",
|
||||
"baseFields": [
|
||||
{"name": "data_testid", "type": "attribute", "attribute": "data-testid"}
|
||||
],
|
||||
"fields": [
|
||||
{
|
||||
"name": "city_name",
|
||||
"selector": ".//div[contains(@class, 'PlaceCard_nameContent__')]//span[contains(@class, 'BpkText_bpk-text--heading-4__')]",
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"name": "country_name",
|
||||
"selector": ".//span[contains(@class, 'PlaceCard_subName__')]",
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"name": "description",
|
||||
"selector": ".//span[contains(@class, 'PlaceCard_advertLabel__')]",
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"name": "flight_price",
|
||||
"selector": ".//a[@data-testid='flights-link']//span[contains(@class, 'BpkText_bpk-text--heading-5__')]",
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"name": "flight_url",
|
||||
"selector": ".//a[@data-testid='flights-link']",
|
||||
"type": "attribute",
|
||||
"attribute": "href"
|
||||
}
|
||||
]
|
||||
}
|
||||
</examples>
|
||||
|
||||
<output_requirements>
|
||||
Your output must:
|
||||
1. Be valid JSON only
|
||||
2. Include no explanatory text
|
||||
3. Follow the exact schema structure provided
|
||||
4. Use appropriate field types
|
||||
5. Include all required fields
|
||||
6. Use valid XPath selectors
|
||||
</output_requirements>
|
||||
"""
|
||||
184
crawl4ai/ssl_certificate.py
Normal file
@@ -0,0 +1,184 @@
|
||||
"""SSL Certificate class for handling certificate operations."""
|
||||
|
||||
import ssl
|
||||
import socket
|
||||
import base64
|
||||
import json
|
||||
from typing import Dict, Any, Optional
|
||||
from urllib.parse import urlparse
|
||||
import OpenSSL.crypto
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
class SSLCertificate:
|
||||
"""
|
||||
A class representing an SSL certificate with methods to export in various formats.
|
||||
|
||||
Attributes:
|
||||
cert_info (Dict[str, Any]): The certificate information.
|
||||
|
||||
Methods:
|
||||
from_url(url: str, timeout: int = 10) -> Optional['SSLCertificate']: Create SSLCertificate instance from a URL.
|
||||
from_file(file_path: str) -> Optional['SSLCertificate']: Create SSLCertificate instance from a file.
|
||||
from_binary(binary_data: bytes) -> Optional['SSLCertificate']: Create SSLCertificate instance from binary data.
|
||||
to_pem(filepath: Optional[str] = None) -> Optional[str]: Export the certificate as PEM (optionally writing it to a file).
to_der(filepath: Optional[str] = None) -> Optional[bytes]: Export the certificate as DER (optionally writing it to a file).
to_json(filepath: Optional[str] = None) -> Optional[str]: Export the certificate as JSON (optionally writing it to a file).
|
||||
"""
|
||||
|
||||
def __init__(self, cert_info: Dict[str, Any]):
|
||||
self._cert_info = self._decode_cert_data(cert_info)
|
||||
|
||||
@staticmethod
|
||||
def from_url(url: str, timeout: int = 10) -> Optional["SSLCertificate"]:
|
||||
"""
|
||||
Create SSLCertificate instance from a URL.
|
||||
|
||||
Args:
|
||||
url (str): URL of the website.
|
||||
timeout (int): Timeout for the connection (default: 10).
|
||||
|
||||
Returns:
|
||||
Optional[SSLCertificate]: SSLCertificate instance if successful, None otherwise.
|
||||
"""
|
||||
try:
|
||||
hostname = urlparse(url).netloc
|
||||
if ":" in hostname:
|
||||
hostname = hostname.split(":")[0]
|
||||
|
||||
context = ssl.create_default_context()
|
||||
with socket.create_connection((hostname, 443), timeout=timeout) as sock:
|
||||
with context.wrap_socket(sock, server_hostname=hostname) as ssock:
|
||||
cert_binary = ssock.getpeercert(binary_form=True)
|
||||
x509 = OpenSSL.crypto.load_certificate(
|
||||
OpenSSL.crypto.FILETYPE_ASN1, cert_binary
|
||||
)
|
||||
|
||||
cert_info = {
|
||||
"subject": dict(x509.get_subject().get_components()),
|
||||
"issuer": dict(x509.get_issuer().get_components()),
|
||||
"version": x509.get_version(),
|
||||
"serial_number": hex(x509.get_serial_number()),
|
||||
"not_before": x509.get_notBefore(),
|
||||
"not_after": x509.get_notAfter(),
|
||||
"fingerprint": x509.digest("sha256").hex(),
|
||||
"signature_algorithm": x509.get_signature_algorithm(),
|
||||
"raw_cert": base64.b64encode(cert_binary),
|
||||
}
|
||||
|
||||
# Add extensions
|
||||
extensions = []
|
||||
for i in range(x509.get_extension_count()):
|
||||
ext = x509.get_extension(i)
|
||||
extensions.append(
|
||||
{"name": ext.get_short_name(), "value": str(ext)}
|
||||
)
|
||||
cert_info["extensions"] = extensions
|
||||
|
||||
return SSLCertificate(cert_info)
|
||||
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def _decode_cert_data(data: Any) -> Any:
|
||||
"""Helper method to decode bytes in certificate data."""
|
||||
if isinstance(data, bytes):
|
||||
return data.decode("utf-8")
|
||||
elif isinstance(data, dict):
|
||||
return {
|
||||
(
|
||||
k.decode("utf-8") if isinstance(k, bytes) else k
|
||||
): SSLCertificate._decode_cert_data(v)
|
||||
for k, v in data.items()
|
||||
}
|
||||
elif isinstance(data, list):
|
||||
return [SSLCertificate._decode_cert_data(item) for item in data]
|
||||
return data
|
||||
|
||||
def to_json(self, filepath: Optional[str] = None) -> Optional[str]:
|
||||
"""
|
||||
Export certificate as JSON.
|
||||
|
||||
Args:
|
||||
filepath (Optional[str]): Path to save the JSON file (default: None).
|
||||
|
||||
Returns:
|
||||
Optional[str]: JSON string if successful, None otherwise.
|
||||
"""
|
||||
json_str = json.dumps(self._cert_info, indent=2, ensure_ascii=False)
|
||||
if filepath:
|
||||
Path(filepath).write_text(json_str, encoding="utf-8")
|
||||
return None
|
||||
return json_str
|
||||
|
||||
def to_pem(self, filepath: Optional[str] = None) -> Optional[str]:
|
||||
"""
|
||||
Export certificate as PEM.
|
||||
|
||||
Args:
|
||||
filepath (Optional[str]): Path to save the PEM file (default: None).
|
||||
|
||||
Returns:
|
||||
Optional[str]: PEM string if successful, None otherwise.
|
||||
"""
|
||||
try:
|
||||
x509 = OpenSSL.crypto.load_certificate(
|
||||
OpenSSL.crypto.FILETYPE_ASN1,
|
||||
base64.b64decode(self._cert_info["raw_cert"]),
|
||||
)
|
||||
pem_data = OpenSSL.crypto.dump_certificate(
|
||||
OpenSSL.crypto.FILETYPE_PEM, x509
|
||||
).decode("utf-8")
|
||||
|
||||
if filepath:
|
||||
Path(filepath).write_text(pem_data, encoding="utf-8")
|
||||
return None
|
||||
return pem_data
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
def to_der(self, filepath: Optional[str] = None) -> Optional[bytes]:
|
||||
"""
|
||||
Export certificate as DER.
|
||||
|
||||
Args:
|
||||
filepath (Optional[str]): Path to save the DER file (default: None).
|
||||
|
||||
Returns:
|
||||
Optional[bytes]: DER bytes if successful, None otherwise.
|
||||
"""
|
||||
try:
|
||||
der_data = base64.b64decode(self._cert_info["raw_cert"])
|
||||
if filepath:
|
||||
Path(filepath).write_bytes(der_data)
|
||||
return None
|
||||
return der_data
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
@property
|
||||
def issuer(self) -> Dict[str, str]:
|
||||
"""Get certificate issuer information."""
|
||||
return self._cert_info.get("issuer", {})
|
||||
|
||||
@property
|
||||
def subject(self) -> Dict[str, str]:
|
||||
"""Get certificate subject information."""
|
||||
return self._cert_info.get("subject", {})
|
||||
|
||||
@property
|
||||
def valid_from(self) -> str:
|
||||
"""Get certificate validity start date."""
|
||||
return self._cert_info.get("not_before", "")
|
||||
|
||||
@property
|
||||
def valid_until(self) -> str:
|
||||
"""Get certificate validity end date."""
|
||||
return self._cert_info.get("not_after", "")
|
||||
|
||||
@property
|
||||
def fingerprint(self) -> str:
|
||||
"""Get certificate fingerprint."""
|
||||
return self._cert_info.get("fingerprint", "")
|
||||
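For orientation, a minimal usage sketch of the new `SSLCertificate` helper (illustrative only; it assumes the module is importable as `crawl4ai.ssl_certificate` and that the target host serves a certificate):

```python
from crawl4ai.ssl_certificate import SSLCertificate

# Fetch the certificate over a TLS handshake and inspect/export it.
cert = SSLCertificate.from_url("https://example.com")
if cert:
    print(cert.issuer)          # decoded issuer components
    print(cert.valid_until)     # notAfter timestamp as a string
    pem_text = cert.to_pem()               # PEM string, or None on failure
    cert.to_json("certificate.json")       # writes the file and returns None
```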
@@ -1,146 +0,0 @@
|
||||
import spacy
|
||||
from spacy.training import Example
|
||||
import random
|
||||
import nltk
|
||||
from nltk.corpus import reuters
|
||||
import torch
|
||||
|
||||
def save_spacy_model_as_torch(nlp, model_dir="models/reuters"):
|
||||
# Extract the TextCategorizer component
|
||||
textcat = nlp.get_pipe("textcat_multilabel")
|
||||
|
||||
# Convert the weights to a PyTorch state dictionary
|
||||
state_dict = {name: torch.tensor(param.data) for name, param in textcat.model.named_parameters()}
|
||||
|
||||
# Save the state dictionary
|
||||
torch.save(state_dict, f"{model_dir}/model_weights.pth")
|
||||
|
||||
# Extract and save the vocabulary
|
||||
vocab = extract_vocab(nlp)
|
||||
with open(f"{model_dir}/vocab.txt", "w") as vocab_file:
|
||||
for word, idx in vocab.items():
|
||||
vocab_file.write(f"{word}\t{idx}\n")
|
||||
|
||||
print(f"Model weights and vocabulary saved to: {model_dir}")
|
||||
|
||||
def extract_vocab(nlp):
|
||||
# Extract vocabulary from the SpaCy model
|
||||
vocab = {word: i for i, word in enumerate(nlp.vocab.strings)}
|
||||
return vocab
|
||||
|
||||
nlp = spacy.load("models/reuters")
|
||||
save_spacy_model_as_torch(nlp, model_dir="models")
|
||||
|
||||
def train_and_save_reuters_model(model_dir="models/reuters"):
|
||||
# Ensure the Reuters corpus is downloaded
|
||||
nltk.download('reuters')
|
||||
nltk.download('punkt')
|
||||
if not reuters.fileids():
|
||||
print("Reuters corpus not found.")
|
||||
return
|
||||
|
||||
# Load a blank English spaCy model
|
||||
nlp = spacy.blank("en")
|
||||
|
||||
# Create a TextCategorizer with the ensemble model for multi-label classification
|
||||
textcat = nlp.add_pipe("textcat_multilabel")
|
||||
|
||||
# Add labels to text classifier
|
||||
for label in reuters.categories():
|
||||
textcat.add_label(label)
|
||||
|
||||
# Prepare training data
|
||||
train_examples = []
|
||||
for fileid in reuters.fileids():
|
||||
categories = reuters.categories(fileid)
|
||||
text = reuters.raw(fileid)
|
||||
cats = {label: label in categories for label in reuters.categories()}
|
||||
# Prepare spacy Example objects
|
||||
doc = nlp.make_doc(text)
|
||||
example = Example.from_dict(doc, {'cats': cats})
|
||||
train_examples.append(example)
|
||||
|
||||
# Initialize the text categorizer with the example objects
|
||||
nlp.initialize(lambda: train_examples)
|
||||
|
||||
# Train the model
|
||||
random.seed(1)
|
||||
spacy.util.fix_random_seed(1)
|
||||
for i in range(5): # Adjust iterations for better accuracy
|
||||
random.shuffle(train_examples)
|
||||
losses = {}
|
||||
# Create batches of data
|
||||
batches = spacy.util.minibatch(train_examples, size=8)
|
||||
for batch in batches:
|
||||
nlp.update(batch, drop=0.2, losses=losses)
|
||||
print(f"Losses at iteration {i}: {losses}")
|
||||
|
||||
# Save the trained model
|
||||
nlp.to_disk(model_dir)
|
||||
print(f"Model saved to: {model_dir}")
|
||||
|
||||
def train_model(model_dir, additional_epochs=0):
|
||||
# Load the model if it exists, otherwise start with a blank model
|
||||
try:
|
||||
nlp = spacy.load(model_dir)
|
||||
print("Model loaded from disk.")
|
||||
except IOError:
|
||||
print("No existing model found. Starting with a new model.")
|
||||
nlp = spacy.blank("en")
|
||||
textcat = nlp.add_pipe("textcat_multilabel")
|
||||
for label in reuters.categories():
|
||||
textcat.add_label(label)
|
||||
|
||||
# Prepare training data
|
||||
train_examples = []
|
||||
for fileid in reuters.fileids():
|
||||
categories = reuters.categories(fileid)
|
||||
text = reuters.raw(fileid)
|
||||
cats = {label: label in categories for label in reuters.categories()}
|
||||
doc = nlp.make_doc(text)
|
||||
example = Example.from_dict(doc, {'cats': cats})
|
||||
train_examples.append(example)
|
||||
|
||||
# Initialize the model if it was newly created
|
||||
if 'textcat_multilabel' not in nlp.pipe_names:
|
||||
nlp.initialize(lambda: train_examples)
|
||||
else:
|
||||
print("Continuing training with existing model.")
|
||||
|
||||
# Train the model
|
||||
random.seed(1)
|
||||
spacy.util.fix_random_seed(1)
|
||||
num_epochs = 5 + additional_epochs
|
||||
for i in range(num_epochs):
|
||||
random.shuffle(train_examples)
|
||||
losses = {}
|
||||
batches = spacy.util.minibatch(train_examples, size=8)
|
||||
for batch in batches:
|
||||
nlp.update(batch, drop=0.2, losses=losses)
|
||||
print(f"Losses at iteration {i}: {losses}")
|
||||
|
||||
# Save the trained model
|
||||
nlp.to_disk(model_dir)
|
||||
print(f"Model saved to: {model_dir}")
|
||||
|
||||
def load_model_and_predict(model_dir, text, top_k=3):
|
||||
# Load the trained model from the specified directory
|
||||
nlp = spacy.load(model_dir)
|
||||
|
||||
# Process the text with the loaded model
|
||||
doc = nlp(text)
|
||||
|
||||
# Get the top k categories
|
||||
top_categories = sorted(doc.cats.items(), key=lambda x: x[1], reverse=True)[:top_k]
|
||||
print(f"Top {tok_k} categories:")
|
||||
|
||||
return top_categories
|
||||
|
||||
if __name__ == "__main__":
|
||||
train_and_save_reuters_model()
|
||||
train_model("models/reuters", additional_epochs=5)
|
||||
model_directory = "reuters_model_10"
|
||||
print(reuters.categories())
|
||||
example_text = "Apple Inc. is reportedly buying a startup for $1 billion"
|
||||
r = load_model_and_predict(model_directory, example_text)
|
||||
print(r)
|
||||
299
crawl4ai/user_agent_generator.py
Normal file
299
crawl4ai/user_agent_generator.py
Normal file
@@ -0,0 +1,299 @@
|
||||
import random
|
||||
from typing import Optional, Literal, List, Dict, Tuple
|
||||
import re
|
||||
|
||||
|
||||
class UserAgentGenerator:
|
||||
"""
|
||||
Generate random user agents with specified constraints.
|
||||
|
||||
Attributes:
|
||||
desktop_platforms (dict): A dictionary of possible desktop platforms and their corresponding user agent strings.
|
||||
mobile_platforms (dict): A dictionary of possible mobile platforms and their corresponding user agent strings.
|
||||
browser_combinations (dict): A dictionary mapping the number of browsers to valid browser-name combinations.
|
||||
rendering_engines (dict): A dictionary of possible rendering engines and their version tokens.
|
||||
chrome_versions (list): A list of possible Chrome browser versions.
|
||||
firefox_versions (list): A list of possible Firefox browser versions.
|
||||
edge_versions (list): A list of possible Edge browser versions.
|
||||
safari_versions (list): A list of possible Safari browser versions.
|
||||
ios_versions (list): A list of possible iOS browser versions.
|
||||
android_versions (list): A list of possible Android browser versions.
|
||||
|
||||
Methods:
|
||||
generate(
|
||||
device_type: Optional[Literal["desktop", "mobile"]] = None,
|
||||
os_type: Optional[str] = None,
|
||||
device_brand: Optional[str] = None,
|
||||
browser_type: Optional[Literal["chrome", "edge", "safari", "firefox"]] = None,
|
||||
num_browsers: int = 3,
|
||||
): Generates a random user agent string based on the specified constraints.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
# Desktop and mobile platform definitions
|
||||
self.desktop_platforms = {
|
||||
"windows": {
|
||||
"10_64": "(Windows NT 10.0; Win64; x64)",
|
||||
"10_32": "(Windows NT 10.0; WOW64)",
|
||||
},
|
||||
"macos": {
|
||||
"intel": "(Macintosh; Intel Mac OS X 10_15_7)",
|
||||
"newer": "(Macintosh; Intel Mac OS X 10.15; rv:109.0)",
|
||||
},
|
||||
"linux": {
|
||||
"generic": "(X11; Linux x86_64)",
|
||||
"ubuntu": "(X11; Ubuntu; Linux x86_64)",
|
||||
"chrome_os": "(X11; CrOS x86_64 14541.0.0)",
|
||||
},
|
||||
}
|
||||
|
||||
self.mobile_platforms = {
|
||||
"android": {
|
||||
"samsung": "(Linux; Android 13; SM-S901B)",
|
||||
"pixel": "(Linux; Android 12; Pixel 6)",
|
||||
"oneplus": "(Linux; Android 13; OnePlus 9 Pro)",
|
||||
"xiaomi": "(Linux; Android 12; M2102J20SG)",
|
||||
},
|
||||
"ios": {
|
||||
"iphone": "(iPhone; CPU iPhone OS 16_5 like Mac OS X)",
|
||||
"ipad": "(iPad; CPU OS 16_5 like Mac OS X)",
|
||||
},
|
||||
}
|
||||
|
||||
# Browser Combinations
|
||||
self.browser_combinations = {
|
||||
1: [["chrome"], ["firefox"], ["safari"], ["edge"]],
|
||||
2: [["gecko", "firefox"], ["chrome", "safari"], ["webkit", "safari"]],
|
||||
3: [["chrome", "safari", "edge"], ["webkit", "chrome", "safari"]],
|
||||
}
|
||||
|
||||
# Rendering Engines with versions
|
||||
self.rendering_engines = {
|
||||
"chrome_webkit": "AppleWebKit/537.36",
|
||||
"safari_webkit": "AppleWebKit/605.1.15",
|
||||
"gecko": [ # Added Gecko versions
|
||||
"Gecko/20100101",
|
||||
"Gecko/20100101", # Firefox usually uses this constant version
|
||||
"Gecko/2010010",
|
||||
],
|
||||
}
|
||||
|
||||
# Browser Versions
|
||||
self.chrome_versions = [
|
||||
"Chrome/119.0.6045.199",
|
||||
"Chrome/118.0.5993.117",
|
||||
"Chrome/117.0.5938.149",
|
||||
"Chrome/116.0.5845.187",
|
||||
"Chrome/115.0.5790.171",
|
||||
]
|
||||
|
||||
self.edge_versions = [
|
||||
"Edg/119.0.2151.97",
|
||||
"Edg/118.0.2088.76",
|
||||
"Edg/117.0.2045.47",
|
||||
"Edg/116.0.1938.81",
|
||||
"Edg/115.0.1901.203",
|
||||
]
|
||||
|
||||
self.safari_versions = [
|
||||
"Safari/537.36", # For Chrome-based
|
||||
"Safari/605.1.15",
|
||||
"Safari/604.1",
|
||||
"Safari/602.1",
|
||||
"Safari/601.5.17",
|
||||
]
|
||||
|
||||
# Added Firefox versions
|
||||
self.firefox_versions = [
|
||||
"Firefox/119.0",
|
||||
"Firefox/118.0.2",
|
||||
"Firefox/117.0.1",
|
||||
"Firefox/116.0",
|
||||
"Firefox/115.0.3",
|
||||
"Firefox/114.0.2",
|
||||
"Firefox/113.0.1",
|
||||
"Firefox/112.0",
|
||||
"Firefox/111.0.1",
|
||||
"Firefox/110.0",
|
||||
]
|
||||
|
||||
def get_browser_stack(self, num_browsers: int = 1) -> List[str]:
|
||||
"""
|
||||
Get a valid combination of browser versions.
|
||||
|
||||
How it works:
|
||||
1. Check if the number of browsers is supported.
|
||||
2. Randomly choose a combination of browsers.
|
||||
3. Iterate through the combination and add browser versions.
|
||||
4. Return the browser stack.
|
||||
|
||||
Args:
|
||||
num_browsers: Number of browser specifications (1-3)
|
||||
|
||||
Returns:
|
||||
List[str]: A list of browser versions.
|
||||
"""
|
||||
if num_browsers not in self.browser_combinations:
|
||||
raise ValueError(f"Unsupported number of browsers: {num_browsers}")
|
||||
|
||||
combination = random.choice(self.browser_combinations[num_browsers])
|
||||
browser_stack = []
|
||||
|
||||
for browser in combination:
|
||||
if browser == "chrome":
|
||||
browser_stack.append(random.choice(self.chrome_versions))
|
||||
elif browser == "firefox":
|
||||
browser_stack.append(random.choice(self.firefox_versions))
|
||||
elif browser == "safari":
|
||||
browser_stack.append(random.choice(self.safari_versions))
|
||||
elif browser == "edge":
|
||||
browser_stack.append(random.choice(self.edge_versions))
|
||||
elif browser == "gecko":
|
||||
browser_stack.append(random.choice(self.rendering_engines["gecko"]))
|
||||
elif browser == "webkit":
|
||||
browser_stack.append(self.rendering_engines["chrome_webkit"])
|
||||
|
||||
return browser_stack
|
||||
|
||||
def generate(
|
||||
self,
|
||||
device_type: Optional[Literal["desktop", "mobile"]] = None,
|
||||
os_type: Optional[str] = None,
|
||||
device_brand: Optional[str] = None,
|
||||
browser_type: Optional[Literal["chrome", "edge", "safari", "firefox"]] = None,
|
||||
num_browsers: int = 3,
|
||||
) -> str:
|
||||
"""
|
||||
Generate a random user agent with specified constraints.
|
||||
|
||||
Args:
|
||||
device_type: 'desktop' or 'mobile'
|
||||
os_type: 'windows', 'macos', 'linux', 'android', 'ios'
|
||||
device_brand: Specific device brand
|
||||
browser_type: 'chrome', 'edge', 'safari', or 'firefox'
|
||||
num_browsers: Number of browser specifications (1-3)
|
||||
"""
|
||||
# Get platform string
|
||||
platform = self.get_random_platform(device_type, os_type, device_brand)
|
||||
|
||||
# Start with Mozilla
|
||||
components = ["Mozilla/5.0", platform]
|
||||
|
||||
# Add browser stack
|
||||
browser_stack = self.get_browser_stack(num_browsers)
|
||||
|
||||
# Add appropriate legacy token based on browser stack
|
||||
if "Firefox" in str(browser_stack):
|
||||
components.append(random.choice(self.rendering_engines["gecko"]))
|
||||
elif "Chrome" in str(browser_stack) or "Safari" in str(browser_stack):
|
||||
components.append(self.rendering_engines["chrome_webkit"])
|
||||
components.append("(KHTML, like Gecko)")
|
||||
|
||||
# Add browser versions
|
||||
components.extend(browser_stack)
|
||||
|
||||
return " ".join(components)
|
||||
|
||||
def generate_with_client_hints(self, **kwargs) -> Tuple[str, str]:
|
||||
"""Generate both user agent and matching client hints"""
|
||||
user_agent = self.generate(**kwargs)
|
||||
client_hints = self.generate_client_hints(user_agent)
|
||||
return user_agent, client_hints
|
||||
|
||||
def get_random_platform(self, device_type, os_type, device_brand):
|
||||
"""Helper method to get random platform based on constraints"""
|
||||
platforms = (
|
||||
self.desktop_platforms
|
||||
if device_type == "desktop"
|
||||
else self.mobile_platforms
|
||||
if device_type == "mobile"
|
||||
else {**self.desktop_platforms, **self.mobile_platforms}
|
||||
)
|
||||
|
||||
if os_type:
|
||||
for platform_group in [self.desktop_platforms, self.mobile_platforms]:
|
||||
if os_type in platform_group:
|
||||
platforms = {os_type: platform_group[os_type]}
|
||||
break
|
||||
|
||||
os_key = random.choice(list(platforms.keys()))
|
||||
if device_brand and device_brand in platforms[os_key]:
|
||||
return platforms[os_key][device_brand]
|
||||
return random.choice(list(platforms[os_key].values()))
|
||||
|
||||
def parse_user_agent(self, user_agent: str) -> Dict[str, str]:
|
||||
"""Parse a user agent string to extract browser and version information"""
|
||||
browsers = {
|
||||
"chrome": r"Chrome/(\d+)",
|
||||
"edge": r"Edg/(\d+)",
|
||||
"safari": r"Version/(\d+)",
|
||||
"firefox": r"Firefox/(\d+)",
|
||||
}
|
||||
|
||||
result = {}
|
||||
for browser, pattern in browsers.items():
|
||||
match = re.search(pattern, user_agent)
|
||||
if match:
|
||||
result[browser] = match.group(1)
|
||||
|
||||
return result
|
||||
|
||||
def generate_client_hints(self, user_agent: str) -> str:
|
||||
"""Generate Sec-CH-UA header value based on user agent string"""
|
||||
browsers = self.parse_user_agent(user_agent)
|
||||
|
||||
# Client hints components
|
||||
hints = []
|
||||
|
||||
# Handle different browser combinations
|
||||
if "chrome" in browsers:
|
||||
hints.append(f'"Chromium";v="{browsers["chrome"]}"')
|
||||
hints.append('"Not_A Brand";v="8"')
|
||||
|
||||
if "edge" in browsers:
|
||||
hints.append(f'"Microsoft Edge";v="{browsers["edge"]}"')
|
||||
else:
|
||||
hints.append(f'"Google Chrome";v="{browsers["chrome"]}"')
|
||||
|
||||
elif "firefox" in browsers:
|
||||
# Firefox doesn't typically send Sec-CH-UA
|
||||
return '""'
|
||||
|
||||
elif "safari" in browsers:
|
||||
# Safari's format for client hints
|
||||
hints.append(f'"Safari";v="{browsers["safari"]}"')
|
||||
hints.append('"Not_A Brand";v="8"')
|
||||
|
||||
return ", ".join(hints)
|
||||
|
||||
|
||||
# Example usage:
|
||||
if __name__ == "__main__":
|
||||
generator = UserAgentGenerator()
|
||||
print(generator.generate())
|
||||
|
||||
print("\nSingle browser (Chrome):")
|
||||
print(generator.generate(num_browsers=1, browser_type="chrome"))
|
||||
|
||||
print("\nTwo browsers (Gecko/Firefox):")
|
||||
print(generator.generate(num_browsers=2))
|
||||
|
||||
print("\nThree browsers (Chrome/Safari/Edge):")
|
||||
print(generator.generate(num_browsers=3))
|
||||
|
||||
print("\nFirefox on Linux:")
|
||||
print(
|
||||
generator.generate(
|
||||
device_type="desktop",
|
||||
os_type="linux",
|
||||
browser_type="firefox",
|
||||
num_browsers=2,
|
||||
)
|
||||
)
|
||||
|
||||
print("\nChrome/Safari/Edge on Windows:")
|
||||
print(generator.generate(device_type="desktop", os_type="windows", num_browsers=3))
|
||||
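The `__main__` block above does not exercise `generate_with_client_hints`; a short sketch of how the paired values might be used in request headers (illustrative only, assuming the module is importable as `crawl4ai.user_agent_generator`):

```python
from crawl4ai.user_agent_generator import UserAgentGenerator

generator = UserAgentGenerator()
user_agent, client_hints = generator.generate_with_client_hints(
    device_type="desktop", os_type="windows", num_browsers=3
)
# Both values are plain strings, ready to drop into outgoing request headers.
headers = {
    "User-Agent": user_agent,
    "Sec-CH-UA": client_hints,
}
print(headers)
```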
2071
crawl4ai/utils.py
2071
crawl4ai/utils.py
File diff suppressed because it is too large
Load Diff
29
crawl4ai/version_manager.py
Normal file
29
crawl4ai/version_manager.py
Normal file
@@ -0,0 +1,29 @@
|
||||
# version_manager.py
|
||||
from pathlib import Path
|
||||
from packaging import version
|
||||
from . import __version__
|
||||
|
||||
|
||||
class VersionManager:
|
||||
def __init__(self):
|
||||
self.home_dir = Path.home() / ".crawl4ai"
|
||||
self.version_file = self.home_dir / "version.txt"
|
||||
|
||||
def get_installed_version(self):
|
||||
"""Get the version recorded in home directory"""
|
||||
if not self.version_file.exists():
|
||||
return None
|
||||
try:
|
||||
return version.parse(self.version_file.read_text().strip())
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
def update_version(self):
|
||||
"""Update the version file to current library version"""
|
||||
self.version_file.write_text(__version__.__version__)
|
||||
|
||||
def needs_update(self):
|
||||
"""Check if database needs update based on version"""
|
||||
installed = self.get_installed_version()
|
||||
current = version.parse(__version__.__version__)
|
||||
return installed is None or installed < current
|
||||
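A minimal sketch of how the new `VersionManager` might be used, assuming the caller performs its own migration step between the check and the version write (illustrative only):

```python
from crawl4ai.version_manager import VersionManager

vm = VersionManager()
if vm.needs_update():
    # ... run whatever data migration the new release requires (hypothetical step) ...
    vm.update_version()            # records the current library version in version.txt
print(vm.get_installed_version())  # packaging Version object, or None if never recorded
```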
@@ -1,357 +0,0 @@
|
||||
import os, time
|
||||
os.environ["TOKENIZERS_PARALLELISM"] = "false"
|
||||
from pathlib import Path
|
||||
|
||||
from .models import UrlModel, CrawlResult
|
||||
from .database import init_db, get_cached_url, cache_url, DB_PATH, flush_db
|
||||
from .utils import *
|
||||
from .chunking_strategy import *
|
||||
from .extraction_strategy import *
|
||||
from .crawler_strategy import *
|
||||
from typing import List
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
from .config import *
|
||||
|
||||
|
||||
class WebCrawler:
|
||||
def __init__(
|
||||
self,
|
||||
# db_path: str = None,
|
||||
crawler_strategy: CrawlerStrategy = None,
|
||||
always_by_pass_cache: bool = False,
|
||||
verbose: bool = False,
|
||||
):
|
||||
# self.db_path = db_path
|
||||
self.crawler_strategy = crawler_strategy or LocalSeleniumCrawlerStrategy(verbose=verbose)
|
||||
self.always_by_pass_cache = always_by_pass_cache
|
||||
|
||||
# Create the .crawl4ai folder in the user's home directory if it doesn't exist
|
||||
self.crawl4ai_folder = os.path.join(Path.home(), ".crawl4ai")
|
||||
os.makedirs(self.crawl4ai_folder, exist_ok=True)
|
||||
os.makedirs(f"{self.crawl4ai_folder}/cache", exist_ok=True)
|
||||
|
||||
# If db_path is not provided, use the default path
|
||||
# if not db_path:
|
||||
# self.db_path = f"{self.crawl4ai_folder}/crawl4ai.db"
|
||||
|
||||
# flush_db()
|
||||
init_db()
|
||||
|
||||
self.ready = False
|
||||
|
||||
def warmup(self):
|
||||
print("[LOG] 🌤️ Warming up the WebCrawler")
|
||||
result = self.run(
|
||||
url='https://crawl4ai.uccode.io/',
|
||||
word_count_threshold=5,
|
||||
extraction_strategy= NoExtractionStrategy(),
|
||||
bypass_cache=False,
|
||||
verbose = False
|
||||
)
|
||||
self.ready = True
|
||||
print("[LOG] 🌞 WebCrawler is ready to crawl")
|
||||
|
||||
def fetch_page(
|
||||
self,
|
||||
url_model: UrlModel,
|
||||
provider: str = DEFAULT_PROVIDER,
|
||||
api_token: str = None,
|
||||
extract_blocks_flag: bool = True,
|
||||
word_count_threshold=MIN_WORD_THRESHOLD,
|
||||
css_selector: str = None,
|
||||
screenshot: bool = False,
|
||||
use_cached_html: bool = False,
|
||||
extraction_strategy: ExtractionStrategy = None,
|
||||
chunking_strategy: ChunkingStrategy = RegexChunking(),
|
||||
**kwargs,
|
||||
) -> CrawlResult:
|
||||
return self.run(
|
||||
url_model.url,
|
||||
word_count_threshold,
|
||||
extraction_strategy or NoExtractionStrategy(),
|
||||
chunking_strategy,
|
||||
bypass_cache=url_model.forced,
|
||||
css_selector=css_selector,
|
||||
screenshot=screenshot,
|
||||
**kwargs,
|
||||
)
|
||||
pass
|
||||
|
||||
def run_old(
|
||||
self,
|
||||
url: str,
|
||||
word_count_threshold=MIN_WORD_THRESHOLD,
|
||||
extraction_strategy: ExtractionStrategy = None,
|
||||
chunking_strategy: ChunkingStrategy = RegexChunking(),
|
||||
bypass_cache: bool = False,
|
||||
css_selector: str = None,
|
||||
screenshot: bool = False,
|
||||
user_agent: str = None,
|
||||
verbose=True,
|
||||
**kwargs,
|
||||
) -> CrawlResult:
|
||||
if user_agent:
|
||||
self.crawler_strategy.update_user_agent(user_agent)
|
||||
extraction_strategy = extraction_strategy or NoExtractionStrategy()
|
||||
extraction_strategy.verbose = verbose
|
||||
# Check if extraction strategy is an instance of ExtractionStrategy if not raise an error
|
||||
if not isinstance(extraction_strategy, ExtractionStrategy):
|
||||
raise ValueError("Unsupported extraction strategy")
|
||||
if not isinstance(chunking_strategy, ChunkingStrategy):
|
||||
raise ValueError("Unsupported chunking strategy")
|
||||
|
||||
# make sure word_count_threshold is not less than MIN_WORD_THRESHOLD
|
||||
if word_count_threshold < MIN_WORD_THRESHOLD:
|
||||
word_count_threshold = MIN_WORD_THRESHOLD
|
||||
|
||||
# Check cache first
|
||||
if not bypass_cache and not self.always_by_pass_cache:
|
||||
cached = get_cached_url(url)
|
||||
if cached:
|
||||
return CrawlResult(
|
||||
**{
|
||||
"url": cached[0],
|
||||
"html": cached[1],
|
||||
"cleaned_html": cached[2],
|
||||
"markdown": cached[3],
|
||||
"extracted_content": cached[4],
|
||||
"success": cached[5],
|
||||
"media": json.loads(cached[6] or "{}"),
|
||||
"links": json.loads(cached[7] or "{}"),
|
||||
"metadata": json.loads(cached[8] or "{}"), # "metadata": "{}
|
||||
"screenshot": cached[9],
|
||||
"error_message": "",
|
||||
}
|
||||
)
|
||||
|
||||
# Initialize WebDriver for crawling
|
||||
t = time.time()
|
||||
if kwargs.get("js", None):
|
||||
self.crawler_strategy.js_code = kwargs.get("js")
|
||||
html = self.crawler_strategy.crawl(url)
|
||||
base64_image = None
|
||||
if screenshot:
|
||||
base64_image = self.crawler_strategy.take_screenshot()
|
||||
success = True
|
||||
error_message = ""
|
||||
# Extract content from HTML
|
||||
try:
|
||||
result = get_content_of_website(url, html, word_count_threshold, css_selector=css_selector)
|
||||
metadata = extract_metadata(html)
|
||||
if result is None:
|
||||
raise ValueError(f"Failed to extract content from the website: {url}")
|
||||
except InvalidCSSSelectorError as e:
|
||||
raise ValueError(str(e))
|
||||
|
||||
cleaned_html = result.get("cleaned_html", "")
|
||||
markdown = result.get("markdown", "")
|
||||
media = result.get("media", [])
|
||||
links = result.get("links", [])
|
||||
|
||||
# Print a professional LOG-style message showing the time taken and that crawling is done
|
||||
if verbose:
|
||||
print(
|
||||
f"[LOG] 🚀 Crawling done for {url}, success: {success}, time taken: {time.time() - t} seconds"
|
||||
)
|
||||
|
||||
extracted_content = []
|
||||
if verbose:
|
||||
print(f"[LOG] 🔥 Extracting semantic blocks for {url}, Strategy: {extraction_strategy.name}")
|
||||
t = time.time()
|
||||
# Split markdown into sections
|
||||
sections = chunking_strategy.chunk(markdown)
|
||||
# sections = merge_chunks_based_on_token_threshold(sections, CHUNK_TOKEN_THRESHOLD)
|
||||
|
||||
extracted_content = extraction_strategy.run(
|
||||
url, sections,
|
||||
)
|
||||
extracted_content = json.dumps(extracted_content)
|
||||
|
||||
if verbose:
|
||||
print(
|
||||
f"[LOG] 🚀 Extraction done for {url}, time taken: {time.time() - t} seconds."
|
||||
)
|
||||
|
||||
# Cache the result
|
||||
cleaned_html = beautify_html(cleaned_html)
|
||||
cache_url(
|
||||
url,
|
||||
html,
|
||||
cleaned_html,
|
||||
markdown,
|
||||
extracted_content,
|
||||
success,
|
||||
json.dumps(media),
|
||||
json.dumps(links),
|
||||
json.dumps(metadata),
|
||||
screenshot=base64_image,
|
||||
)
|
||||
|
||||
return CrawlResult(
|
||||
url=url,
|
||||
html=html,
|
||||
cleaned_html=cleaned_html,
|
||||
markdown=markdown,
|
||||
media=media,
|
||||
links=links,
|
||||
metadata=metadata,
|
||||
screenshot=base64_image,
|
||||
extracted_content=extracted_content,
|
||||
success=success,
|
||||
error_message=error_message,
|
||||
)
|
||||
|
||||
def fetch_pages(
|
||||
self,
|
||||
url_models: List[UrlModel],
|
||||
provider: str = DEFAULT_PROVIDER,
|
||||
api_token: str = None,
|
||||
extract_blocks_flag: bool = True,
|
||||
word_count_threshold=MIN_WORD_THRESHOLD,
|
||||
use_cached_html: bool = False,
|
||||
css_selector: str = None,
|
||||
screenshot: bool = False,
|
||||
extraction_strategy: ExtractionStrategy = None,
|
||||
chunking_strategy: ChunkingStrategy = RegexChunking(),
|
||||
**kwargs,
|
||||
) -> List[CrawlResult]:
|
||||
extraction_strategy = extraction_strategy or NoExtractionStrategy()
|
||||
def fetch_page_wrapper(url_model, *args, **kwargs):
|
||||
return self.fetch_page(url_model, *args, **kwargs)
|
||||
|
||||
with ThreadPoolExecutor() as executor:
|
||||
results = list(
|
||||
executor.map(
|
||||
fetch_page_wrapper,
|
||||
url_models,
|
||||
[provider] * len(url_models),
|
||||
[api_token] * len(url_models),
|
||||
[extract_blocks_flag] * len(url_models),
|
||||
[word_count_threshold] * len(url_models),
|
||||
[css_selector] * len(url_models),
|
||||
[screenshot] * len(url_models),
|
||||
[use_cached_html] * len(url_models),
|
||||
[extraction_strategy] * len(url_models),
|
||||
[chunking_strategy] * len(url_models),
|
||||
*[kwargs] * len(url_models),
|
||||
)
|
||||
)
|
||||
|
||||
return results
|
||||
|
||||
def run(
|
||||
self,
|
||||
url: str,
|
||||
word_count_threshold=MIN_WORD_THRESHOLD,
|
||||
extraction_strategy: ExtractionStrategy = None,
|
||||
chunking_strategy: ChunkingStrategy = RegexChunking(),
|
||||
bypass_cache: bool = False,
|
||||
css_selector: str = None,
|
||||
screenshot: bool = False,
|
||||
user_agent: str = None,
|
||||
verbose=True,
|
||||
**kwargs,
|
||||
) -> CrawlResult:
|
||||
extraction_strategy = extraction_strategy or NoExtractionStrategy()
|
||||
extraction_strategy.verbose = verbose
|
||||
if not isinstance(extraction_strategy, ExtractionStrategy):
|
||||
raise ValueError("Unsupported extraction strategy")
|
||||
if not isinstance(chunking_strategy, ChunkingStrategy):
|
||||
raise ValueError("Unsupported chunking strategy")
|
||||
|
||||
if word_count_threshold < MIN_WORD_THRESHOLD:
|
||||
word_count_threshold = MIN_WORD_THRESHOLD
|
||||
|
||||
# Check cache first
|
||||
cached = None
|
||||
extracted_content = None
|
||||
if not bypass_cache and not self.always_by_pass_cache:
|
||||
cached = get_cached_url(url)
|
||||
|
||||
if cached:
|
||||
html = cached[1]
|
||||
extracted_content = cached[2]
|
||||
if screenshot:
|
||||
screenshot = cached[9]
|
||||
|
||||
else:
|
||||
if user_agent:
|
||||
self.crawler_strategy.update_user_agent(user_agent)
|
||||
html = self.crawler_strategy.crawl(url)
|
||||
if screenshot:
|
||||
screenshot = self.crawler_strategy.take_screenshot()
|
||||
|
||||
return self.process_html(url, html, extracted_content, word_count_threshold, extraction_strategy, chunking_strategy, css_selector, screenshot, verbose, bool(cached), **kwargs)
|
||||
|
||||
def process_html(
|
||||
self,
|
||||
url: str,
|
||||
html: str,
|
||||
extracted_content: str,
|
||||
word_count_threshold: int,
|
||||
extraction_strategy: ExtractionStrategy,
|
||||
chunking_strategy: ChunkingStrategy,
|
||||
css_selector: str,
|
||||
screenshot: bool,
|
||||
verbose: bool,
|
||||
is_cached: bool,
|
||||
**kwargs,
|
||||
) -> CrawlResult:
|
||||
t = time.time()
|
||||
# Extract content from HTML
|
||||
try:
|
||||
result = get_content_of_website(url, html, word_count_threshold, css_selector=css_selector)
|
||||
metadata = extract_metadata(html)
|
||||
if result is None:
|
||||
raise ValueError(f"Failed to extract content from the website: {url}")
|
||||
except InvalidCSSSelectorError as e:
|
||||
raise ValueError(str(e))
|
||||
|
||||
cleaned_html = result.get("cleaned_html", "")
|
||||
markdown = result.get("markdown", "")
|
||||
media = result.get("media", [])
|
||||
links = result.get("links", [])
|
||||
|
||||
if verbose:
|
||||
print(f"[LOG] 🚀 Crawling done for {url}, success: True, time taken: {time.time() - t} seconds")
|
||||
|
||||
if extracted_content is None:
|
||||
if verbose:
|
||||
print(f"[LOG] 🔥 Extracting semantic blocks for {url}, Strategy: {extraction_strategy.name}")
|
||||
|
||||
sections = chunking_strategy.chunk(markdown)
|
||||
extracted_content = extraction_strategy.run(url, sections)
|
||||
extracted_content = json.dumps(extracted_content)
|
||||
|
||||
if verbose:
|
||||
print(f"[LOG] 🚀 Extraction done for {url}, time taken: {time.time() - t} seconds.")
|
||||
|
||||
screenshot = None if not screenshot else screenshot
|
||||
|
||||
if not is_cached:
|
||||
cache_url(
|
||||
url,
|
||||
html,
|
||||
cleaned_html,
|
||||
markdown,
|
||||
extracted_content,
|
||||
True,
|
||||
json.dumps(media),
|
||||
json.dumps(links),
|
||||
json.dumps(metadata),
|
||||
screenshot=screenshot,
|
||||
)
|
||||
|
||||
return CrawlResult(
|
||||
url=url,
|
||||
html=html,
|
||||
cleaned_html=cleaned_html,
|
||||
markdown=markdown,
|
||||
media=media,
|
||||
links=links,
|
||||
metadata=metadata,
|
||||
screenshot=screenshot,
|
||||
extracted_content=extracted_content,
|
||||
success=True,
|
||||
error_message="",
|
||||
)
|
||||
@@ -1,43 +1,58 @@
|
||||
import os, time
|
||||
|
||||
os.environ["TOKENIZERS_PARALLELISM"] = "false"
|
||||
from pathlib import Path
|
||||
|
||||
from .models import UrlModel, CrawlResult
|
||||
from .database import init_db, get_cached_url, cache_url, DB_PATH, flush_db
|
||||
from .database import init_db, get_cached_url, cache_url
|
||||
from .utils import *
|
||||
from .chunking_strategy import *
|
||||
from .extraction_strategy import *
|
||||
from .crawler_strategy import *
|
||||
from typing import List
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
from .content_scraping_strategy import WebScrapingStrategy
|
||||
from .config import *
|
||||
import warnings
|
||||
import json
|
||||
warnings.filterwarnings("ignore", message='Field "model_name" has conflict with protected namespace "model_".')
|
||||
|
||||
warnings.filterwarnings(
|
||||
"ignore",
|
||||
message='Field "model_name" has conflict with protected namespace "model_".',
|
||||
)
|
||||
|
||||
|
||||
class WebCrawler:
|
||||
def __init__(self, crawler_strategy: CrawlerStrategy = None, always_by_pass_cache: bool = False, verbose: bool = False):
|
||||
self.crawler_strategy = crawler_strategy or LocalSeleniumCrawlerStrategy(verbose=verbose)
|
||||
def __init__(
|
||||
self,
|
||||
crawler_strategy: CrawlerStrategy = None,
|
||||
always_by_pass_cache: bool = False,
|
||||
verbose: bool = False,
|
||||
):
|
||||
self.crawler_strategy = crawler_strategy or LocalSeleniumCrawlerStrategy(
|
||||
verbose=verbose
|
||||
)
|
||||
self.always_by_pass_cache = always_by_pass_cache
|
||||
self.crawl4ai_folder = os.path.join(Path.home(), ".crawl4ai")
|
||||
self.crawl4ai_folder = os.path.join(
|
||||
os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".crawl4ai"
|
||||
)
|
||||
os.makedirs(self.crawl4ai_folder, exist_ok=True)
|
||||
os.makedirs(f"{self.crawl4ai_folder}/cache", exist_ok=True)
|
||||
init_db()
|
||||
self.ready = False
|
||||
|
||||
|
||||
def warmup(self):
|
||||
print("[LOG] 🌤️ Warming up the WebCrawler")
|
||||
self.run(
|
||||
url='https://google.com/',
|
||||
url="https://google.com/",
|
||||
word_count_threshold=5,
|
||||
extraction_strategy=NoExtractionStrategy(),
|
||||
bypass_cache=False,
|
||||
verbose=False
|
||||
verbose=False,
|
||||
)
|
||||
self.ready = True
|
||||
print("[LOG] 🌞 WebCrawler is ready to crawl")
|
||||
|
||||
|
||||
def fetch_page(
|
||||
self,
|
||||
url_model: UrlModel,
|
||||
@@ -79,6 +94,7 @@ class WebCrawler:
|
||||
**kwargs,
|
||||
) -> List[CrawlResult]:
|
||||
extraction_strategy = extraction_strategy or NoExtractionStrategy()
|
||||
|
||||
def fetch_page_wrapper(url_model, *args, **kwargs):
|
||||
return self.fetch_page(url_model, *args, **kwargs)
|
||||
|
||||
@@ -103,136 +119,176 @@ class WebCrawler:
|
||||
return results
|
||||
|
||||
def run(
|
||||
self,
|
||||
url: str,
|
||||
word_count_threshold=MIN_WORD_THRESHOLD,
|
||||
extraction_strategy: ExtractionStrategy = None,
|
||||
chunking_strategy: ChunkingStrategy = RegexChunking(),
|
||||
bypass_cache: bool = False,
|
||||
css_selector: str = None,
|
||||
screenshot: bool = False,
|
||||
user_agent: str = None,
|
||||
verbose=True,
|
||||
**kwargs,
|
||||
) -> CrawlResult:
|
||||
try:
|
||||
extraction_strategy = extraction_strategy or NoExtractionStrategy()
|
||||
extraction_strategy.verbose = verbose
|
||||
if not isinstance(extraction_strategy, ExtractionStrategy):
|
||||
raise ValueError("Unsupported extraction strategy")
|
||||
if not isinstance(chunking_strategy, ChunkingStrategy):
|
||||
raise ValueError("Unsupported chunking strategy")
|
||||
|
||||
word_count_threshold = max(word_count_threshold, MIN_WORD_THRESHOLD)
|
||||
self,
|
||||
url: str,
|
||||
word_count_threshold=MIN_WORD_THRESHOLD,
|
||||
extraction_strategy: ExtractionStrategy = None,
|
||||
chunking_strategy: ChunkingStrategy = RegexChunking(),
|
||||
bypass_cache: bool = False,
|
||||
css_selector: str = None,
|
||||
screenshot: bool = False,
|
||||
user_agent: str = None,
|
||||
verbose=True,
|
||||
**kwargs,
|
||||
) -> CrawlResult:
|
||||
try:
|
||||
extraction_strategy = extraction_strategy or NoExtractionStrategy()
|
||||
extraction_strategy.verbose = verbose
|
||||
if not isinstance(extraction_strategy, ExtractionStrategy):
|
||||
raise ValueError("Unsupported extraction strategy")
|
||||
if not isinstance(chunking_strategy, ChunkingStrategy):
|
||||
raise ValueError("Unsupported chunking strategy")
|
||||
|
||||
cached = None
|
||||
screenshot_data = None
|
||||
extracted_content = None
|
||||
if not bypass_cache and not self.always_by_pass_cache:
|
||||
cached = get_cached_url(url)
|
||||
|
||||
if kwargs.get("warmup", True) and not self.ready:
|
||||
return None
|
||||
|
||||
if cached:
|
||||
html = sanitize_input_encode(cached[1])
|
||||
extracted_content = sanitize_input_encode(cached[4])
|
||||
if screenshot:
|
||||
screenshot_data = cached[9]
|
||||
if not screenshot_data:
|
||||
cached = None
|
||||
|
||||
if not cached or not html:
|
||||
if user_agent:
|
||||
self.crawler_strategy.update_user_agent(user_agent)
|
||||
t1 = time.time()
|
||||
html = sanitize_input_encode(self.crawler_strategy.crawl(url, **kwargs))
|
||||
t2 = time.time()
|
||||
if verbose:
|
||||
print(f"[LOG] 🚀 Crawling done for {url}, success: {bool(html)}, time taken: {t2 - t1:.2f} seconds")
|
||||
if screenshot:
|
||||
screenshot_data = self.crawler_strategy.take_screenshot()
|
||||
word_count_threshold = max(word_count_threshold, MIN_WORD_THRESHOLD)
|
||||
|
||||
|
||||
crawl_result = self.process_html(url, html, extracted_content, word_count_threshold, extraction_strategy, chunking_strategy, css_selector, screenshot_data, verbose, bool(cached), **kwargs)
|
||||
crawl_result.success = bool(html)
|
||||
return crawl_result
|
||||
except Exception as e:
|
||||
if not hasattr(e, "msg"):
|
||||
e.msg = str(e)
|
||||
print(f"[ERROR] 🚫 Failed to crawl {url}, error: {e.msg}")
|
||||
return CrawlResult(url=url, html="", success=False, error_message=e.msg)
|
||||
cached = None
|
||||
screenshot_data = None
|
||||
extracted_content = None
|
||||
if not bypass_cache and not self.always_by_pass_cache:
|
||||
cached = get_cached_url(url)
|
||||
|
||||
if kwargs.get("warmup", True) and not self.ready:
|
||||
return None
|
||||
|
||||
if cached:
|
||||
html = sanitize_input_encode(cached[1])
|
||||
extracted_content = sanitize_input_encode(cached[4])
|
||||
if screenshot:
|
||||
screenshot_data = cached[9]
|
||||
if not screenshot_data:
|
||||
cached = None
|
||||
|
||||
if not cached or not html:
|
||||
if user_agent:
|
||||
self.crawler_strategy.update_user_agent(user_agent)
|
||||
t1 = time.time()
|
||||
html = sanitize_input_encode(self.crawler_strategy.crawl(url, **kwargs))
|
||||
t2 = time.time()
|
||||
if verbose:
|
||||
print(
|
||||
f"[LOG] 🚀 Crawling done for {url}, success: {bool(html)}, time taken: {t2 - t1:.2f} seconds"
|
||||
)
|
||||
if screenshot:
|
||||
screenshot_data = self.crawler_strategy.take_screenshot()
|
||||
|
||||
crawl_result = self.process_html(
|
||||
url,
|
||||
html,
|
||||
extracted_content,
|
||||
word_count_threshold,
|
||||
extraction_strategy,
|
||||
chunking_strategy,
|
||||
css_selector,
|
||||
screenshot_data,
|
||||
verbose,
|
||||
bool(cached),
|
||||
**kwargs,
|
||||
)
|
||||
crawl_result.success = bool(html)
|
||||
return crawl_result
|
||||
except Exception as e:
|
||||
if not hasattr(e, "msg"):
|
||||
e.msg = str(e)
|
||||
print(f"[ERROR] 🚫 Failed to crawl {url}, error: {e.msg}")
|
||||
return CrawlResult(url=url, html="", success=False, error_message=e.msg)
|
||||
|
||||
def process_html(
|
||||
self,
|
||||
url: str,
|
||||
html: str,
|
||||
extracted_content: str,
|
||||
word_count_threshold: int,
|
||||
extraction_strategy: ExtractionStrategy,
|
||||
chunking_strategy: ChunkingStrategy,
|
||||
css_selector: str,
|
||||
screenshot: bool,
|
||||
verbose: bool,
|
||||
is_cached: bool,
|
||||
**kwargs,
|
||||
) -> CrawlResult:
|
||||
t = time.time()
|
||||
# Extract content from HTML
|
||||
try:
|
||||
t1 = time.time()
|
||||
result = get_content_of_website_optimized(url, html, word_count_threshold, css_selector=css_selector, only_text=kwargs.get("only_text", False))
|
||||
if verbose:
|
||||
print(f"[LOG] 🚀 Content extracted for {url}, success: True, time taken: {time.time() - t1:.2f} seconds")
|
||||
|
||||
if result is None:
|
||||
raise ValueError(f"Failed to extract content from the website: {url}")
|
||||
except InvalidCSSSelectorError as e:
|
||||
raise ValueError(str(e))
|
||||
|
||||
cleaned_html = sanitize_input_encode(result.get("cleaned_html", ""))
|
||||
markdown = sanitize_input_encode(result.get("markdown", ""))
|
||||
media = result.get("media", [])
|
||||
links = result.get("links", [])
|
||||
metadata = result.get("metadata", {})
|
||||
|
||||
if extracted_content is None:
|
||||
if verbose:
|
||||
print(f"[LOG] 🔥 Extracting semantic blocks for {url}, Strategy: {extraction_strategy.name}")
|
||||
self,
|
||||
url: str,
|
||||
html: str,
|
||||
extracted_content: str,
|
||||
word_count_threshold: int,
|
||||
extraction_strategy: ExtractionStrategy,
|
||||
chunking_strategy: ChunkingStrategy,
|
||||
css_selector: str,
|
||||
screenshot: bool,
|
||||
verbose: bool,
|
||||
is_cached: bool,
|
||||
**kwargs,
|
||||
) -> CrawlResult:
|
||||
t = time.time()
|
||||
# Extract content from HTML
|
||||
try:
|
||||
t1 = time.time()
|
||||
scrapping_strategy = WebScrapingStrategy()
|
||||
extra_params = {
|
||||
k: v
|
||||
for k, v in kwargs.items()
|
||||
if k not in ["only_text", "image_description_min_word_threshold"]
|
||||
}
|
||||
result = scrapping_strategy.scrap(
|
||||
url,
|
||||
html,
|
||||
word_count_threshold=word_count_threshold,
|
||||
css_selector=css_selector,
|
||||
only_text=kwargs.get("only_text", False),
|
||||
image_description_min_word_threshold=kwargs.get(
|
||||
"image_description_min_word_threshold",
|
||||
IMAGE_DESCRIPTION_MIN_WORD_THRESHOLD,
|
||||
),
|
||||
**extra_params,
|
||||
)
|
||||
|
||||
sections = chunking_strategy.chunk(markdown)
|
||||
extracted_content = extraction_strategy.run(url, sections)
|
||||
extracted_content = json.dumps(extracted_content, indent=4, default=str, ensure_ascii=False)
|
||||
# result = get_content_of_website_optimized(url, html, word_count_threshold, css_selector=css_selector, only_text=kwargs.get("only_text", False))
|
||||
if verbose:
|
||||
print(
|
||||
f"[LOG] 🚀 Content extracted for {url}, success: True, time taken: {time.time() - t1:.2f} seconds"
|
||||
)
|
||||
|
||||
if verbose:
|
||||
print(f"[LOG] 🚀 Extraction done for {url}, time taken: {time.time() - t:.2f} seconds.")
|
||||
|
||||
screenshot = None if not screenshot else screenshot
|
||||
|
||||
if not is_cached:
|
||||
cache_url(
|
||||
url,
|
||||
html,
|
||||
cleaned_html,
|
||||
markdown,
|
||||
extracted_content,
|
||||
True,
|
||||
json.dumps(media),
|
||||
json.dumps(links),
|
||||
json.dumps(metadata),
|
||||
screenshot=screenshot,
|
||||
)
|
||||
|
||||
return CrawlResult(
|
||||
url=url,
|
||||
html=html,
|
||||
cleaned_html=format_html(cleaned_html),
|
||||
markdown=markdown,
|
||||
media=media,
|
||||
links=links,
|
||||
metadata=metadata,
|
||||
if result is None:
|
||||
raise ValueError(f"Failed to extract content from the website: {url}")
|
||||
except InvalidCSSSelectorError as e:
|
||||
raise ValueError(str(e))
|
||||
|
||||
cleaned_html = sanitize_input_encode(result.get("cleaned_html", ""))
|
||||
markdown = sanitize_input_encode(result.get("markdown", ""))
|
||||
media = result.get("media", [])
|
||||
links = result.get("links", [])
|
||||
metadata = result.get("metadata", {})
|
||||
|
||||
if extracted_content is None:
|
||||
if verbose:
|
||||
print(
|
||||
f"[LOG] 🔥 Extracting semantic blocks for {url}, Strategy: {extraction_strategy.name}"
|
||||
)
|
||||
|
||||
sections = chunking_strategy.chunk(markdown)
|
||||
extracted_content = extraction_strategy.run(url, sections)
|
||||
extracted_content = json.dumps(
|
||||
extracted_content, indent=4, default=str, ensure_ascii=False
|
||||
)
|
||||
|
||||
if verbose:
|
||||
print(
|
||||
f"[LOG] 🚀 Extraction done for {url}, time taken: {time.time() - t:.2f} seconds."
|
||||
)
|
||||
|
||||
screenshot = None if not screenshot else screenshot
|
||||
|
||||
if not is_cached:
|
||||
cache_url(
|
||||
url,
|
||||
html,
|
||||
cleaned_html,
|
||||
markdown,
|
||||
extracted_content,
|
||||
True,
|
||||
json.dumps(media),
|
||||
json.dumps(links),
|
||||
json.dumps(metadata),
|
||||
screenshot=screenshot,
|
||||
extracted_content=extracted_content,
|
||||
success=True,
|
||||
error_message="",
|
||||
)
|
||||
)
|
||||
|
||||
return CrawlResult(
|
||||
url=url,
|
||||
html=html,
|
||||
cleaned_html=format_html(cleaned_html),
|
||||
markdown=markdown,
|
||||
media=media,
|
||||
links=links,
|
||||
metadata=metadata,
|
||||
screenshot=screenshot,
|
||||
extracted_content=extracted_content,
|
||||
success=True,
|
||||
error_message="",
|
||||
)
|
||||
|
||||
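A minimal sketch of the synchronous WebCrawler flow after this refactor (illustrative only; it assumes `WebCrawler` is exported from the package root and that `run()` is called only after `warmup()`, since the new code returns `None` before the crawler is ready):

```python
from crawl4ai import WebCrawler
from crawl4ai.extraction_strategy import NoExtractionStrategy

crawler = WebCrawler(verbose=True)
crawler.warmup()                 # sets crawler.ready; run() returns None before warmup
result = crawler.run(
    url="https://example.com",
    word_count_threshold=10,
    extraction_strategy=NoExtractionStrategy(),
    bypass_cache=True,
)
if result and result.success:
    print(result.markdown[:200])
```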
67
docker-compose.yml
Normal file
67
docker-compose.yml
Normal file
@@ -0,0 +1,67 @@
|
||||
services:
|
||||
# Local build services for different platforms
|
||||
crawl4ai-amd64:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile
|
||||
args:
|
||||
PYTHON_VERSION: "3.10"
|
||||
INSTALL_TYPE: ${INSTALL_TYPE:-basic}
|
||||
ENABLE_GPU: false
|
||||
platforms:
|
||||
- linux/amd64
|
||||
profiles: ["local-amd64"]
|
||||
extends: &base-config
|
||||
file: docker-compose.yml
|
||||
service: base-config
|
||||
|
||||
crawl4ai-arm64:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile
|
||||
args:
|
||||
PYTHON_VERSION: "3.10"
|
||||
INSTALL_TYPE: ${INSTALL_TYPE:-basic}
|
||||
ENABLE_GPU: false
|
||||
platforms:
|
||||
- linux/arm64
|
||||
profiles: ["local-arm64"]
|
||||
extends: *base-config
|
||||
|
||||
# Hub services for different platforms and versions
|
||||
crawl4ai-hub-amd64:
|
||||
image: unclecode/crawl4ai:${VERSION:-basic}-amd64
|
||||
profiles: ["hub-amd64"]
|
||||
extends: *base-config
|
||||
|
||||
crawl4ai-hub-arm64:
|
||||
image: unclecode/crawl4ai:${VERSION:-basic}-arm64
|
||||
profiles: ["hub-arm64"]
|
||||
extends: *base-config
|
||||
|
||||
# Base configuration to be extended
|
||||
base-config:
|
||||
ports:
|
||||
- "11235:11235"
|
||||
- "8000:8000"
|
||||
- "9222:9222"
|
||||
- "8080:8080"
|
||||
environment:
|
||||
- CRAWL4AI_API_TOKEN=${CRAWL4AI_API_TOKEN:-}
|
||||
- OPENAI_API_KEY=${OPENAI_API_KEY:-}
|
||||
- CLAUDE_API_KEY=${CLAUDE_API_KEY:-}
|
||||
volumes:
|
||||
- /dev/shm:/dev/shm
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 4G
|
||||
reservations:
|
||||
memory: 1G
|
||||
restart: unless-stopped
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:11235/health"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
start_period: 40s
|
||||
BIN
docs/.DS_Store
vendored
BIN
docs/.DS_Store
vendored
Binary file not shown.
BIN
docs/assets/pitch-dark.png
Normal file
BIN
docs/assets/pitch-dark.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 33 KiB |
64
docs/assets/pitch-dark.svg
Normal file
64
docs/assets/pitch-dark.svg
Normal file
@@ -0,0 +1,64 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 800 500">
|
||||
<!-- Background -->
|
||||
<rect width="800" height="500" fill="#1a1a1a"/>
|
||||
|
||||
<!-- Opportunities Section -->
|
||||
<g transform="translate(50,50)">
|
||||
<!-- Opportunity 1 Box -->
|
||||
<rect x="0" y="0" width="300" height="150" rx="10" fill="#1a2d3d" stroke="#64b5f6" stroke-width="2"/>
|
||||
<text x="150" y="30" text-anchor="middle" font-family="Arial" font-weight="bold" font-size="16" fill="#64b5f6">Data Capitalization Opportunity</text>
|
||||
<text x="150" y="60" text-anchor="middle" font-family="Arial" font-size="12" fill="#e0e0e0">
|
||||
<tspan x="150" dy="0">Transform digital footprints into assets</tspan>
|
||||
<tspan x="150" dy="20">Personal data as capital</tspan>
|
||||
<tspan x="150" dy="20">Enterprise knowledge valuation</tspan>
|
||||
<tspan x="150" dy="20">New form of wealth creation</tspan>
|
||||
</text>
|
||||
|
||||
<!-- Opportunity 2 Box -->
|
||||
<rect x="0" y="200" width="300" height="150" rx="10" fill="#1a2d1a" stroke="#81c784" stroke-width="2"/>
|
||||
<text x="150" y="230" text-anchor="middle" font-family="Arial" font-weight="bold" font-size="16" fill="#81c784">Authentic Data Potential</text>
|
||||
<text x="150" y="260" text-anchor="middle" font-family="Arial" font-size="12" fill="#e0e0e0">
|
||||
<tspan x="150" dy="0">Vast reservoir of real insights</tspan>
|
||||
<tspan x="150" dy="20">Enhanced AI development</tspan>
|
||||
<tspan x="150" dy="20">Diverse human knowledge</tspan>
|
||||
<tspan x="150" dy="20">Willing participation model</tspan>
|
||||
</text>
|
||||
</g>
|
||||
|
||||
<!-- Development Pathway -->
|
||||
<g transform="translate(450,50)">
|
||||
<!-- Step 1 Box -->
|
||||
<rect x="0" y="0" width="300" height="100" rx="10" fill="#2d1a2d" stroke="#ce93d8" stroke-width="2"/>
|
||||
<text x="150" y="35" text-anchor="middle" font-family="Arial" font-weight="bold" font-size="16" fill="#ce93d8">1. Open-Source Foundation</text>
|
||||
<text x="150" y="65" text-anchor="middle" font-family="Arial" font-size="12" fill="#e0e0e0">Data extraction engine & community development</text>
|
||||
|
||||
<!-- Step 2 Box -->
|
||||
<rect x="0" y="125" width="300" height="100" rx="10" fill="#2d1a2d" stroke="#ce93d8" stroke-width="2"/>
|
||||
<text x="150" y="160" text-anchor="middle" font-family="Arial" font-weight="bold" font-size="16" fill="#ce93d8">2. Data Capitalization Platform</text>
|
||||
<text x="150" y="190" text-anchor="middle" font-family="Arial" font-size="12" fill="#e0e0e0">Tools to structure & value digital assets</text>
|
||||
|
||||
<!-- Step 3 Box -->
|
||||
<rect x="0" y="250" width="300" height="100" rx="10" fill="#2d1a2d" stroke="#ce93d8" stroke-width="2"/>
|
||||
<text x="150" y="285" text-anchor="middle" font-family="Arial" font-weight="bold" font-size="16" fill="#ce93d8">3. Shared Data Marketplace</text>
|
||||
<text x="150" y="315" text-anchor="middle" font-family="Arial" font-size="12" fill="#e0e0e0">Economic platform for data exchange</text>
|
||||
</g>
|
||||
|
||||
<!-- Connecting Arrows -->
|
||||
<g transform="translate(400,125)">
|
||||
<path d="M-20,0 L40,0" stroke="#666" stroke-width="2" marker-end="url(#arrowhead)"/>
|
||||
<path d="M-20,200 L40,200" stroke="#666" stroke-width="2" marker-end="url(#arrowhead)"/>
|
||||
</g>
|
||||
|
||||
<!-- Arrow Marker -->
|
||||
<defs>
|
||||
<marker id="arrowhead" markerWidth="10" markerHeight="7" refX="9" refY="3.5" orient="auto">
|
||||
<polygon points="0 0, 10 3.5, 0 7" fill="#666"/>
|
||||
</marker>
|
||||
</defs>
|
||||
|
||||
<!-- Vision Box at Bottom -->
|
||||
<g transform="translate(200,420)">
|
||||
<rect x="0" y="0" width="400" height="60" rx="10" fill="#2d2613" stroke="#ffd54f" stroke-width="2"/>
|
||||
<text x="200" y="35" text-anchor="middle" font-family="Arial" font-weight="bold" font-size="16" fill="#ffd54f">Economic Vision: Shared Data Economy</text>
|
||||
</g>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 3.8 KiB |
189
docs/deprecated/docker-deployment.md
Normal file
189
docs/deprecated/docker-deployment.md
Normal file
@@ -0,0 +1,189 @@
|
||||
# 🐳 Using Docker (Legacy)
|
||||
|
||||
Crawl4AI is available as Docker images for easy deployment. You can either pull directly from Docker Hub (recommended) or build from the repository.
|
||||
|
||||
---
|
||||
|
||||
<details>
|
||||
<summary>🐳 <strong>Option 1: Docker Hub (Recommended)</strong></summary>
|
||||
|
||||
Choose the appropriate image based on your platform and needs:
|
||||
|
||||
### For AMD64 (Regular Linux/Windows):
|
||||
```bash
|
||||
# Basic version (recommended)
|
||||
docker pull unclecode/crawl4ai:basic-amd64
|
||||
docker run -p 11235:11235 unclecode/crawl4ai:basic-amd64
|
||||
|
||||
# Full ML/LLM support
|
||||
docker pull unclecode/crawl4ai:all-amd64
|
||||
docker run -p 11235:11235 unclecode/crawl4ai:all-amd64
|
||||
|
||||
# With GPU support
|
||||
docker pull unclecode/crawl4ai:gpu-amd64
|
||||
docker run -p 11235:11235 unclecode/crawl4ai:gpu-amd64
|
||||
```
|
||||
|
||||
### For ARM64 (M1/M2 Macs, ARM servers):
|
||||
```bash
|
||||
# Basic version (recommended)
|
||||
docker pull unclecode/crawl4ai:basic-arm64
|
||||
docker run -p 11235:11235 unclecode/crawl4ai:basic-arm64
|
||||
|
||||
# Full ML/LLM support
|
||||
docker pull unclecode/crawl4ai:all-arm64
|
||||
docker run -p 11235:11235 unclecode/crawl4ai:all-arm64
|
||||
|
||||
# With GPU support
|
||||
docker pull unclecode/crawl4ai:gpu-arm64
|
||||
docker run -p 11235:11235 unclecode/crawl4ai:gpu-arm64
|
||||
```
|
||||
|
||||
Need more memory? Add `--shm-size`:
|
||||
```bash
|
||||
docker run --shm-size=2gb -p 11235:11235 unclecode/crawl4ai:basic-amd64
|
||||
```
|
||||
|
||||
Test the installation:
|
||||
```bash
|
||||
curl http://localhost:11235/health
|
||||
```
|
||||
|
||||
### For Raspberry Pi (32-bit) (coming soon):
|
||||
```bash
|
||||
# Pull and run basic version (recommended for Raspberry Pi)
|
||||
docker pull unclecode/crawl4ai:basic-armv7
|
||||
docker run -p 11235:11235 unclecode/crawl4ai:basic-armv7
|
||||
|
||||
# With increased shared memory if needed
|
||||
docker run --shm-size=2gb -p 11235:11235 unclecode/crawl4ai:basic-armv7
|
||||
```
|
||||
|
||||
Note: Due to hardware constraints, only the basic version is recommended for Raspberry Pi.
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>🐳 <strong>Option 2: Build from Repository</strong></summary>
|
||||
|
||||
Build the image locally based on your platform:
|
||||
|
||||
```bash
|
||||
# Clone the repository
|
||||
git clone https://github.com/unclecode/crawl4ai.git
|
||||
cd crawl4ai
|
||||
|
||||
# For AMD64 (Regular Linux/Windows)
|
||||
docker build --platform linux/amd64 \
|
||||
--tag crawl4ai:local \
|
||||
--build-arg INSTALL_TYPE=basic \
|
||||
.
|
||||
|
||||
# For ARM64 (M1/M2 Macs, ARM servers)
|
||||
docker build --platform linux/arm64 \
|
||||
--tag crawl4ai:local \
|
||||
--build-arg INSTALL_TYPE=basic \
|
||||
.
|
||||
```
|
||||
|
||||
Build options:
|
||||
- INSTALL_TYPE=basic (default): Basic crawling features
|
||||
- INSTALL_TYPE=all: Full ML/LLM support
|
||||
- ENABLE_GPU=true: Add GPU support
|
||||
|
||||
Example with all options:
|
||||
```bash
|
||||
docker build --platform linux/amd64 \
|
||||
--tag crawl4ai:local \
|
||||
--build-arg INSTALL_TYPE=all \
|
||||
--build-arg ENABLE_GPU=true \
|
||||
.
|
||||
```
|
||||
|
||||
Run your local build:
|
||||
```bash
|
||||
# Regular run
|
||||
docker run -p 11235:11235 crawl4ai:local
|
||||
|
||||
# With increased shared memory
|
||||
docker run --shm-size=2gb -p 11235:11235 crawl4ai:local
|
||||
```
|
||||
|
||||
Test the installation:
|
||||
```bash
|
||||
curl http://localhost:11235/health
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>🐳 <strong>Option 3: Using Docker Compose</strong></summary>
|
||||
|
||||
Docker Compose provides a more structured way to run Crawl4AI, especially when dealing with environment variables and multiple configurations.
|
||||
|
||||
```bash
|
||||
# Clone the repository
|
||||
git clone https://github.com/unclecode/crawl4ai.git
|
||||
cd crawl4ai
|
||||
```
|
||||
|
||||
### For AMD64 (Regular Linux/Windows):
|
||||
```bash
|
||||
# Build and run locally
|
||||
docker-compose --profile local-amd64 up
|
||||
|
||||
# Run from Docker Hub
|
||||
VERSION=basic docker-compose --profile hub-amd64 up # Basic version
|
||||
VERSION=all docker-compose --profile hub-amd64 up # Full ML/LLM support
|
||||
VERSION=gpu docker-compose --profile hub-amd64 up # GPU support
|
||||
```
|
||||
|
||||
### For ARM64 (M1/M2 Macs, ARM servers):
|
||||
```bash
|
||||
# Build and run locally
|
||||
docker-compose --profile local-arm64 up
|
||||
|
||||
# Run from Docker Hub
|
||||
VERSION=basic docker-compose --profile hub-arm64 up # Basic version
|
||||
VERSION=all docker-compose --profile hub-arm64 up # Full ML/LLM support
|
||||
VERSION=gpu docker-compose --profile hub-arm64 up # GPU support
|
||||
```
|
||||
|
||||
Environment variables (optional):
|
||||
```bash
|
||||
# Create a .env file
|
||||
CRAWL4AI_API_TOKEN=your_token
|
||||
OPENAI_API_KEY=your_openai_key
|
||||
CLAUDE_API_KEY=your_claude_key
|
||||
```
|
||||
|
||||
The compose file includes:
|
||||
- Memory management (4GB limit, 1GB reserved)
|
||||
- Shared memory volume for browser support
|
||||
- Health checks
|
||||
- Auto-restart policy
|
||||
- All necessary port mappings
|
||||
|
||||
Test the installation:
|
||||
```bash
|
||||
curl http://localhost:11235/health
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>🚀 <strong>One-Click Deployment</strong></summary>
|
||||
|
||||
Deploy your own instance of Crawl4AI with one click:
|
||||
|
||||
[Deploy to DigitalOcean](https://www.digitalocean.com/?repo=https://github.com/unclecode/crawl4ai/tree/0.3.74&refcode=a0780f1bdb3d&utm_campaign=Referral_Invite&utm_medium=Referral_Program&utm_source=badge)
|
||||
|
||||
> 💡 **Recommended specs**: 4GB RAM minimum. Select "professional-xs" or higher when deploying for stable operation.
|
||||
|
||||
The deploy will:
|
||||
- Set up a Docker container with Crawl4AI
|
||||
- Configure Playwright and all dependencies
|
||||
- Start the FastAPI server on port `11235`
|
||||
- Set up health checks and auto-deployment
|
||||
|
||||
</details>
|
||||
@@ -1,157 +0,0 @@
|
||||
### Extraction Strategies
|
||||
|
||||
#### 1. LLMExtractionStrategy
|
||||
```python
|
||||
LLMExtractionStrategy(
|
||||
# Core Parameters
|
||||
provider: str = DEFAULT_PROVIDER, # LLM provider (e.g., "openai/gpt-4", "huggingface/...", "ollama/...")
|
||||
api_token: Optional[str] = None, # API token for the provider
|
||||
instruction: str = None, # Custom instruction for extraction
|
||||
schema: Dict = None, # Pydantic model schema for structured extraction
|
||||
extraction_type: str = "block", # Type of extraction: "block" or "schema"
|
||||
|
||||
# Chunking Parameters
|
||||
chunk_token_threshold: int = CHUNK_TOKEN_THRESHOLD, # Maximum tokens per chunk
|
||||
overlap_rate: float = OVERLAP_RATE, # Overlap between chunks
|
||||
word_token_rate: float = WORD_TOKEN_RATE, # Conversion rate from words to tokens
|
||||
apply_chunking: bool = True, # Whether to apply text chunking
|
||||
|
||||
# API Configuration
|
||||
base_url: str = None, # Base URL for API calls
|
||||
api_base: str = None, # Alternative base URL
|
||||
extra_args: Dict = {}, # Additional provider-specific arguments
|
||||
|
||||
verbose: bool = False # Enable verbose logging
|
||||
)
|
||||
```
|
||||
|
||||
Usage Example:
|
||||
```python
|
||||
class NewsArticle(BaseModel):
|
||||
title: str
|
||||
content: str
|
||||
|
||||
strategy = LLMExtractionStrategy(
|
||||
provider="ollama/nemotron",
|
||||
api_token="your-token",
|
||||
schema=NewsArticle.schema(),
|
||||
instruction="Extract news article content with title and main text"
|
||||
)
|
||||
|
||||
result = await crawler.arun(url="https://example.com", extraction_strategy=strategy)
|
||||
```
|
||||
|
||||
#### 2. JsonCssExtractionStrategy
|
||||
```python
|
||||
JsonCssExtractionStrategy(
|
||||
schema: Dict[str, Any], # Schema defining extraction rules
|
||||
verbose: bool = False # Enable verbose logging
|
||||
)
|
||||
|
||||
# Schema Structure
|
||||
schema = {
|
||||
"name": str, # Name of the extraction schema
|
||||
"baseSelector": str, # CSS selector for base elements
|
||||
"fields": [
|
||||
{
|
||||
"name": str, # Field name
|
||||
"selector": str, # CSS selector
|
||||
"type": str, # Field type: "text", "attribute", "html", "regex", "nested", "list", "nested_list"
|
||||
"attribute": str, # For type="attribute"
|
||||
"pattern": str, # For type="regex"
|
||||
"transform": str, # Optional: "lowercase", "uppercase", "strip"
|
||||
"default": Any, # Default value if extraction fails
|
||||
"fields": List[Dict], # For nested/list types
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
Usage Example:
|
||||
```python
|
||||
schema = {
|
||||
"name": "News Articles",
|
||||
"baseSelector": "article.news-item",
|
||||
"fields": [
|
||||
{
|
||||
"name": "title",
|
||||
"selector": "h1",
|
||||
"type": "text",
|
||||
"transform": "strip"
|
||||
},
|
||||
{
|
||||
"name": "date",
|
||||
"selector": ".date",
|
||||
"type": "attribute",
|
||||
"attribute": "datetime"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
strategy = JsonCssExtractionStrategy(schema)
|
||||
result = await crawler.arun(url="https://example.com", extraction_strategy=strategy)
|
||||
```
|
||||
|
||||
#### 3. CosineStrategy
|
||||
```python
|
||||
CosineStrategy(
|
||||
# Content Filtering
|
||||
semantic_filter: str = None, # Keyword filter for document filtering
|
||||
word_count_threshold: int = 10, # Minimum words per cluster
|
||||
sim_threshold: float = 0.3, # Similarity threshold for filtering
|
||||
|
||||
# Clustering Parameters
|
||||
max_dist: float = 0.2, # Maximum distance for clustering
|
||||
linkage_method: str = 'ward', # Clustering linkage method
|
||||
top_k: int = 3, # Number of top categories to extract
|
||||
|
||||
# Model Configuration
|
||||
model_name: str = 'sentence-transformers/all-MiniLM-L6-v2', # Embedding model
|
||||
|
||||
verbose: bool = False # Enable verbose logging
|
||||
)
|
||||
```
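
Usage Example (a minimal sketch; the import path mirrors the other strategies and the parameter values are illustrative):
```python
from crawl4ai.extraction_strategy import CosineStrategy

strategy = CosineStrategy(
    semantic_filter="technology news",  # Keep clusters related to this topic
    word_count_threshold=10,            # Ignore very small clusters
    sim_threshold=0.3,                  # Similarity cutoff for relevance filtering
    top_k=3                             # Return the top 3 clusters
)

result = await crawler.arun(url="https://example.com", extraction_strategy=strategy)
print(result.extracted_content)         # Clustered, topic-filtered content
```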
|
||||
|
||||
### Chunking Strategies
|
||||
|
||||
#### 1. RegexChunking
|
||||
```python
|
||||
RegexChunking(
|
||||
patterns: List[str] = None # List of regex patterns for splitting text
|
||||
# Default pattern: [r'\n\n']
|
||||
)
|
||||
```
|
||||
|
||||
Usage Example:
|
||||
```python
|
||||
chunker = RegexChunking(patterns=[r'\n\n', r'\.\s+']) # Split on double newlines and sentences
|
||||
chunks = chunker.chunk(text)
|
||||
```
|
||||
|
||||
#### 2. SlidingWindowChunking
|
||||
```python
|
||||
SlidingWindowChunking(
|
||||
window_size: int = 100, # Size of the window in words
|
||||
step: int = 50, # Number of words to slide the window
|
||||
)
|
||||
```
|
||||
|
||||
Usage Example:
|
||||
```python
|
||||
chunker = SlidingWindowChunking(window_size=200, step=100)
|
||||
chunks = chunker.chunk(text) # Creates overlapping chunks of 200 words, moving 100 words at a time
|
||||
```
|
||||
|
||||
#### 3. OverlappingWindowChunking
|
||||
```python
|
||||
OverlappingWindowChunking(
|
||||
window_size: int = 1000, # Size of each chunk in words
|
||||
overlap: int = 100 # Number of words to overlap between chunks
|
||||
)
|
||||
```
|
||||
|
||||
Usage Example:
|
||||
```python
|
||||
chunker = OverlappingWindowChunking(window_size=500, overlap=50)
|
||||
chunks = chunker.chunk(text) # Creates 500-word chunks with 50-word overlap
|
||||
```
|
||||
@@ -1,175 +0,0 @@
|
||||
# Features
|
||||
|
||||
## Current Features
|
||||
1. Async-first architecture for high-performance web crawling
|
||||
2. Built-in anti-bot detection bypass ("magic mode")
|
||||
3. Multiple browser engine support (Chromium, Firefox, WebKit)
|
||||
4. Smart session management with automatic cleanup
|
||||
5. Automatic content cleaning and relevance scoring
|
||||
6. Built-in markdown generation with formatting preservation
|
||||
7. Intelligent image scoring and filtering
|
||||
8. Automatic popup and overlay removal
|
||||
9. Smart wait conditions (CSS/JavaScript based)
|
||||
10. Multi-provider LLM integration (OpenAI, HuggingFace, Ollama)
|
||||
11. Schema-based structured data extraction
|
||||
12. Automated iframe content processing
|
||||
13. Intelligent link categorization (internal/external)
|
||||
14. Multiple chunking strategies for large content
|
||||
15. Real-time HTML cleaning and sanitization
|
||||
16. Automatic screenshot capabilities
|
||||
17. Social media link filtering
|
||||
18. Semantic similarity-based content clustering
|
||||
19. Human behavior simulation for anti-bot bypass
|
||||
20. Proxy support with authentication
|
||||
21. Automatic resource cleanup
|
||||
22. Custom CSS selector-based extraction
|
||||
23. Automatic content relevance scoring ("fit" content)
|
||||
24. Recursive website crawling capabilities
|
||||
25. Flexible hook system for customization
|
||||
26. Built-in caching system
|
||||
27. Domain-based content filtering
|
||||
28. Dynamic content handling with JavaScript execution
|
||||
29. Automatic media content extraction and classification
|
||||
30. Metadata extraction and processing
|
||||
31. Customizable HTML to Markdown conversion
|
||||
32. Token-aware content chunking for LLM processing
|
||||
33. Automatic response header and status code handling
|
||||
34. Browser fingerprint customization
|
||||
35. Multiple extraction strategies (LLM, CSS, Cosine, XPATH)
|
||||
36. Automatic error image generation for failed screenshots
|
||||
37. Smart content overlap handling for large texts
|
||||
38. Built-in rate limiting for batch processing
|
||||
39. Automatic cookie handling
|
||||
40. Browser Console logging and debugging capabilities
|
||||
|
||||
## Feature Techs
|
||||
• Browser Management
|
||||
- Asynchronous browser control
|
||||
- Multi-browser support (Chromium, Firefox, WebKit)
|
||||
- Headless mode support
|
||||
- Browser cleanup and resource management
|
||||
- Custom browser arguments and configuration
|
||||
- Context management with `__aenter__` and `__aexit__`
|
||||
|
||||
• Session Handling
|
||||
- Session management with TTL (Time To Live)
|
||||
- Session reuse capabilities
|
||||
- Session cleanup for expired sessions
|
||||
- Session-based context preservation
|
||||
|
||||
• Stealth Features
|
||||
- Playwright stealth configuration
|
||||
- Navigator properties override
|
||||
- WebDriver detection evasion
|
||||
- Chrome app simulation
|
||||
- Plugin simulation
|
||||
- Language preferences simulation
|
||||
- Hardware concurrency simulation
|
||||
- Media codecs simulation
|
||||
|
||||
• Network Features
|
||||
- Proxy support with authentication
|
||||
- Custom headers management
|
||||
- Cookie handling
|
||||
- Response header capture
|
||||
- Status code tracking
|
||||
- Network idle detection
|
||||
|
||||
• Page Interaction
|
||||
- Smart wait functionality for multiple conditions
|
||||
- CSS selector-based waiting
|
||||
- JavaScript condition waiting
|
||||
- Custom JavaScript execution
|
||||
- User interaction simulation (mouse/keyboard)
|
||||
- Page scrolling
|
||||
- Timeout management
|
||||
- Load state monitoring
|
||||
|
||||
• Content Processing
|
||||
- HTML content extraction
|
||||
- Iframe processing and content extraction
|
||||
- Delayed content retrieval
|
||||
- Content caching
|
||||
- Cache file management
|
||||
- HTML cleaning and processing
|
||||
|
||||
• Image Handling
|
||||
- Screenshot capabilities (full page)
|
||||
- Base64 encoding of screenshots
|
||||
- Image dimension updating
|
||||
- Image filtering (size/visibility)
|
||||
- Error image generation
|
||||
- Natural width/height preservation
|
||||
|
||||
• Overlay Management
|
||||
- Popup removal
|
||||
- Cookie notice removal
|
||||
- Newsletter dialog removal
|
||||
- Modal removal
|
||||
- Fixed position element removal
|
||||
- Z-index based overlay detection
|
||||
- Visibility checking
|
||||
|
||||
• Hook System
|
||||
- Browser creation hooks
|
||||
- User agent update hooks
|
||||
- Execution start hooks
|
||||
- Navigation hooks (before/after goto)
|
||||
- HTML retrieval hooks
|
||||
- HTML return hooks
|
||||
|
||||
• Error Handling
|
||||
- Browser error catching
|
||||
- Network error handling
|
||||
- Timeout handling
|
||||
- Screenshot error recovery
|
||||
- Invalid selector handling
|
||||
- General exception management
|
||||
|
||||
• Performance Features
|
||||
- Concurrent URL processing
|
||||
- Semaphore-based rate limiting
|
||||
- Async gathering of results
|
||||
- Resource cleanup
|
||||
- Memory management
|
||||
|
||||
• Debug Features
|
||||
- Console logging
|
||||
- Page error logging
|
||||
- Verbose mode
|
||||
- Error message generation
|
||||
- Warning system
|
||||
|
||||
• Security Features
|
||||
- Certificate error handling
|
||||
- Sandbox configuration
|
||||
- GPU handling
|
||||
- CSP (Content Security Policy) compliant waiting
|
||||
|
||||
• Configuration
|
||||
- User agent customization
|
||||
- Viewport configuration
|
||||
- Timeout configuration
|
||||
- Browser type selection
|
||||
- Proxy configuration
|
||||
- Header configuration
|
||||
|
||||
• Data Models
|
||||
- Pydantic model for responses
|
||||
- Type hints throughout code
|
||||
- Structured response format
|
||||
- Optional response fields
|
||||
|
||||
• File System Integration
|
||||
- Cache directory management
|
||||
- File path handling
|
||||
- Cache metadata storage
|
||||
- File read/write operations
|
||||
|
||||
• Metadata Handling
|
||||
- Response headers capture
|
||||
- Status code tracking
|
||||
- Cache metadata
|
||||
- Session tracking
|
||||
- Timestamp management
|
||||
|
||||
@@ -1,150 +0,0 @@
|
||||
### 1. Basic Web Crawling
|
||||
```python
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result = await crawler.arun(url="https://example.com")
|
||||
print(result.markdown) # Get clean markdown content
|
||||
print(result.html) # Get raw HTML
|
||||
print(result.cleaned_html) # Get cleaned HTML
|
||||
```
|
||||
|
||||
### 2. Browser Control Options
|
||||
- Multiple Browser Support
|
||||
```python
|
||||
# Choose between different browser engines
|
||||
crawler = AsyncWebCrawler(browser_type="firefox") # or "chromium", "webkit"
|
||||
crawler = AsyncWebCrawler(headless=False) # For visible browser
|
||||
```
|
||||
|
||||
- Proxy Configuration
|
||||
```python
|
||||
crawler = AsyncWebCrawler(proxy="http://proxy.example.com:8080")
|
||||
# Or with authentication
|
||||
crawler = AsyncWebCrawler(proxy_config={
|
||||
"server": "http://proxy.example.com:8080",
|
||||
"username": "user",
|
||||
"password": "pass"
|
||||
})
|
||||
```
|
||||
|
||||
### 3. Content Selection & Filtering
|
||||
- CSS Selector Support
|
||||
```python
|
||||
result = await crawler.arun(
|
||||
url="https://example.com",
|
||||
css_selector=".main-content" # Extract specific content
|
||||
)
|
||||
```
|
||||
|
||||
- Content Filtering Options
|
||||
```python
|
||||
result = await crawler.arun(
|
||||
url="https://example.com",
|
||||
word_count_threshold=10, # Minimum words per block
|
||||
excluded_tags=['form', 'header'], # Tags to exclude
|
||||
exclude_external_links=True, # Remove external links
|
||||
exclude_social_media_links=True, # Remove social media links
|
||||
exclude_external_images=True # Remove external images
|
||||
)
|
||||
```
|
||||
|
||||
### 4. Dynamic Content Handling
|
||||
- JavaScript Execution
|
||||
```python
|
||||
result = await crawler.arun(
|
||||
url="https://example.com",
|
||||
js_code="window.scrollTo(0, document.body.scrollHeight)" # Execute custom JS
|
||||
)
|
||||
```
|
||||
|
||||
- Wait Conditions
|
||||
```python
|
||||
result = await crawler.arun(
|
||||
url="https://example.com",
|
||||
wait_for="css:.my-element", # Wait for element
|
||||
wait_for="js:() => document.readyState === 'complete'" # Wait for condition
|
||||
)
|
||||
```
|
||||
|
||||
### 5. Anti-Bot Protection Handling
|
||||
```python
|
||||
result = await crawler.arun(
|
||||
url="https://example.com",
|
||||
simulate_user=True, # Simulate human behavior
|
||||
override_navigator=True, # Mask automation signals
|
||||
magic=True # Enable all anti-detection features
|
||||
)
|
||||
```
|
||||
|
||||
### 6. Session Management
|
||||
```python
|
||||
session_id = "my_session"
|
||||
result1 = await crawler.arun(url="https://example.com/page1", session_id=session_id)
|
||||
result2 = await crawler.arun(url="https://example.com/page2", session_id=session_id)
|
||||
await crawler.crawler_strategy.kill_session(session_id)
|
||||
```
|
||||
|
||||
### 7. Media Handling
|
||||
- Screenshot Capture
|
||||
```python
|
||||
result = await crawler.arun(
|
||||
url="https://example.com",
|
||||
screenshot=True
|
||||
)
|
||||
base64_screenshot = result.screenshot
|
||||
```
|
||||
|
||||
- Media Extraction
|
||||
```python
|
||||
result = await crawler.arun(url="https://example.com")
|
||||
print(result.media['images']) # List of images
|
||||
print(result.media['videos']) # List of videos
|
||||
print(result.media['audios']) # List of audio files
|
||||
```
|
||||
|
||||
### 8. Structured Data Extraction
|
||||
- CSS-based Extraction
|
||||
```python
|
||||
schema = {
|
||||
"name": "News Articles",
|
||||
"baseSelector": "article",
|
||||
"fields": [
|
||||
{"name": "title", "selector": "h1", "type": "text"},
|
||||
{"name": "date", "selector": ".date", "type": "text"}
|
||||
]
|
||||
}
|
||||
extraction_strategy = JsonCssExtractionStrategy(schema)
|
||||
result = await crawler.arun(
|
||||
url="https://example.com",
|
||||
extraction_strategy=extraction_strategy
|
||||
)
|
||||
structured_data = json.loads(result.extracted_content)
|
||||
```
|
||||
|
||||
- LLM-based Extraction (Multiple Providers)
|
||||
```python
|
||||
class NewsArticle(BaseModel):
|
||||
title: str
|
||||
summary: str
|
||||
|
||||
strategy = LLMExtractionStrategy(
|
||||
provider="ollama/nemotron", # or "huggingface/...", "ollama/..."
|
||||
api_token="your-token",
|
||||
schema=NewsArticle.schema(),
|
||||
instruction="Extract news article details..."
|
||||
)
|
||||
result = await crawler.arun(
|
||||
url="https://example.com",
|
||||
extraction_strategy=strategy
|
||||
)
|
||||
```
|
||||
|
||||
### 9. Content Cleaning & Processing
|
||||
```python
|
||||
result = await crawler.arun(
|
||||
url="https://example.com",
|
||||
remove_overlay_elements=True, # Remove popups/modals
|
||||
process_iframes=True, # Process iframe content
|
||||
)
|
||||
print(result.fit_markdown) # Get most relevant content
|
||||
print(result.fit_html) # Get cleaned HTML
|
||||
```
|
||||
@@ -1,457 +0,0 @@
|
||||
The following sections expand the outline with detailed descriptions and usage examples, starting with the core crawling features:
|
||||
|
||||
### 1. Basic Web Crawling
|
||||
Basic web crawling provides the foundation for extracting content from websites. The library supports both simple single-page crawling and recursive website crawling.
|
||||
|
||||
```python
|
||||
# Simple page crawling
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result = await crawler.arun(url="https://example.com")
|
||||
print(result.html) # Raw HTML
|
||||
print(result.markdown) # Cleaned markdown
|
||||
print(result.cleaned_html) # Cleaned HTML
|
||||
|
||||
# Recursive website crawling
|
||||
class SimpleWebsiteScraper:
|
||||
def __init__(self, crawler: AsyncWebCrawler):
|
||||
self.crawler = crawler
|
||||
|
||||
async def scrape(self, start_url: str, max_depth: int):
|
||||
results = await self.scrape_recursive(start_url, max_depth)
|
||||
return results
|
||||
|
||||
# Usage
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
scraper = SimpleWebsiteScraper(crawler)
|
||||
    results = await scraper.scrape("https://example.com", max_depth=2)
|
||||
```
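
The `scrape_recursive` helper above is not defined in this snippet. Below is one possible minimal sketch (breadth-first, bounded by `max_depth`); it assumes internal link entries expose an `href` key, as in the link examples elsewhere in these docs:
```python
from urllib.parse import urljoin

from crawl4ai import AsyncWebCrawler


class SimpleWebsiteScraper:
    def __init__(self, crawler: AsyncWebCrawler):
        self.crawler = crawler

    async def scrape(self, start_url: str, max_depth: int) -> dict:
        return await self.scrape_recursive(start_url, max_depth)

    async def scrape_recursive(self, start_url: str, max_depth: int) -> dict:
        results, visited = {}, set()
        queue = [(start_url, 0)]                  # Breadth-first frontier
        while queue:
            url, depth = queue.pop(0)
            if url in visited or depth > max_depth:
                continue
            visited.add(url)
            result = await self.crawler.arun(url=url)
            results[url] = result
            if result.success and depth < max_depth:
                for link in result.links.get("internal", []):
                    queue.append((urljoin(url, link["href"]), depth + 1))
        return results
```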
|
||||
|
||||
### 2. Browser Control Options
|
||||
The library provides extensive control over browser behavior, allowing customization of browser type, headless mode, and proxy settings.
|
||||
|
||||
```python
|
||||
# Browser Type Selection
|
||||
async with AsyncWebCrawler(
|
||||
browser_type="firefox", # Options: "chromium", "firefox", "webkit"
|
||||
headless=False, # For visible browser
|
||||
verbose=True # Enable logging
|
||||
) as crawler:
|
||||
result = await crawler.arun(url="https://example.com")
|
||||
|
||||
# Proxy Configuration
|
||||
async with AsyncWebCrawler(
|
||||
proxy_config={
|
||||
"server": "http://proxy.example.com:8080",
|
||||
"username": "user",
|
||||
"password": "pass"
|
||||
},
|
||||
headers={
|
||||
"User-Agent": "Custom User Agent",
|
||||
"Accept-Language": "en-US,en;q=0.9"
|
||||
}
|
||||
) as crawler:
|
||||
result = await crawler.arun(url="https://example.com")
|
||||
```
|
||||
|
||||
### 3. Content Selection & Filtering
|
||||
The library offers multiple ways to select and filter content, from CSS selectors to word count thresholds.
|
||||
|
||||
```python
|
||||
# CSS Selector and Content Filtering
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://example.com",
|
||||
css_selector="article.main-content", # Extract specific content
|
||||
word_count_threshold=10, # Minimum words per block
|
||||
excluded_tags=['form', 'header'], # Tags to exclude
|
||||
exclude_external_links=True, # Remove external links
|
||||
exclude_social_media_links=True, # Remove social media links
|
||||
exclude_domains=["pinterest.com", "facebook.com"] # Exclude specific domains
|
||||
)
|
||||
|
||||
# Custom HTML to Text Options
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://example.com",
|
||||
html2text={
|
||||
"escape_dot": False,
|
||||
"links_each_paragraph": True,
|
||||
"protect_links": True
|
||||
}
|
||||
)
|
||||
```
|
||||
|
||||
### 4. Dynamic Content Handling
|
||||
The library provides sophisticated handling of dynamic content with JavaScript execution and wait conditions.
|
||||
|
||||
```python
|
||||
# JavaScript Execution and Wait Conditions
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://example.com",
|
||||
js_code=[
|
||||
"window.scrollTo(0, document.body.scrollHeight);",
|
||||
"document.querySelector('.load-more').click();"
|
||||
],
|
||||
wait_for="css:.dynamic-content", # Wait for element
|
||||
delay_before_return_html=2.0 # Wait after JS execution
|
||||
)
|
||||
|
||||
# Smart Wait Conditions
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://example.com",
|
||||
wait_for="""() => {
|
||||
return document.querySelectorAll('.item').length > 10;
|
||||
}""",
|
||||
page_timeout=60000 # 60 seconds timeout
|
||||
)
|
||||
```
|
||||
|
||||
### 5. Advanced Link Analysis
|
||||
The library provides comprehensive link analysis capabilities, distinguishing between internal and external links, with options for filtering and processing.
|
||||
|
||||
```python
|
||||
# Basic Link Analysis
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result = await crawler.arun(url="https://example.com")
|
||||
|
||||
# Access internal and external links
|
||||
for internal_link in result.links['internal']:
|
||||
print(f"Internal: {internal_link['href']} - {internal_link['text']}")
|
||||
|
||||
for external_link in result.links['external']:
|
||||
print(f"External: {external_link['href']} - {external_link['text']}")
|
||||
|
||||
# Advanced Link Filtering
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://example.com",
|
||||
exclude_external_links=True, # Remove all external links
|
||||
exclude_social_media_links=True, # Remove social media links
|
||||
exclude_social_media_domains=[ # Custom social media domains
|
||||
"facebook.com", "twitter.com", "instagram.com"
|
||||
],
|
||||
exclude_domains=["pinterest.com"] # Specific domains to exclude
|
||||
)
|
||||
```
|
||||
|
||||
### 6. Anti-Bot Protection Handling
|
||||
The library includes sophisticated anti-detection mechanisms to handle websites with bot protection.
|
||||
|
||||
```python
|
||||
# Basic Anti-Detection
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://example.com",
|
||||
simulate_user=True, # Simulate human behavior
|
||||
override_navigator=True # Override navigator properties
|
||||
)
|
||||
|
||||
# Advanced Anti-Detection with Magic Mode
|
||||
async with AsyncWebCrawler(headless=False) as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://example.com",
|
||||
magic=True, # Enable all anti-detection features
|
||||
remove_overlay_elements=True, # Remove popups/modals automatically
|
||||
# Custom navigator properties
|
||||
js_code="""
|
||||
Object.defineProperty(navigator, 'webdriver', {
|
||||
get: () => undefined
|
||||
});
|
||||
"""
|
||||
)
|
||||
```
|
||||
|
||||
### 7. Session Management
|
||||
Session management allows maintaining state across multiple requests and handling cookies.
|
||||
|
||||
```python
|
||||
# Basic Session Management
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
session_id = "my_session"
|
||||
|
||||
# Login
|
||||
login_result = await crawler.arun(
|
||||
url="https://example.com/login",
|
||||
session_id=session_id,
|
||||
js_code="document.querySelector('form').submit();"
|
||||
)
|
||||
|
||||
# Use same session for subsequent requests
|
||||
protected_result = await crawler.arun(
|
||||
url="https://example.com/protected",
|
||||
session_id=session_id
|
||||
)
|
||||
|
||||
# Clean up session
|
||||
await crawler.crawler_strategy.kill_session(session_id)
|
||||
|
||||
# Advanced Session with Custom Cookies
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://example.com",
|
||||
session_id="custom_session",
|
||||
cookies=[{
|
||||
"name": "sessionId",
|
||||
"value": "abc123",
|
||||
"domain": "example.com"
|
||||
}]
|
||||
)
|
||||
```
|
||||
|
||||
### 8. Screenshot and Media Handling
|
||||
The library provides comprehensive media handling capabilities, including screenshots and media content extraction.
|
||||
|
||||
```python
|
||||
# Screenshot Capture
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://example.com",
|
||||
screenshot=True,
|
||||
screenshot_wait_for=2.0 # Wait before taking screenshot
|
||||
)
|
||||
|
||||
# Save screenshot
|
||||
if result.screenshot:
|
||||
with open("screenshot.png", "wb") as f:
|
||||
f.write(base64.b64decode(result.screenshot))
|
||||
|
||||
# Media Extraction
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result = await crawler.arun(url="https://example.com")
|
||||
|
||||
# Process images with metadata
|
||||
for image in result.media['images']:
|
||||
print(f"Image: {image['src']}")
|
||||
print(f"Alt text: {image['alt']}")
|
||||
print(f"Context: {image['desc']}")
|
||||
print(f"Relevance score: {image['score']}")
|
||||
|
||||
# Process videos and audio
|
||||
for video in result.media['videos']:
|
||||
print(f"Video: {video['src']}")
|
||||
for audio in result.media['audios']:
|
||||
print(f"Audio: {audio['src']}")
|
||||
```
|
||||
|
||||
### 9. Structured Data Extraction & Chunking
|
||||
The library supports multiple strategies for structured data extraction and content chunking.
|
||||
|
||||
```python
|
||||
# LLM-based Extraction
|
||||
class NewsArticle(BaseModel):
|
||||
title: str
|
||||
content: str
|
||||
author: str
|
||||
|
||||
extraction_strategy = LLMExtractionStrategy(
|
||||
provider='openai/gpt-4',
|
||||
api_token="your-token",
|
||||
schema=NewsArticle.schema(),
|
||||
instruction="Extract news article details",
|
||||
chunk_token_threshold=1000,
|
||||
overlap_rate=0.1
|
||||
)
|
||||
|
||||
# CSS-based Extraction
|
||||
schema = {
|
||||
"name": "Product Listing",
|
||||
"baseSelector": ".product-card",
|
||||
"fields": [
|
||||
{
|
||||
"name": "title",
|
||||
"selector": "h2",
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"name": "price",
|
||||
"selector": ".price",
|
||||
"type": "text",
|
||||
"transform": "strip"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
css_strategy = JsonCssExtractionStrategy(schema)
|
||||
|
||||
# Text Chunking
|
||||
from crawl4ai.chunking_strategy import OverlappingWindowChunking
|
||||
|
||||
chunking_strategy = OverlappingWindowChunking(
|
||||
window_size=1000,
|
||||
overlap=100
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://example.com",
|
||||
extraction_strategy=extraction_strategy,
|
||||
chunking_strategy=chunking_strategy
|
||||
)
|
||||
```
|
||||
|
||||
|
||||
### 10. Content Cleaning & Processing
|
||||
The library provides extensive content cleaning and processing capabilities, ensuring high-quality output in various formats.
|
||||
|
||||
```python
|
||||
# Basic Content Cleaning
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://example.com",
|
||||
remove_overlay_elements=True, # Remove popups/modals
|
||||
process_iframes=True, # Process iframe content
|
||||
word_count_threshold=10 # Minimum words per block
|
||||
)
|
||||
|
||||
print(result.cleaned_html) # Clean HTML
|
||||
print(result.fit_html) # Most relevant HTML content
|
||||
print(result.fit_markdown) # Most relevant markdown content
|
||||
|
||||
# Advanced Content Processing
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://example.com",
|
||||
excluded_tags=['form', 'header', 'footer', 'nav'],
|
||||
html2text={
|
||||
"escape_dot": False,
|
||||
"body_width": 0,
|
||||
"protect_links": True,
|
||||
"unicode_snob": True,
|
||||
"ignore_links": False,
|
||||
"ignore_images": False,
|
||||
"ignore_emphasis": False,
|
||||
"bypass_tables": False,
|
||||
"ignore_tables": False
|
||||
}
|
||||
)
|
||||
```
|
||||
|
||||
### Advanced Usage Patterns
|
||||
|
||||
#### 1. Combining Multiple Features
|
||||
```python
|
||||
async with AsyncWebCrawler(
|
||||
browser_type="chromium",
|
||||
headless=False,
|
||||
verbose=True
|
||||
) as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://example.com",
|
||||
# Anti-bot measures
|
||||
magic=True,
|
||||
simulate_user=True,
|
||||
|
||||
# Content selection
|
||||
css_selector="article.main",
|
||||
word_count_threshold=10,
|
||||
|
||||
# Dynamic content handling
|
||||
js_code="window.scrollTo(0, document.body.scrollHeight);",
|
||||
wait_for="css:.dynamic-content",
|
||||
|
||||
# Content filtering
|
||||
exclude_external_links=True,
|
||||
exclude_social_media_links=True,
|
||||
|
||||
# Media handling
|
||||
screenshot=True,
|
||||
process_iframes=True,
|
||||
|
||||
# Content cleaning
|
||||
remove_overlay_elements=True
|
||||
)
|
||||
```
|
||||
|
||||
#### 2. Custom Extraction Pipeline
|
||||
```python
|
||||
# Define custom schemas and strategies
|
||||
class Article(BaseModel):
|
||||
title: str
|
||||
content: str
|
||||
date: str
|
||||
|
||||
# CSS extraction for initial content
|
||||
css_schema = {
|
||||
"name": "Article Extraction",
|
||||
"baseSelector": "article",
|
||||
"fields": [
|
||||
{"name": "title", "selector": "h1", "type": "text"},
|
||||
{"name": "content", "selector": ".content", "type": "html"},
|
||||
{"name": "date", "selector": ".date", "type": "text"}
|
||||
]
|
||||
}
|
||||
|
||||
# LLM processing for semantic analysis
|
||||
llm_strategy = LLMExtractionStrategy(
|
||||
provider="ollama/nemotron",
|
||||
api_token="your-token",
|
||||
schema=Article.schema(),
|
||||
instruction="Extract and clean article content"
|
||||
)
|
||||
|
||||
# Chunking strategy for large content
|
||||
chunking = OverlappingWindowChunking(window_size=1000, overlap=100)
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
# First pass: Extract structure
|
||||
css_result = await crawler.arun(
|
||||
url="https://example.com",
|
||||
extraction_strategy=JsonCssExtractionStrategy(css_schema)
|
||||
)
|
||||
|
||||
# Second pass: Semantic processing
|
||||
llm_result = await crawler.arun(
|
||||
url="https://example.com",
|
||||
extraction_strategy=llm_strategy,
|
||||
chunking_strategy=chunking
|
||||
)
|
||||
```
|
||||
|
||||
#### 3. Website Crawling with Custom Processing
|
||||
```python
|
||||
class CustomWebsiteCrawler:
|
||||
def __init__(self, crawler: AsyncWebCrawler):
|
||||
self.crawler = crawler
|
||||
self.results = {}
|
||||
|
||||
async def process_page(self, url: str) -> Dict:
|
||||
result = await self.crawler.arun(
|
||||
url=url,
|
||||
magic=True,
|
||||
word_count_threshold=10,
|
||||
exclude_external_links=True,
|
||||
process_iframes=True,
|
||||
remove_overlay_elements=True
|
||||
)
|
||||
|
||||
# Process internal links
|
||||
internal_links = [
|
||||
link['href'] for link in result.links['internal']
|
||||
if self._is_valid_link(link['href'])
|
||||
]
|
||||
|
||||
# Extract media
|
||||
media_urls = [img['src'] for img in result.media['images']]
|
||||
|
||||
return {
|
||||
'content': result.markdown,
|
||||
'links': internal_links,
|
||||
'media': media_urls,
|
||||
'metadata': result.metadata
|
||||
}
|
||||
|
||||
async def crawl_website(self, start_url: str, max_depth: int = 2):
|
||||
visited = set()
|
||||
queue = [(start_url, 0)]
|
||||
|
||||
while queue:
|
||||
url, depth = queue.pop(0)
|
||||
if depth > max_depth or url in visited:
|
||||
continue
|
||||
|
||||
visited.add(url)
|
||||
self.results[url] = await self.process_page(url)
|
||||
```
|
||||
|
||||
@@ -1,282 +0,0 @@
|
||||
### AsyncWebCrawler Constructor Parameters
|
||||
```python
|
||||
AsyncWebCrawler(
|
||||
# Core Browser Settings
|
||||
browser_type: str = "chromium", # Options: "chromium", "firefox", "webkit"
|
||||
headless: bool = True, # Whether to run browser in headless mode
|
||||
verbose: bool = False, # Enable verbose logging
|
||||
|
||||
# Cache Settings
|
||||
always_by_pass_cache: bool = False, # Always bypass cache regardless of run settings
|
||||
base_directory: str = str(Path.home()), # Base directory for cache storage
|
||||
|
||||
# Network Settings
|
||||
proxy: str = None, # Simple proxy URL (e.g., "http://proxy.example.com:8080")
|
||||
proxy_config: Dict = None, # Advanced proxy settings with auth: {"server": str, "username": str, "password": str}
|
||||
|
||||
# Browser Behavior
|
||||
sleep_on_close: bool = False, # Wait before closing browser
|
||||
|
||||
# Other Settings passed to AsyncPlaywrightCrawlerStrategy
|
||||
user_agent: str = None, # Custom user agent string
|
||||
headers: Dict[str, str] = {}, # Custom HTTP headers
|
||||
js_code: Union[str, List[str]] = None, # Default JavaScript to execute
|
||||
)
|
||||
```
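
A short sketch combining several of these constructor parameters (the proxy and header values are placeholders):
```python
async with AsyncWebCrawler(
    browser_type="firefox",              # Use Firefox instead of the default Chromium
    headless=True,
    verbose=True,
    proxy_config={                       # Authenticated proxy (placeholder values)
        "server": "http://proxy.example.com:8080",
        "username": "user",
        "password": "pass",
    },
    headers={"Accept-Language": "en-US,en;q=0.9"},
) as crawler:
    result = await crawler.arun(url="https://example.com")
```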
|
||||
|
||||
### arun() Method Parameters
|
||||
```python
|
||||
arun(
|
||||
# Core Parameters
|
||||
url: str, # Required: URL to crawl
|
||||
|
||||
# Content Selection
|
||||
css_selector: str = None, # CSS selector to extract specific content
|
||||
word_count_threshold: int = MIN_WORD_THRESHOLD, # Minimum words for content blocks
|
||||
|
||||
# Cache Control
|
||||
bypass_cache: bool = False, # Bypass cache for this request
|
||||
|
||||
# Session Management
|
||||
session_id: str = None, # Session identifier for persistent browsing
|
||||
|
||||
# Screenshot Options
|
||||
screenshot: bool = False, # Take page screenshot
|
||||
screenshot_wait_for: float = None, # Wait time before screenshot
|
||||
|
||||
# Content Processing
|
||||
process_iframes: bool = False, # Process iframe content
|
||||
remove_overlay_elements: bool = False, # Remove popups/modals
|
||||
|
||||
# Anti-Bot/Detection
|
||||
simulate_user: bool = False, # Simulate human-like behavior
|
||||
override_navigator: bool = False, # Override navigator properties
|
||||
magic: bool = False, # Enable all anti-detection features
|
||||
|
||||
# Content Filtering
|
||||
excluded_tags: List[str] = None, # HTML tags to exclude
|
||||
exclude_external_links: bool = False, # Remove external links
|
||||
exclude_social_media_links: bool = False, # Remove social media links
|
||||
exclude_external_images: bool = False, # Remove external images
|
||||
exclude_social_media_domains: List[str] = None, # Additional social media domains to exclude
|
||||
remove_forms: bool = False, # Remove all form elements
|
||||
|
||||
# JavaScript Handling
|
||||
js_code: Union[str, List[str]] = None, # JavaScript to execute
|
||||
js_only: bool = False, # Only execute JavaScript without reloading page
|
||||
wait_for: str = None, # Wait condition (CSS selector or JS function)
|
||||
|
||||
# Page Loading
|
||||
page_timeout: int = 60000, # Page load timeout in milliseconds
|
||||
delay_before_return_html: float = None, # Wait before returning HTML
|
||||
|
||||
# Debug Options
|
||||
log_console: bool = False, # Log browser console messages
|
||||
|
||||
# Content Format Control
|
||||
only_text: bool = False, # Extract only text content
|
||||
keep_data_attributes: bool = False, # Keep data-* attributes in HTML
|
||||
|
||||
# Markdown Options
|
||||
include_links_on_markdown: bool = False, # Include links in markdown output
|
||||
html2text: Dict = {}, # HTML to text conversion options
|
||||
|
||||
# Extraction Strategy
|
||||
extraction_strategy: ExtractionStrategy = None, # Strategy for structured data extraction
|
||||
|
||||
# Advanced Browser Control
|
||||
user_agent: str = None, # Override user agent for this request
|
||||
)
|
||||
```
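
A combined `arun()` call exercising several of the parameters above (run inside an active `AsyncWebCrawler` context; values are illustrative):
```python
result = await crawler.arun(
    url="https://example.com",
    css_selector="article.main",         # Content selection
    word_count_threshold=10,
    bypass_cache=True,                   # Cache control
    screenshot=True,                     # Media capture
    process_iframes=True,                # Content processing
    remove_overlay_elements=True,
    magic=True,                          # Anti-bot features
    excluded_tags=["form", "nav"],       # Content filtering
    wait_for="css:.content-loaded",      # Wait condition
    page_timeout=60000,
)
```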
|
||||
|
||||
### Extraction Strategy Parameters
|
||||
```python
|
||||
# JsonCssExtractionStrategy
|
||||
{
|
||||
"name": str, # Name of extraction schema
|
||||
"baseSelector": str, # Base CSS selector
|
||||
"fields": [
|
||||
{
|
||||
"name": str, # Field name
|
||||
"selector": str, # CSS selector
|
||||
"type": str, # Data type ("text", etc.)
|
||||
"transform": str = None # Optional transformation
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
# LLMExtractionStrategy
|
||||
{
|
||||
"provider": str, # LLM provider (e.g., "openai/gpt-4", "huggingface/...", "ollama/...")
|
||||
"api_token": str, # API token
|
||||
"schema": dict, # Pydantic model schema
|
||||
"extraction_type": str, # Type of extraction ("schema", etc.)
|
||||
"instruction": str, # Extraction instruction
|
||||
"extra_args": dict = None, # Additional provider-specific arguments
|
||||
"extra_headers": dict = None # Additional HTTP headers
|
||||
}
|
||||
```
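
A sketch that wires the LLM strategy fields above into a constructor call (the `NewsArticle` model and the extra argument values are illustrative):
```python
strategy = LLMExtractionStrategy(
    provider="openai/gpt-4",
    api_token="your-token",
    schema=NewsArticle.schema(),                    # Pydantic model defined earlier
    extraction_type="schema",
    instruction="Extract the article title and summary",
    extra_args={"temperature": 0.0},                # Provider-specific arguments
    extra_headers={"X-Client": "crawl4ai-docs"},    # Additional HTTP headers (illustrative)
)
```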
|
||||
|
||||
### HTML to Text Conversion Options (html2text parameter)
|
||||
```python
|
||||
{
|
||||
"escape_dot": bool = True, # Escape dots in text
|
||||
# Other html2text library options
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
### CrawlResult Fields
|
||||
|
||||
```python
|
||||
class CrawlResult(BaseModel):
|
||||
# Basic Information
|
||||
url: str # The crawled URL
|
||||
# Example: "https://example.com"
|
||||
|
||||
success: bool # Whether the crawl was successful
|
||||
# Example: True/False
|
||||
|
||||
status_code: Optional[int] # HTTP status code
|
||||
# Example: 200, 404, 500
|
||||
|
||||
# Content Fields
|
||||
html: str # Raw HTML content
|
||||
# Example: "<html><body>...</body></html>"
|
||||
|
||||
cleaned_html: Optional[str] # HTML after cleaning and processing
|
||||
# Example: "<article><p>Clean content...</p></article>"
|
||||
|
||||
fit_html: Optional[str] # Most relevant HTML content after content cleaning strategy
|
||||
# Example: "<div><p>Most relevant content...</p></div>"
|
||||
|
||||
markdown: Optional[str] # HTML converted to markdown
|
||||
# Example: "# Title\n\nContent paragraph..."
|
||||
|
||||
fit_markdown: Optional[str] # Most relevant content in markdown
|
||||
# Example: "# Main Article\n\nKey content..."
|
||||
|
||||
# Media Content
|
||||
media: Dict[str, List[Dict]] = {} # Extracted media information
|
||||
# Example: {
|
||||
# "images": [
|
||||
# {
|
||||
# "src": "https://example.com/image.jpg",
|
||||
# "alt": "Image description",
|
||||
# "desc": "Contextual description",
|
||||
# "score": 5, # Relevance score
|
||||
# "type": "image"
|
||||
# }
|
||||
# ],
|
||||
# "videos": [
|
||||
# {
|
||||
# "src": "https://example.com/video.mp4",
|
||||
# "alt": "Video title",
|
||||
# "type": "video",
|
||||
# "description": "Video context"
|
||||
# }
|
||||
# ],
|
||||
# "audios": [
|
||||
# {
|
||||
# "src": "https://example.com/audio.mp3",
|
||||
# "alt": "Audio title",
|
||||
# "type": "audio",
|
||||
# "description": "Audio context"
|
||||
# }
|
||||
# ]
|
||||
# }
|
||||
|
||||
# Link Information
|
||||
links: Dict[str, List[Dict]] = {} # Extracted links
|
||||
# Example: {
|
||||
# "internal": [
|
||||
# {
|
||||
# "href": "https://example.com/page",
|
||||
# "text": "Link text",
|
||||
# "title": "Link title"
|
||||
# }
|
||||
# ],
|
||||
# "external": [
|
||||
# {
|
||||
# "href": "https://external.com",
|
||||
# "text": "External link text",
|
||||
# "title": "External link title"
|
||||
# }
|
||||
# ]
|
||||
# }
|
||||
|
||||
# Extraction Results
|
||||
extracted_content: Optional[str] # Content from extraction strategy
|
||||
# Example for JsonCssExtractionStrategy:
|
||||
# '[{"title": "Article 1", "date": "2024-03-20"}, ...]'
|
||||
# Example for LLMExtractionStrategy:
|
||||
# '{"entities": [...], "relationships": [...]}'
|
||||
|
||||
# Additional Information
|
||||
metadata: Optional[dict] = None # Page metadata
|
||||
# Example: {
|
||||
# "title": "Page Title",
|
||||
# "description": "Meta description",
|
||||
# "keywords": ["keyword1", "keyword2"],
|
||||
# "author": "Author Name",
|
||||
# "published_date": "2024-03-20"
|
||||
# }
|
||||
|
||||
screenshot: Optional[str] = None # Base64 encoded screenshot
|
||||
# Example: "iVBORw0KGgoAAAANSUhEUgAA..."
|
||||
|
||||
error_message: Optional[str] = None # Error message if crawl failed
|
||||
# Example: "Failed to load page: timeout"
|
||||
|
||||
session_id: Optional[str] = None # Session identifier
|
||||
# Example: "session_123456"
|
||||
|
||||
response_headers: Optional[dict] = None # HTTP response headers
|
||||
# Example: {
|
||||
# "content-type": "text/html",
|
||||
# "server": "nginx/1.18.0",
|
||||
# "date": "Wed, 20 Mar 2024 12:00:00 GMT"
|
||||
# }
|
||||
```
|
||||
|
||||
### Common Usage Patterns:
|
||||
|
||||
1. Basic Content Extraction:
|
||||
```python
|
||||
result = await crawler.arun(url="https://example.com")
|
||||
print(result.markdown) # Clean, readable content
|
||||
print(result.cleaned_html) # Cleaned HTML
|
||||
```
|
||||
|
||||
2. Media Analysis:
|
||||
```python
|
||||
result = await crawler.arun(url="https://example.com")
|
||||
for image in result.media["images"]:
|
||||
if image["score"] > 3: # High-relevance images
|
||||
print(f"High-quality image: {image['src']}")
|
||||
```
|
||||
|
||||
3. Link Analysis:
|
||||
```python
|
||||
result = await crawler.arun(url="https://example.com")
|
||||
internal_links = [link["href"] for link in result.links["internal"]]
|
||||
external_links = [link["href"] for link in result.links["external"]]
|
||||
```
|
||||
|
||||
4. Structured Data Extraction:
|
||||
```python
|
||||
result = await crawler.arun(
|
||||
url="https://example.com",
|
||||
extraction_strategy=my_strategy
|
||||
)
|
||||
structured_data = json.loads(result.extracted_content)
|
||||
```
|
||||
|
||||
5. Error Handling:
|
||||
```python
|
||||
result = await crawler.arun(url="https://example.com")
|
||||
if not result.success:
|
||||
print(f"Crawl failed: {result.error_message}")
|
||||
print(f"Status code: {result.status_code}")
|
||||
```
|
||||
|
||||
@@ -1,67 +0,0 @@
|
||||
1. **E-commerce Product Monitor**
|
||||
- Scraping product details from multiple e-commerce sites
|
||||
- Price tracking with structured data extraction
|
||||
- Handling dynamic content and anti-bot measures
|
||||
- Features: JsonCssExtraction, session management, anti-bot
|
||||
|
||||
2. **News Aggregator & Summarizer**
|
||||
- Crawling news websites
|
||||
- Content extraction and summarization
|
||||
- Topic classification
|
||||
- Features: LLMExtraction, CosineStrategy, content cleaning
|
||||
|
||||
3. **Academic Paper Research Assistant**
|
||||
- Crawling research papers from academic sites
|
||||
- Extracting citations and references
|
||||
- Building knowledge graphs
|
||||
- Features: structured extraction, link analysis, chunking
|
||||
|
||||
4. **Social Media Content Analyzer**
|
||||
- Handling JavaScript-heavy sites
|
||||
- Dynamic content loading
|
||||
- Sentiment analysis integration
|
||||
- Features: dynamic content handling, session management
|
||||
|
||||
5. **Real Estate Market Analyzer**
|
||||
- Scraping property listings
|
||||
- Processing image galleries
|
||||
- Geolocation data extraction
|
||||
- Features: media handling, structured data extraction
|
||||
|
||||
6. **Documentation Site Generator**
|
||||
- Recursive website crawling
|
||||
- Markdown generation
|
||||
- Link validation
|
||||
- Features: website crawling, content cleaning
|
||||
|
||||
7. **Job Board Aggregator**
|
||||
- Handling pagination
|
||||
- Structured job data extraction
|
||||
- Filtering and categorization
|
||||
- Features: session management, JsonCssExtraction
|
||||
|
||||
8. **Recipe Database Builder**
|
||||
- Schema-based extraction
|
||||
- Image processing
|
||||
- Ingredient parsing
|
||||
- Features: structured extraction, media handling
|
||||
|
||||
9. **Travel Blog Content Analyzer**
|
||||
- Location extraction
|
||||
- Image and map processing
|
||||
- Content categorization
|
||||
- Features: CosineStrategy, media handling
|
||||
|
||||
10. **Technical Documentation Scraper**
|
||||
- API documentation extraction
|
||||
- Code snippet processing
|
||||
- Version tracking
|
||||
- Features: content cleaning, structured extraction
|
||||
|
||||
Each example will include:
|
||||
- Problem description
|
||||
- Technical requirements
|
||||
- Complete implementation
|
||||
- Error handling
|
||||
- Output processing
|
||||
- Performance considerations
|
||||
docs/examples/amazon_product_extraction_direct_url.py (new file, 110 lines)
@@ -0,0 +1,110 @@
|
||||
"""
|
||||
This example demonstrates how to use JSON CSS extraction to scrape product information
|
||||
from Amazon search results. It shows how to extract structured data like product titles,
|
||||
prices, ratings, and other details using CSS selectors.
|
||||
"""
|
||||
|
||||
from crawl4ai import AsyncWebCrawler
|
||||
from crawl4ai.extraction_strategy import JsonCssExtractionStrategy
|
||||
from crawl4ai.async_configs import BrowserConfig, CrawlerRunConfig
|
||||
import json
|
||||
|
||||
|
||||
async def extract_amazon_products():
|
||||
# Initialize browser config
|
||||
browser_config = BrowserConfig(browser_type="chromium", headless=True)
|
||||
|
||||
# Initialize crawler config with JSON CSS extraction strategy
|
||||
crawler_config = CrawlerRunConfig(
|
||||
extraction_strategy=JsonCssExtractionStrategy(
|
||||
schema={
|
||||
"name": "Amazon Product Search Results",
|
||||
"baseSelector": "[data-component-type='s-search-result']",
|
||||
"fields": [
|
||||
{
|
||||
"name": "asin",
|
||||
"selector": "",
|
||||
"type": "attribute",
|
||||
"attribute": "data-asin",
|
||||
},
|
||||
{"name": "title", "selector": "h2 a span", "type": "text"},
|
||||
{
|
||||
"name": "url",
|
||||
"selector": "h2 a",
|
||||
"type": "attribute",
|
||||
"attribute": "href",
|
||||
},
|
||||
{
|
||||
"name": "image",
|
||||
"selector": ".s-image",
|
||||
"type": "attribute",
|
||||
"attribute": "src",
|
||||
},
|
||||
{
|
||||
"name": "rating",
|
||||
"selector": ".a-icon-star-small .a-icon-alt",
|
||||
"type": "text",
|
||||
},
|
||||
{
|
||||
"name": "reviews_count",
|
||||
"selector": "[data-csa-c-func-deps='aui-da-a-popover'] ~ span span",
|
||||
"type": "text",
|
||||
},
|
||||
{
|
||||
"name": "price",
|
||||
"selector": ".a-price .a-offscreen",
|
||||
"type": "text",
|
||||
},
|
||||
{
|
||||
"name": "original_price",
|
||||
"selector": ".a-price.a-text-price .a-offscreen",
|
||||
"type": "text",
|
||||
},
|
||||
{
|
||||
"name": "sponsored",
|
||||
"selector": ".puis-sponsored-label-text",
|
||||
"type": "exists",
|
||||
},
|
||||
{
|
||||
"name": "delivery_info",
|
||||
"selector": "[data-cy='delivery-recipe'] .a-color-base",
|
||||
"type": "text",
|
||||
"multiple": True,
|
||||
},
|
||||
],
|
||||
}
|
||||
)
|
||||
)
|
||||
|
||||
# Example search URL (you should replace with your actual Amazon URL)
|
||||
url = "https://www.amazon.com/s?k=Samsung+Galaxy+Tab"
|
||||
|
||||
# Use context manager for proper resource handling
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
# Extract the data
|
||||
result = await crawler.arun(url=url, config=crawler_config)
|
||||
|
||||
# Process and print the results
|
||||
if result and result.extracted_content:
|
||||
# Parse the JSON string into a list of products
|
||||
products = json.loads(result.extracted_content)
|
||||
|
||||
# Process each product in the list
|
||||
for product in products:
|
||||
print("\nProduct Details:")
|
||||
print(f"ASIN: {product.get('asin')}")
|
||||
print(f"Title: {product.get('title')}")
|
||||
print(f"Price: {product.get('price')}")
|
||||
print(f"Original Price: {product.get('original_price')}")
|
||||
print(f"Rating: {product.get('rating')}")
|
||||
print(f"Reviews: {product.get('reviews_count')}")
|
||||
print(f"Sponsored: {'Yes' if product.get('sponsored') else 'No'}")
|
||||
if product.get("delivery_info"):
|
||||
print(f"Delivery: {' '.join(product['delivery_info'])}")
|
||||
print("-" * 80)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import asyncio
|
||||
|
||||
asyncio.run(extract_amazon_products())
|
||||
docs/examples/amazon_product_extraction_using_hooks.py (new file, 150 lines)
@@ -0,0 +1,150 @@
|
||||
"""
|
||||
This example demonstrates how to use JSON CSS extraction to scrape product information
|
||||
from Amazon search results. It shows how to extract structured data like product titles,
|
||||
prices, ratings, and other details using CSS selectors.
|
||||
"""
|
||||
|
||||
from crawl4ai import AsyncWebCrawler, CacheMode
|
||||
from crawl4ai.extraction_strategy import JsonCssExtractionStrategy
|
||||
from crawl4ai.async_configs import BrowserConfig, CrawlerRunConfig
|
||||
import json
|
||||
from playwright.async_api import Page, BrowserContext
|
||||
|
||||
|
||||
async def extract_amazon_products():
|
||||
# Initialize browser config
|
||||
browser_config = BrowserConfig(
|
||||
# browser_type="chromium",
|
||||
headless=True
|
||||
)
|
||||
|
||||
    # Initialize crawler config with JSON CSS extraction strategy
|
||||
crawler_config = CrawlerRunConfig(
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
extraction_strategy=JsonCssExtractionStrategy(
|
||||
schema={
|
||||
"name": "Amazon Product Search Results",
|
||||
"baseSelector": "[data-component-type='s-search-result']",
|
||||
"fields": [
|
||||
{
|
||||
"name": "asin",
|
||||
"selector": "",
|
||||
"type": "attribute",
|
||||
"attribute": "data-asin",
|
||||
},
|
||||
{"name": "title", "selector": "h2 a span", "type": "text"},
|
||||
{
|
||||
"name": "url",
|
||||
"selector": "h2 a",
|
||||
"type": "attribute",
|
||||
"attribute": "href",
|
||||
},
|
||||
{
|
||||
"name": "image",
|
||||
"selector": ".s-image",
|
||||
"type": "attribute",
|
||||
"attribute": "src",
|
||||
},
|
||||
{
|
||||
"name": "rating",
|
||||
"selector": ".a-icon-star-small .a-icon-alt",
|
||||
"type": "text",
|
||||
},
|
||||
{
|
||||
"name": "reviews_count",
|
||||
"selector": "[data-csa-c-func-deps='aui-da-a-popover'] ~ span span",
|
||||
"type": "text",
|
||||
},
|
||||
{
|
||||
"name": "price",
|
||||
"selector": ".a-price .a-offscreen",
|
||||
"type": "text",
|
||||
},
|
||||
{
|
||||
"name": "original_price",
|
||||
"selector": ".a-price.a-text-price .a-offscreen",
|
||||
"type": "text",
|
||||
},
|
||||
{
|
||||
"name": "sponsored",
|
||||
"selector": ".puis-sponsored-label-text",
|
||||
"type": "exists",
|
||||
},
|
||||
{
|
||||
"name": "delivery_info",
|
||||
"selector": "[data-cy='delivery-recipe'] .a-color-base",
|
||||
"type": "text",
|
||||
"multiple": True,
|
||||
},
|
||||
],
|
||||
}
|
||||
),
|
||||
)
|
||||
|
||||
url = "https://www.amazon.com/"
|
||||
|
||||
async def after_goto(
|
||||
page: Page, context: BrowserContext, url: str, response: dict, **kwargs
|
||||
):
|
||||
"""Hook called after navigating to each URL"""
|
||||
print(f"[HOOK] after_goto - Successfully loaded: {url}")
|
||||
|
||||
try:
|
||||
# Wait for search box to be available
|
||||
search_box = await page.wait_for_selector(
|
||||
"#twotabsearchtextbox", timeout=1000
|
||||
)
|
||||
|
||||
# Type the search query
|
||||
await search_box.fill("Samsung Galaxy Tab")
|
||||
|
||||
# Get the search button and prepare for navigation
|
||||
search_button = await page.wait_for_selector(
|
||||
"#nav-search-submit-button", timeout=1000
|
||||
)
|
||||
|
||||
# Click with navigation waiting
|
||||
await search_button.click()
|
||||
|
||||
# Wait for search results to load
|
||||
await page.wait_for_selector(
|
||||
'[data-component-type="s-search-result"]', timeout=10000
|
||||
)
|
||||
print("[HOOK] Search completed and results loaded!")
|
||||
|
||||
except Exception as e:
|
||||
print(f"[HOOK] Error during search operation: {str(e)}")
|
||||
|
||||
return page
|
||||
|
||||
# Use context manager for proper resource handling
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
crawler.crawler_strategy.set_hook("after_goto", after_goto)
|
||||
|
||||
# Extract the data
|
||||
result = await crawler.arun(url=url, config=crawler_config)
|
||||
|
||||
# Process and print the results
|
||||
if result and result.extracted_content:
|
||||
# Parse the JSON string into a list of products
|
||||
products = json.loads(result.extracted_content)
|
||||
|
||||
# Process each product in the list
|
||||
for product in products:
|
||||
print("\nProduct Details:")
|
||||
print(f"ASIN: {product.get('asin')}")
|
||||
print(f"Title: {product.get('title')}")
|
||||
print(f"Price: {product.get('price')}")
|
||||
print(f"Original Price: {product.get('original_price')}")
|
||||
print(f"Rating: {product.get('rating')}")
|
||||
print(f"Reviews: {product.get('reviews_count')}")
|
||||
print(f"Sponsored: {'Yes' if product.get('sponsored') else 'No'}")
|
||||
if product.get("delivery_info"):
|
||||
print(f"Delivery: {' '.join(product['delivery_info'])}")
|
||||
print("-" * 80)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import asyncio
|
||||
|
||||
asyncio.run(extract_amazon_products())
|
||||
docs/examples/amazon_product_extraction_using_use_javascript.py (new file, 126 lines)
@@ -0,0 +1,126 @@
"""
This example demonstrates how to use JSON CSS extraction to scrape product information
from Amazon search results. It shows how to extract structured data like product titles,
prices, ratings, and other details using CSS selectors.
"""

from crawl4ai import AsyncWebCrawler, CacheMode
from crawl4ai.extraction_strategy import JsonCssExtractionStrategy
from crawl4ai.async_configs import BrowserConfig, CrawlerRunConfig
import json


async def extract_amazon_products():
    # Initialize browser config
    browser_config = BrowserConfig(
        # browser_type="chromium",
        headless=True
    )

    js_code_to_search = """
    const task = async () => {
        document.querySelector('#twotabsearchtextbox').value = 'Samsung Galaxy Tab';
        document.querySelector('#nav-search-submit-button').click();
    }
    await task();
    """
    js_code_to_search_sync = """
    document.querySelector('#twotabsearchtextbox').value = 'Samsung Galaxy Tab';
    document.querySelector('#nav-search-submit-button').click();
    """
    crawler_config = CrawlerRunConfig(
        cache_mode=CacheMode.BYPASS,
        js_code=js_code_to_search,
        wait_for='css:[data-component-type="s-search-result"]',
        extraction_strategy=JsonCssExtractionStrategy(
            schema={
                "name": "Amazon Product Search Results",
                "baseSelector": "[data-component-type='s-search-result']",
                "fields": [
                    {
                        "name": "asin",
                        "selector": "",
                        "type": "attribute",
                        "attribute": "data-asin",
                    },
                    {"name": "title", "selector": "h2 a span", "type": "text"},
                    {
                        "name": "url",
                        "selector": "h2 a",
                        "type": "attribute",
                        "attribute": "href",
                    },
                    {
                        "name": "image",
                        "selector": ".s-image",
                        "type": "attribute",
                        "attribute": "src",
                    },
                    {
                        "name": "rating",
                        "selector": ".a-icon-star-small .a-icon-alt",
                        "type": "text",
                    },
                    {
                        "name": "reviews_count",
                        "selector": "[data-csa-c-func-deps='aui-da-a-popover'] ~ span span",
                        "type": "text",
                    },
                    {
                        "name": "price",
                        "selector": ".a-price .a-offscreen",
                        "type": "text",
                    },
                    {
                        "name": "original_price",
                        "selector": ".a-price.a-text-price .a-offscreen",
                        "type": "text",
                    },
                    {
                        "name": "sponsored",
                        "selector": ".puis-sponsored-label-text",
                        "type": "exists",
                    },
                    {
                        "name": "delivery_info",
                        "selector": "[data-cy='delivery-recipe'] .a-color-base",
                        "type": "text",
                        "multiple": True,
                    },
                ],
            }
        ),
    )

    # Example search URL (you should replace with your actual Amazon URL)
    url = "https://www.amazon.com/"

    # Use context manager for proper resource handling
    async with AsyncWebCrawler(config=browser_config) as crawler:
        # Extract the data
        result = await crawler.arun(url=url, config=crawler_config)

        # Process and print the results
        if result and result.extracted_content:
            # Parse the JSON string into a list of products
            products = json.loads(result.extracted_content)

            # Process each product in the list
            for product in products:
                print("\nProduct Details:")
                print(f"ASIN: {product.get('asin')}")
                print(f"Title: {product.get('title')}")
                print(f"Price: {product.get('price')}")
                print(f"Original Price: {product.get('original_price')}")
                print(f"Rating: {product.get('rating')}")
                print(f"Reviews: {product.get('reviews_count')}")
                print(f"Sponsored: {'Yes' if product.get('sponsored') else 'No'}")
                if product.get("delivery_info"):
                    print(f"Delivery: {' '.join(product['delivery_info'])}")
                print("-" * 80)


if __name__ == "__main__":
    import asyncio

    asyncio.run(extract_amazon_products())
@@ -1,12 +1,16 @@
# File: async_webcrawler_multiple_urls_example.py
import os, sys

# append 2 parent directories to sys.path to import crawl4ai
parent_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
parent_dir = os.path.dirname(
    os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
)
sys.path.append(parent_dir)

import asyncio
from crawl4ai import AsyncWebCrawler


async def main():
    # Initialize the AsyncWebCrawler
    async with AsyncWebCrawler(verbose=True) as crawler:
@@ -16,7 +20,7 @@ async def main():
            "https://python.org",
            "https://github.com",
            "https://stackoverflow.com",
            "https://news.ycombinator.com"
            "https://news.ycombinator.com",
        ]

        # Set up crawling parameters
@@ -27,7 +31,7 @@ async def main():
            urls=urls,
            word_count_threshold=word_count_threshold,
            bypass_cache=True,
            verbose=True
            verbose=True,
        )

        # Process the results
@@ -36,7 +40,9 @@ async def main():
                print(f"Successfully crawled: {result.url}")
                print(f"Title: {result.metadata.get('title', 'N/A')}")
                print(f"Word count: {len(result.markdown.split())}")
                print(f"Number of links: {len(result.links.get('internal', [])) + len(result.links.get('external', []))}")
                print(
                    f"Number of links: {len(result.links.get('internal', [])) + len(result.links.get('external', []))}"
                )
                print(f"Number of images: {len(result.media.get('images', []))}")
                print("---")
            else:
@@ -44,5 +50,6 @@ async def main():
                print(f"Error: {result.error_message}")
                print("---")


if __name__ == "__main__":
    asyncio.run(main())
    asyncio.run(main())
126 docs/examples/browser_optimization_example.py Normal file
@@ -0,0 +1,126 @@
"""
This example demonstrates optimal browser usage patterns in Crawl4AI:
1. Sequential crawling with session reuse
2. Parallel crawling with browser instance reuse
3. Performance optimization settings
"""

import asyncio
from typing import List
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig
from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator


async def crawl_sequential(urls: List[str]):
    """
    Sequential crawling using session reuse - most efficient for moderate workloads
    """
    print("\n=== Sequential Crawling with Session Reuse ===")

    # Configure browser with optimized settings
    browser_config = BrowserConfig(
        headless=True,
        browser_args=[
            "--disable-gpu",  # Disable GPU acceleration
            "--disable-dev-shm-usage",  # Disable /dev/shm usage
            "--no-sandbox",  # Required for Docker
        ],
        viewport={
            "width": 800,
            "height": 600,
        },  # Smaller viewport for better performance
    )

    # Configure crawl settings
    crawl_config = CrawlerRunConfig(
        markdown_generator=DefaultMarkdownGenerator(
            # content_filter=PruningContentFilter(), In case you need fit_markdown
        ),
    )

    # Create single crawler instance
    crawler = AsyncWebCrawler(config=browser_config)
    await crawler.start()

    try:
        session_id = "session1"  # Use same session for all URLs
        for url in urls:
            result = await crawler.arun(
                url=url,
                config=crawl_config,
                session_id=session_id,  # Reuse same browser tab
            )
            if result.success:
                print(f"Successfully crawled {url}")
                print(f"Content length: {len(result.markdown_v2.raw_markdown)}")
    finally:
        await crawler.close()


async def crawl_parallel(urls: List[str], max_concurrent: int = 3):
    """
    Parallel crawling while reusing browser instance - best for large workloads
    """
    print("\n=== Parallel Crawling with Browser Reuse ===")

    browser_config = BrowserConfig(
        headless=True,
        browser_args=["--disable-gpu", "--disable-dev-shm-usage", "--no-sandbox"],
        viewport={"width": 800, "height": 600},
    )

    crawl_config = CrawlerRunConfig(
        markdown_generator=DefaultMarkdownGenerator(
            # content_filter=PruningContentFilter(), In case you need fit_markdown
        ),
    )

    # Create single crawler instance for all parallel tasks
    crawler = AsyncWebCrawler(config=browser_config)
    await crawler.start()

    try:
        # Create tasks in batches to control concurrency
        for i in range(0, len(urls), max_concurrent):
            batch = urls[i : i + max_concurrent]
            tasks = []

            for j, url in enumerate(batch):
                session_id = (
                    f"parallel_session_{j}"  # Different session per concurrent task
                )
                task = crawler.arun(url=url, config=crawl_config, session_id=session_id)
                tasks.append(task)

            # Wait for batch to complete
            results = await asyncio.gather(*tasks, return_exceptions=True)

            # Process results
            for url, result in zip(batch, results):
                if isinstance(result, Exception):
                    print(f"Error crawling {url}: {str(result)}")
                elif result.success:
                    print(f"Successfully crawled {url}")
                    print(f"Content length: {len(result.markdown_v2.raw_markdown)}")
    finally:
        await crawler.close()


async def main():
    # Example URLs
    urls = [
        "https://example.com/page1",
        "https://example.com/page2",
        "https://example.com/page3",
        "https://example.com/page4",
    ]

    # Demo sequential crawling
    await crawl_sequential(urls)

    # Demo parallel crawling
    await crawl_parallel(urls, max_concurrent=2)


if __name__ == "__main__":
    asyncio.run(main())
@@ -1,31 +1,32 @@
import os, time

# append the path to the root of the project
import sys
import asyncio
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))

sys.path.append(os.path.join(os.path.dirname(__file__), "..", ".."))
from firecrawl import FirecrawlApp
from crawl4ai import AsyncWebCrawler
__data__ = os.path.join(os.path.dirname(__file__), '..', '..') + '/.data'

__data__ = os.path.join(os.path.dirname(__file__), "..", "..") + "/.data"


async def compare():
    app = FirecrawlApp(api_key=os.environ['FIRECRAWL_API_KEY'])
    app = FirecrawlApp(api_key=os.environ["FIRECRAWL_API_KEY"])

    # Test Firecrawl with a simple crawl
    start = time.time()
    scrape_status = app.scrape_url(
        'https://www.nbcnews.com/business',
        params={'formats': ['markdown', 'html']}
        "https://www.nbcnews.com/business", params={"formats": ["markdown", "html"]}
    )
    end = time.time()
    print(f"Time taken: {end - start} seconds")
    print(len(scrape_status['markdown']))
    print(len(scrape_status["markdown"]))
    # save the markdown content with provider name
    with open(f"{__data__}/firecrawl_simple.md", "w") as f:
        f.write(scrape_status['markdown'])
        f.write(scrape_status["markdown"])
    # Count how many "cldnry.s-nbcnews.com" are in the markdown
    print(scrape_status['markdown'].count("cldnry.s-nbcnews.com"))


    print(scrape_status["markdown"].count("cldnry.s-nbcnews.com"))

    async with AsyncWebCrawler() as crawler:
        start = time.time()
@@ -33,13 +34,13 @@ async def compare():
            url="https://www.nbcnews.com/business",
            # js_code=["const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More')); loadMoreButton && loadMoreButton.click();"],
            word_count_threshold=0,
            bypass_cache=True,
            verbose=False
            bypass_cache=True,
            verbose=False,
        )
        end = time.time()
        print(f"Time taken: {end - start} seconds")
        print(len(result.markdown))
        # save the markdown content with provider name
        # save the markdown content with provider name
        with open(f"{__data__}/crawl4ai_simple.md", "w") as f:
            f.write(result.markdown)
        # count how many "cldnry.s-nbcnews.com" are in the markdown
@@ -48,10 +49,12 @@ async def compare():
        start = time.time()
        result = await crawler.arun(
            url="https://www.nbcnews.com/business",
            js_code=["const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More')); loadMoreButton && loadMoreButton.click();"],
            js_code=[
                "const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More')); loadMoreButton && loadMoreButton.click();"
            ],
            word_count_threshold=0,
            bypass_cache=True,
            verbose=False
            bypass_cache=True,
            verbose=False,
        )
        end = time.time()
        print(f"Time taken: {end - start} seconds")
@@ -61,7 +64,7 @@ async def compare():
            f.write(result.markdown)
        # count how many "cldnry.s-nbcnews.com" are in the markdown
        print(result.markdown.count("cldnry.s-nbcnews.com"))



if __name__ == "__main__":
    asyncio.run(compare())
136 docs/examples/dispatcher_example.py Normal file
@@ -0,0 +1,136 @@
import asyncio
import time
from rich import print
from rich.table import Table
from crawl4ai import (
    AsyncWebCrawler,
    BrowserConfig,
    CrawlerRunConfig,
    MemoryAdaptiveDispatcher,
    SemaphoreDispatcher,
    RateLimiter,
    CrawlerMonitor,
    DisplayMode,
    CacheMode,
    LXMLWebScrapingStrategy,
)


async def memory_adaptive(urls, browser_config, run_config):
    """Memory adaptive crawler with monitoring"""
    start = time.perf_counter()
    async with AsyncWebCrawler(config=browser_config) as crawler:
        dispatcher = MemoryAdaptiveDispatcher(
            memory_threshold_percent=70.0,
            max_session_permit=10,
            monitor=CrawlerMonitor(
                max_visible_rows=15, display_mode=DisplayMode.DETAILED
            ),
        )
        results = await crawler.arun_many(
            urls, config=run_config, dispatcher=dispatcher
        )
        duration = time.perf_counter() - start
        return len(results), duration


async def memory_adaptive_with_rate_limit(urls, browser_config, run_config):
    """Memory adaptive crawler with rate limiting"""
    start = time.perf_counter()
    async with AsyncWebCrawler(config=browser_config) as crawler:
        dispatcher = MemoryAdaptiveDispatcher(
            memory_threshold_percent=70.0,
            max_session_permit=10,
            rate_limiter=RateLimiter(
                base_delay=(1.0, 2.0), max_delay=30.0, max_retries=2
            ),
            monitor=CrawlerMonitor(
                max_visible_rows=15, display_mode=DisplayMode.DETAILED
            ),
        )
        results = await crawler.arun_many(
            urls, config=run_config, dispatcher=dispatcher
        )
        duration = time.perf_counter() - start
        return len(results), duration


async def semaphore(urls, browser_config, run_config):
    """Basic semaphore crawler"""
    start = time.perf_counter()
    async with AsyncWebCrawler(config=browser_config) as crawler:
        dispatcher = SemaphoreDispatcher(
            semaphore_count=5,
            monitor=CrawlerMonitor(
                max_visible_rows=15, display_mode=DisplayMode.DETAILED
            ),
        )
        results = await crawler.arun_many(
            urls, config=run_config, dispatcher=dispatcher
        )
        duration = time.perf_counter() - start
        return len(results), duration


async def semaphore_with_rate_limit(urls, browser_config, run_config):
    """Semaphore crawler with rate limiting"""
    start = time.perf_counter()
    async with AsyncWebCrawler(config=browser_config) as crawler:
        dispatcher = SemaphoreDispatcher(
            semaphore_count=5,
            rate_limiter=RateLimiter(
                base_delay=(1.0, 2.0), max_delay=30.0, max_retries=2
            ),
            monitor=CrawlerMonitor(
                max_visible_rows=15, display_mode=DisplayMode.DETAILED
            ),
        )
        results = await crawler.arun_many(
            urls, config=run_config, dispatcher=dispatcher
        )
        duration = time.perf_counter() - start
        return len(results), duration


def create_performance_table(results):
    """Creates a rich table showing performance results"""
    table = Table(title="Crawler Strategy Performance Comparison")
    table.add_column("Strategy", style="cyan")
    table.add_column("URLs Crawled", justify="right", style="green")
    table.add_column("Time (seconds)", justify="right", style="yellow")
    table.add_column("URLs/second", justify="right", style="magenta")

    sorted_results = sorted(results.items(), key=lambda x: x[1][1])

    for strategy, (urls_crawled, duration) in sorted_results:
        urls_per_second = urls_crawled / duration
        table.add_row(
            strategy, str(urls_crawled), f"{duration:.2f}", f"{urls_per_second:.2f}"
        )

    return table


async def main():
    urls = [f"https://example.com/page{i}" for i in range(1, 40)]
    browser_config = BrowserConfig(headless=True, verbose=False)
    run_config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS, scraping_strategy=LXMLWebScrapingStrategy())

    results = {
        "Memory Adaptive": await memory_adaptive(urls, browser_config, run_config),
        # "Memory Adaptive + Rate Limit": await memory_adaptive_with_rate_limit(
        #     urls, browser_config, run_config
        # ),
        # "Semaphore": await semaphore(urls, browser_config, run_config),
        # "Semaphore + Rate Limit": await semaphore_with_rate_limit(
        #     urls, browser_config, run_config
        # ),
    }

    table = create_performance_table(results)
    print("\nPerformance Summary:")
    print(table)


if __name__ == "__main__":
    asyncio.run(main())
372 docs/examples/docker_example.py Normal file
@@ -0,0 +1,372 @@
|
||||
import requests
|
||||
import json
|
||||
import time
|
||||
import sys
|
||||
import base64
|
||||
import os
|
||||
from typing import Dict, Any
|
||||
|
||||
|
||||
class Crawl4AiTester:
|
||||
def __init__(self, base_url: str = "http://localhost:11235", api_token: str = None):
|
||||
self.base_url = base_url
|
||||
self.api_token = (
|
||||
api_token or os.getenv("CRAWL4AI_API_TOKEN") or "test_api_code"
|
||||
) # Check environment variable as fallback
|
||||
self.headers = (
|
||||
{"Authorization": f"Bearer {self.api_token}"} if self.api_token else {}
|
||||
)
|
||||
|
||||
def submit_and_wait(
|
||||
self, request_data: Dict[str, Any], timeout: int = 300
|
||||
) -> Dict[str, Any]:
|
||||
# Submit crawl job
|
||||
response = requests.post(
|
||||
f"{self.base_url}/crawl", json=request_data, headers=self.headers
|
||||
)
|
||||
if response.status_code == 403:
|
||||
raise Exception("API token is invalid or missing")
|
||||
task_id = response.json()["task_id"]
|
||||
print(f"Task ID: {task_id}")
|
||||
|
||||
# Poll for result
|
||||
start_time = time.time()
|
||||
while True:
|
||||
if time.time() - start_time > timeout:
|
||||
raise TimeoutError(
|
||||
f"Task {task_id} did not complete within {timeout} seconds"
|
||||
)
|
||||
|
||||
result = requests.get(
|
||||
f"{self.base_url}/task/{task_id}", headers=self.headers
|
||||
)
|
||||
status = result.json()
|
||||
|
||||
if status["status"] == "failed":
|
||||
print("Task failed:", status.get("error"))
|
||||
raise Exception(f"Task failed: {status.get('error')}")
|
||||
|
||||
if status["status"] == "completed":
|
||||
return status
|
||||
|
||||
time.sleep(2)
|
||||
|
||||
def submit_sync(self, request_data: Dict[str, Any]) -> Dict[str, Any]:
|
||||
response = requests.post(
|
||||
f"{self.base_url}/crawl_sync",
|
||||
json=request_data,
|
||||
headers=self.headers,
|
||||
timeout=60,
|
||||
)
|
||||
if response.status_code == 408:
|
||||
raise TimeoutError("Task did not complete within server timeout")
|
||||
response.raise_for_status()
|
||||
return response.json()
|
||||
|
||||
def crawl_direct(self, request_data: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""Directly crawl without using task queue"""
|
||||
response = requests.post(
|
||||
f"{self.base_url}/crawl_direct", json=request_data, headers=self.headers
|
||||
)
|
||||
response.raise_for_status()
|
||||
return response.json()
|
||||
|
||||
|
||||
def test_docker_deployment(version="basic"):
|
||||
tester = Crawl4AiTester(
|
||||
base_url="http://localhost:11235",
|
||||
# base_url="https://api.crawl4ai.com" # just for example
|
||||
# api_token="test" # just for example
|
||||
)
|
||||
print(f"Testing Crawl4AI Docker {version} version")
|
||||
|
||||
# Health check with timeout and retry
|
||||
max_retries = 5
|
||||
for i in range(max_retries):
|
||||
try:
|
||||
health = requests.get(f"{tester.base_url}/health", timeout=10)
|
||||
print("Health check:", health.json())
|
||||
break
|
||||
except requests.exceptions.RequestException:
|
||||
if i == max_retries - 1:
|
||||
print(f"Failed to connect after {max_retries} attempts")
|
||||
sys.exit(1)
|
||||
print(f"Waiting for service to start (attempt {i+1}/{max_retries})...")
|
||||
time.sleep(5)
|
||||
|
||||
# Test cases based on version
|
||||
test_basic_crawl_direct(tester)
|
||||
test_basic_crawl(tester)
|
||||
test_basic_crawl(tester)
|
||||
test_basic_crawl_sync(tester)
|
||||
|
||||
if version in ["full", "transformer"]:
|
||||
test_cosine_extraction(tester)
|
||||
|
||||
test_js_execution(tester)
|
||||
test_css_selector(tester)
|
||||
test_structured_extraction(tester)
|
||||
test_llm_extraction(tester)
|
||||
test_llm_with_ollama(tester)
|
||||
test_screenshot(tester)
|
||||
|
||||
|
||||
def test_basic_crawl(tester: Crawl4AiTester):
|
||||
print("\n=== Testing Basic Crawl ===")
|
||||
request = {
|
||||
"urls": "https://www.nbcnews.com/business",
|
||||
"priority": 10,
|
||||
"session_id": "test",
|
||||
}
|
||||
|
||||
result = tester.submit_and_wait(request)
|
||||
print(f"Basic crawl result length: {len(result['result']['markdown'])}")
|
||||
assert result["result"]["success"]
|
||||
assert len(result["result"]["markdown"]) > 0
|
||||
|
||||
|
||||
def test_basic_crawl_sync(tester: Crawl4AiTester):
|
||||
print("\n=== Testing Basic Crawl (Sync) ===")
|
||||
request = {
|
||||
"urls": "https://www.nbcnews.com/business",
|
||||
"priority": 10,
|
||||
"session_id": "test",
|
||||
}
|
||||
|
||||
result = tester.submit_sync(request)
|
||||
print(f"Basic crawl result length: {len(result['result']['markdown'])}")
|
||||
assert result["status"] == "completed"
|
||||
assert result["result"]["success"]
|
||||
assert len(result["result"]["markdown"]) > 0
|
||||
|
||||
|
||||
def test_basic_crawl_direct(tester: Crawl4AiTester):
|
||||
print("\n=== Testing Basic Crawl (Direct) ===")
|
||||
request = {
|
||||
"urls": "https://www.nbcnews.com/business",
|
||||
"priority": 10,
|
||||
# "session_id": "test"
|
||||
"cache_mode": "bypass", # or "enabled", "disabled", "read_only", "write_only"
|
||||
}
|
||||
|
||||
result = tester.crawl_direct(request)
|
||||
print(f"Basic crawl result length: {len(result['result']['markdown'])}")
|
||||
assert result["result"]["success"]
|
||||
assert len(result["result"]["markdown"]) > 0
|
||||
|
||||
|
||||
def test_js_execution(tester: Crawl4AiTester):
|
||||
print("\n=== Testing JS Execution ===")
|
||||
request = {
|
||||
"urls": "https://www.nbcnews.com/business",
|
||||
"priority": 8,
|
||||
"js_code": [
|
||||
"const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More')); loadMoreButton && loadMoreButton.click();"
|
||||
],
|
||||
"wait_for": "article.tease-card:nth-child(10)",
|
||||
"crawler_params": {"headless": True},
|
||||
}
|
||||
|
||||
result = tester.submit_and_wait(request)
|
||||
print(f"JS execution result length: {len(result['result']['markdown'])}")
|
||||
assert result["result"]["success"]
|
||||
|
||||
|
||||
def test_css_selector(tester: Crawl4AiTester):
|
||||
print("\n=== Testing CSS Selector ===")
|
||||
request = {
|
||||
"urls": "https://www.nbcnews.com/business",
|
||||
"priority": 7,
|
||||
"css_selector": ".wide-tease-item__description",
|
||||
"crawler_params": {"headless": True},
|
||||
"extra": {"word_count_threshold": 10},
|
||||
}
|
||||
|
||||
result = tester.submit_and_wait(request)
|
||||
print(f"CSS selector result length: {len(result['result']['markdown'])}")
|
||||
assert result["result"]["success"]
|
||||
|
||||
|
||||
def test_structured_extraction(tester: Crawl4AiTester):
|
||||
print("\n=== Testing Structured Extraction ===")
|
||||
schema = {
|
||||
"name": "Coinbase Crypto Prices",
|
||||
"baseSelector": ".cds-tableRow-t45thuk",
|
||||
"fields": [
|
||||
{
|
||||
"name": "crypto",
|
||||
"selector": "td:nth-child(1) h2",
|
||||
"type": "text",
|
||||
},
|
||||
{
|
||||
"name": "symbol",
|
||||
"selector": "td:nth-child(1) p",
|
||||
"type": "text",
|
||||
},
|
||||
{
|
||||
"name": "price",
|
||||
"selector": "td:nth-child(2)",
|
||||
"type": "text",
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
request = {
|
||||
"urls": "https://www.coinbase.com/explore",
|
||||
"priority": 9,
|
||||
"extraction_config": {"type": "json_css", "params": {"schema": schema}},
|
||||
}
|
||||
|
||||
result = tester.submit_and_wait(request)
|
||||
extracted = json.loads(result["result"]["extracted_content"])
|
||||
print(f"Extracted {len(extracted)} items")
|
||||
print("Sample item:", json.dumps(extracted[0], indent=2))
|
||||
assert result["result"]["success"]
|
||||
assert len(extracted) > 0
|
||||
|
||||
|
||||
def test_llm_extraction(tester: Crawl4AiTester):
|
||||
print("\n=== Testing LLM Extraction ===")
|
||||
schema = {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"model_name": {
|
||||
"type": "string",
|
||||
"description": "Name of the OpenAI model.",
|
||||
},
|
||||
"input_fee": {
|
||||
"type": "string",
|
||||
"description": "Fee for input token for the OpenAI model.",
|
||||
},
|
||||
"output_fee": {
|
||||
"type": "string",
|
||||
"description": "Fee for output token for the OpenAI model.",
|
||||
},
|
||||
},
|
||||
"required": ["model_name", "input_fee", "output_fee"],
|
||||
}
|
||||
|
||||
request = {
|
||||
"urls": "https://openai.com/api/pricing",
|
||||
"priority": 8,
|
||||
"extraction_config": {
|
||||
"type": "llm",
|
||||
"params": {
|
||||
"provider": "openai/gpt-4o-mini",
|
||||
"api_token": os.getenv("OPENAI_API_KEY"),
|
||||
"schema": schema,
|
||||
"extraction_type": "schema",
|
||||
"instruction": """From the crawled content, extract all mentioned model names along with their fees for input and output tokens.""",
|
||||
},
|
||||
},
|
||||
"crawler_params": {"word_count_threshold": 1},
|
||||
}
|
||||
|
||||
try:
|
||||
result = tester.submit_and_wait(request)
|
||||
extracted = json.loads(result["result"]["extracted_content"])
|
||||
print(f"Extracted {len(extracted)} model pricing entries")
|
||||
print("Sample entry:", json.dumps(extracted[0], indent=2))
|
||||
assert result["result"]["success"]
|
||||
except Exception as e:
|
||||
print(f"LLM extraction test failed (might be due to missing API key): {str(e)}")
|
||||
|
||||
|
||||
def test_llm_with_ollama(tester: Crawl4AiTester):
|
||||
print("\n=== Testing LLM with Ollama ===")
|
||||
schema = {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"article_title": {
|
||||
"type": "string",
|
||||
"description": "The main title of the news article",
|
||||
},
|
||||
"summary": {
|
||||
"type": "string",
|
||||
"description": "A brief summary of the article content",
|
||||
},
|
||||
"main_topics": {
|
||||
"type": "array",
|
||||
"items": {"type": "string"},
|
||||
"description": "Main topics or themes discussed in the article",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
request = {
|
||||
"urls": "https://www.nbcnews.com/business",
|
||||
"priority": 8,
|
||||
"extraction_config": {
|
||||
"type": "llm",
|
||||
"params": {
|
||||
"provider": "ollama/llama2",
|
||||
"schema": schema,
|
||||
"extraction_type": "schema",
|
||||
"instruction": "Extract the main article information including title, summary, and main topics.",
|
||||
},
|
||||
},
|
||||
"extra": {"word_count_threshold": 1},
|
||||
"crawler_params": {"verbose": True},
|
||||
}
|
||||
|
||||
try:
|
||||
result = tester.submit_and_wait(request)
|
||||
extracted = json.loads(result["result"]["extracted_content"])
|
||||
print("Extracted content:", json.dumps(extracted, indent=2))
|
||||
assert result["result"]["success"]
|
||||
except Exception as e:
|
||||
print(f"Ollama extraction test failed: {str(e)}")
|
||||
|
||||
|
||||
def test_cosine_extraction(tester: Crawl4AiTester):
|
||||
print("\n=== Testing Cosine Extraction ===")
|
||||
request = {
|
||||
"urls": "https://www.nbcnews.com/business",
|
||||
"priority": 8,
|
||||
"extraction_config": {
|
||||
"type": "cosine",
|
||||
"params": {
|
||||
"semantic_filter": "business finance economy",
|
||||
"word_count_threshold": 10,
|
||||
"max_dist": 0.2,
|
||||
"top_k": 3,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
try:
|
||||
result = tester.submit_and_wait(request)
|
||||
extracted = json.loads(result["result"]["extracted_content"])
|
||||
print(f"Extracted {len(extracted)} text clusters")
|
||||
print("First cluster tags:", extracted[0]["tags"])
|
||||
assert result["result"]["success"]
|
||||
except Exception as e:
|
||||
print(f"Cosine extraction test failed: {str(e)}")
|
||||
|
||||
|
||||
def test_screenshot(tester: Crawl4AiTester):
|
||||
print("\n=== Testing Screenshot ===")
|
||||
request = {
|
||||
"urls": "https://www.nbcnews.com/business",
|
||||
"priority": 5,
|
||||
"screenshot": True,
|
||||
"crawler_params": {"headless": True},
|
||||
}
|
||||
|
||||
result = tester.submit_and_wait(request)
|
||||
print("Screenshot captured:", bool(result["result"]["screenshot"]))
|
||||
|
||||
if result["result"]["screenshot"]:
|
||||
# Save screenshot
|
||||
screenshot_data = base64.b64decode(result["result"]["screenshot"])
|
||||
with open("test_screenshot.jpg", "wb") as f:
|
||||
f.write(screenshot_data)
|
||||
print("Screenshot saved as test_screenshot.jpg")
|
||||
|
||||
assert result["result"]["success"]
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
version = sys.argv[1] if len(sys.argv) > 1 else "basic"
|
||||
# version = "full"
|
||||
test_docker_deployment(version)
|
||||
127 docs/examples/extraction_strategies_example.py Normal file
@@ -0,0 +1,127 @@
"""
Example demonstrating different extraction strategies with various input formats.
This example shows how to:
1. Use different input formats (markdown, HTML, fit_markdown)
2. Work with JSON-based extractors (CSS and XPath)
3. Use LLM-based extraction with different input formats
4. Configure browser and crawler settings properly
"""

import asyncio
import os

from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
from crawl4ai.extraction_strategy import (
    LLMExtractionStrategy,
    JsonCssExtractionStrategy,
    JsonXPathExtractionStrategy,
)
from crawl4ai.content_filter_strategy import PruningContentFilter
from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator


async def run_extraction(crawler: AsyncWebCrawler, url: str, strategy, name: str):
    """Helper function to run extraction with proper configuration"""
    try:
        # Configure the crawler run settings
        config = CrawlerRunConfig(
            cache_mode=CacheMode.BYPASS,
            extraction_strategy=strategy,
            markdown_generator=DefaultMarkdownGenerator(
                content_filter=PruningContentFilter()  # For fit_markdown support
            ),
        )

        # Run the crawler
        result = await crawler.arun(url=url, config=config)

        if result.success:
            print(f"\n=== {name} Results ===")
            print(f"Extracted Content: {result.extracted_content}")
            print(f"Raw Markdown Length: {len(result.markdown_v2.raw_markdown)}")
            print(
                f"Citations Markdown Length: {len(result.markdown_v2.markdown_with_citations)}"
            )
        else:
            print(f"Error in {name}: Crawl failed")

    except Exception as e:
        print(f"Error in {name}: {str(e)}")


async def main():
    # Example URL (replace with actual URL)
    url = "https://example.com/product-page"

    # Configure browser settings
    browser_config = BrowserConfig(headless=True, verbose=True)

    # Initialize extraction strategies

    # 1. LLM Extraction with different input formats
    markdown_strategy = LLMExtractionStrategy(
        provider="openai/gpt-4o-mini",
        api_token=os.getenv("OPENAI_API_KEY"),
        instruction="Extract product information including name, price, and description",
    )

    html_strategy = LLMExtractionStrategy(
        input_format="html",
        provider="openai/gpt-4o-mini",
        api_token=os.getenv("OPENAI_API_KEY"),
        instruction="Extract product information from HTML including structured data",
    )

    fit_markdown_strategy = LLMExtractionStrategy(
        input_format="fit_markdown",
        provider="openai/gpt-4o-mini",
        api_token=os.getenv("OPENAI_API_KEY"),
        instruction="Extract product information from cleaned markdown",
    )

    # 2. JSON CSS Extraction (automatically uses HTML input)
    css_schema = {
        "baseSelector": ".product",
        "fields": [
            {"name": "title", "selector": "h1.product-title", "type": "text"},
            {"name": "price", "selector": ".price", "type": "text"},
            {"name": "description", "selector": ".description", "type": "text"},
        ],
    }
    css_strategy = JsonCssExtractionStrategy(schema=css_schema)

    # 3. JSON XPath Extraction (automatically uses HTML input)
    xpath_schema = {
        "baseSelector": "//div[@class='product']",
        "fields": [
            {
                "name": "title",
                "selector": ".//h1[@class='product-title']/text()",
                "type": "text",
            },
            {
                "name": "price",
                "selector": ".//span[@class='price']/text()",
                "type": "text",
            },
            {
                "name": "description",
                "selector": ".//div[@class='description']/text()",
                "type": "text",
            },
        ],
    }
    xpath_strategy = JsonXPathExtractionStrategy(schema=xpath_schema)

    # Use context manager for proper resource handling
    async with AsyncWebCrawler(config=browser_config) as crawler:
        # Run all strategies
        await run_extraction(crawler, url, markdown_strategy, "Markdown LLM")
        await run_extraction(crawler, url, html_strategy, "HTML LLM")
        await run_extraction(crawler, url, fit_markdown_strategy, "Fit Markdown LLM")
        await run_extraction(crawler, url, css_strategy, "CSS Extraction")
        await run_extraction(crawler, url, xpath_strategy, "XPath Extraction")


if __name__ == "__main__":
    asyncio.run(main())
58 docs/examples/full_page_screenshot_and_pdf_export.md Normal file
@@ -0,0 +1,58 @@
# Capturing Full-Page Screenshots and PDFs from Massive Webpages with Crawl4AI

When dealing with very long web pages, traditional full-page screenshots can be slow or fail entirely. For large pages (like extensive Wikipedia articles), generating a single massive screenshot often leads to delays, memory issues, or style differences.

**The New Approach:**
We’ve introduced a new feature that effortlessly handles even the biggest pages by first exporting them as a PDF, then converting that PDF into a high-quality image. This approach leverages the browser’s built-in PDF rendering, making it both stable and efficient for very long content. You also have the option to directly save the PDF for your own usage—no need for multiple passes or complex stitching logic.

**Key Benefits:**
- **Reliability:** The PDF export never times out and works regardless of page length.
- **Versatility:** Get both the PDF and a screenshot in one crawl, without reloading or reprocessing.
- **Performance:** Skips manual scrolling and stitching images, reducing complexity and runtime.

**Simple Example:**
```python
import os, sys
import asyncio
from crawl4ai import AsyncWebCrawler, CacheMode

# Adjust paths as needed
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(parent_dir)
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))

async def main():
    async with AsyncWebCrawler() as crawler:
        # Request both PDF and screenshot
        result = await crawler.arun(
            url='https://en.wikipedia.org/wiki/List_of_common_misconceptions',
            cache_mode=CacheMode.BYPASS,
            pdf=True,
            screenshot=True
        )

        if result.success:
            # Save screenshot
            if result.screenshot:
                from base64 import b64decode
                with open(os.path.join(__location__, "screenshot.png"), "wb") as f:
                    f.write(b64decode(result.screenshot))

            # Save PDF
            if result.pdf:
                pdf_bytes = b64decode(result.pdf)
                with open(os.path.join(__location__, "page.pdf"), "wb") as f:
                    f.write(pdf_bytes)

if __name__ == "__main__":
    asyncio.run(main())
```

**What Happens Under the Hood:**
- Crawl4AI navigates to the target page.
- If `pdf=True`, it exports the current page as a full PDF, capturing all of its content no matter the length.
- If `screenshot=True`, and a PDF is already available, it directly converts the first page of that PDF to an image for you—no repeated loading or scrolling.
- Finally, you get your PDF and/or screenshot ready to use.

**Conclusion:**
With this feature, Crawl4AI becomes even more robust and versatile for large-scale content extraction. Whether you need a PDF snapshot or a quick screenshot, you now have a reliable solution for even the most extensive webpages.
23 docs/examples/hello_world.py Normal file
@@ -0,0 +1,23 @@
import asyncio
from crawl4ai import *


async def main():
    browser_config = BrowserConfig(headless=True, verbose=True)
    async with AsyncWebCrawler(config=browser_config) as crawler:
        crawler_config = CrawlerRunConfig(
            cache_mode=CacheMode.BYPASS,
            markdown_generator=DefaultMarkdownGenerator(
                content_filter=PruningContentFilter(
                    threshold=0.48, threshold_type="fixed", min_word_threshold=0
                )
            ),
        )
        result = await crawler.arun(
            url="https://www.helloworld.org", config=crawler_config
        )
        print(result.markdown_v2.raw_markdown[:500])


if __name__ == "__main__":
    asyncio.run(main())
118 docs/examples/hooks_example.py Normal file
@@ -0,0 +1,118 @@
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
from playwright.async_api import Page, BrowserContext


async def main():
    print("🔗 Hooks Example: Demonstrating different hook use cases")

    # Configure browser settings
    browser_config = BrowserConfig(headless=True)

    # Configure crawler settings
    crawler_run_config = CrawlerRunConfig(
        js_code="window.scrollTo(0, document.body.scrollHeight);",
        wait_for="body",
        cache_mode=CacheMode.BYPASS,
    )

    # Create crawler instance
    crawler = AsyncWebCrawler(config=browser_config)

    # Define and set hook functions
    async def on_browser_created(browser, context: BrowserContext, **kwargs):
        """Hook called after the browser is created"""
        print("[HOOK] on_browser_created - Browser is ready!")
        # Example: Set a cookie that will be used for all requests
        return browser

    async def on_page_context_created(page: Page, context: BrowserContext, **kwargs):
        """Hook called after a new page and context are created"""
        print("[HOOK] on_page_context_created - New page created!")
        # Example: Set default viewport size
        await context.add_cookies(
            [
                {
                    "name": "session_id",
                    "value": "example_session",
                    "domain": ".example.com",
                    "path": "/",
                }
            ]
        )
        await page.set_viewport_size({"width": 1080, "height": 800})
        return page

    async def on_user_agent_updated(
        page: Page, context: BrowserContext, user_agent: str, **kwargs
    ):
        """Hook called when the user agent is updated"""
        print(f"[HOOK] on_user_agent_updated - New user agent: {user_agent}")
        return page

    async def on_execution_started(page: Page, context: BrowserContext, **kwargs):
        """Hook called after custom JavaScript execution"""
        print("[HOOK] on_execution_started - Custom JS executed!")
        return page

    async def before_goto(page: Page, context: BrowserContext, url: str, **kwargs):
        """Hook called before navigating to each URL"""
        print(f"[HOOK] before_goto - About to visit: {url}")
        # Example: Add custom headers for the request
        await page.set_extra_http_headers({"Custom-Header": "my-value"})
        return page

    async def after_goto(
        page: Page, context: BrowserContext, url: str, response: dict, **kwargs
    ):
        """Hook called after navigating to each URL"""
        print(f"[HOOK] after_goto - Successfully loaded: {url}")
        # Example: Wait for a specific element to be loaded
        try:
            await page.wait_for_selector(".content", timeout=1000)
            print("Content element found!")
        except:
            print("Content element not found, continuing anyway")
        return page

    async def before_retrieve_html(page: Page, context: BrowserContext, **kwargs):
        """Hook called before retrieving the HTML content"""
        print("[HOOK] before_retrieve_html - About to get HTML content")
        # Example: Scroll to bottom to trigger lazy loading
        await page.evaluate("window.scrollTo(0, document.body.scrollHeight);")
        return page

    async def before_return_html(
        page: Page, context: BrowserContext, html: str, **kwargs
    ):
        """Hook called before returning the HTML content"""
        print(f"[HOOK] before_return_html - Got HTML content (length: {len(html)})")
        # Example: You could modify the HTML content here if needed
        return page

    # Set all the hooks
    crawler.crawler_strategy.set_hook("on_browser_created", on_browser_created)
    crawler.crawler_strategy.set_hook(
        "on_page_context_created", on_page_context_created
    )
    crawler.crawler_strategy.set_hook("on_user_agent_updated", on_user_agent_updated)
    crawler.crawler_strategy.set_hook("on_execution_started", on_execution_started)
    crawler.crawler_strategy.set_hook("before_goto", before_goto)
    crawler.crawler_strategy.set_hook("after_goto", after_goto)
    crawler.crawler_strategy.set_hook("before_retrieve_html", before_retrieve_html)
    crawler.crawler_strategy.set_hook("before_return_html", before_return_html)

    await crawler.start()

    # Example usage: crawl a simple website
    url = "https://example.com"
    result = await crawler.arun(url, config=crawler_run_config)
    print(f"\nCrawled URL: {result.url}")
    print(f"HTML length: {len(result.html)}")

    await crawler.close()


if __name__ == "__main__":
    import asyncio

    asyncio.run(main())
@@ -1,6 +1,7 @@
import asyncio
from crawl4ai import AsyncWebCrawler, AsyncPlaywrightCrawlerStrategy


async def main():
    # Example 1: Setting language when creating the crawler
    crawler1 = AsyncWebCrawler(
@@ -9,11 +10,15 @@ async def main():
        )
    )
    result1 = await crawler1.arun("https://www.example.com")
    print("Example 1 result:", result1.extracted_content[:100])  # Print first 100 characters
    print(
        "Example 1 result:", result1.extracted_content[:100]
    )  # Print first 100 characters

    # Example 2: Setting language before crawling
    crawler2 = AsyncWebCrawler()
    crawler2.crawler_strategy.headers["Accept-Language"] = "es-ES,es;q=0.9,en-US;q=0.8,en;q=0.7"
    crawler2.crawler_strategy.headers[
        "Accept-Language"
    ] = "es-ES,es;q=0.9,en-US;q=0.8,en;q=0.7"
    result2 = await crawler2.arun("https://www.example.com")
    print("Example 2 result:", result2.extracted_content[:100])

@@ -21,7 +26,7 @@ async def main():
    crawler3 = AsyncWebCrawler()
    result3 = await crawler3.arun(
        "https://www.example.com",
        headers={"Accept-Language": "de-DE,de;q=0.9,en-US;q=0.8,en;q=0.7"}
        headers={"Accept-Language": "de-DE,de;q=0.9,en-US;q=0.8,en;q=0.7"},
    )
    print("Example 3 result:", result3.extracted_content[:100])

@@ -31,15 +36,15 @@ async def main():
        ("https://www.example.org", "es-ES,es;q=0.9"),
        ("https://www.example.net", "de-DE,de;q=0.9"),
    ]


    crawler4 = AsyncWebCrawler()
    results = await asyncio.gather(*[
        crawler4.arun(url, headers={"Accept-Language": lang})
        for url, lang in urls
    ])

    results = await asyncio.gather(
        *[crawler4.arun(url, headers={"Accept-Language": lang}) for url, lang in urls]
    )

    for url, result in zip([u for u, _ in urls], results):
        print(f"Result for {url}:", result.extracted_content[:100])


if __name__ == "__main__":
    asyncio.run(main())
    asyncio.run(main())
@@ -1,41 +1,46 @@
import os
import time
from crawl4ai.web_crawler import WebCrawler
from crawl4ai.chunking_strategy import *
from crawl4ai.extraction_strategy import *
from crawl4ai.crawler_strategy import *

url = r'https://openai.com/api/pricing/'

crawler = WebCrawler()
crawler.warmup()

import asyncio
from pydantic import BaseModel, Field

url = r"https://openai.com/api/pricing/"


class OpenAIModelFee(BaseModel):
    model_name: str = Field(..., description="Name of the OpenAI model.")
    input_fee: str = Field(..., description="Fee for input token for the OpenAI model.")
    output_fee: str = Field(..., description="Fee for output token for the OpenAI model.")
    output_fee: str = Field(
        ..., description="Fee for output token for the OpenAI model."
    )

result = crawler.run(
    url=url,
    word_count_threshold=1,
    extraction_strategy= LLMExtractionStrategy(
        # provider= "openai/gpt-4o", api_token = os.getenv('OPENAI_API_KEY'),
        provider= "groq/llama-3.1-70b-versatile", api_token = os.getenv('GROQ_API_KEY'),
        schema=OpenAIModelFee.model_json_schema(),
        extraction_type="schema",
        instruction="From the crawled content, extract all mentioned model names along with their "\
        "fees for input and output tokens. Make sure not to miss anything in the entire content. "\
        'One extracted model JSON format should look like this: '\
        '{ "model_name": "GPT-4", "input_fee": "US$10.00 / 1M tokens", "output_fee": "US$30.00 / 1M tokens" }'
    ),
    bypass_cache=True,
)

model_fees = json.loads(result.extracted_content)
from crawl4ai import AsyncWebCrawler

print(len(model_fees))

with open(".data/data.json", "w", encoding="utf-8") as f:
    f.write(result.extracted_content)
async def main():
    # Use AsyncWebCrawler
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(
            url=url,
            word_count_threshold=1,
            extraction_strategy=LLMExtractionStrategy(
                # provider= "openai/gpt-4o", api_token = os.getenv('OPENAI_API_KEY'),
                provider="groq/llama-3.1-70b-versatile",
                api_token=os.getenv("GROQ_API_KEY"),
                schema=OpenAIModelFee.model_json_schema(),
                extraction_type="schema",
                instruction="From the crawled content, extract all mentioned model names along with their "
                "fees for input and output tokens. Make sure not to miss anything in the entire content. "
                "One extracted model JSON format should look like this: "
                '{ "model_name": "GPT-4", "input_fee": "US$10.00 / 1M tokens", "output_fee": "US$30.00 / 1M tokens" }',
            ),
        )
        print("Success:", result.success)
        model_fees = json.loads(result.extracted_content)
        print(len(model_fees))

        with open(".data/data.json", "w", encoding="utf-8") as f:
            f.write(result.extracted_content)


asyncio.run(main())
87 docs/examples/llm_markdown_generator.py Normal file
@@ -0,0 +1,87 @@
import os
import asyncio
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
from crawl4ai.content_filter_strategy import LLMContentFilter

async def test_llm_filter():
    # Create an HTML source that needs intelligent filtering
    url = "https://docs.python.org/3/tutorial/classes.html"

    browser_config = BrowserConfig(
        headless=True,
        verbose=True
    )

    # run_config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS)
    run_config = CrawlerRunConfig(cache_mode=CacheMode.ENABLED)

    async with AsyncWebCrawler(config=browser_config) as crawler:
        # First get the raw HTML
        result = await crawler.arun(url, config=run_config)
        html = result.cleaned_html

        # Initialize LLM filter with focused instruction
        filter = LLMContentFilter(
            provider="openai/gpt-4o",
            api_token=os.getenv('OPENAI_API_KEY'),
            instruction="""
            Focus on extracting the core educational content about Python classes.
            Include:
            - Key concepts and their explanations
            - Important code examples
            - Essential technical details
            Exclude:
            - Navigation elements
            - Sidebars
            - Footer content
            - Version information
            - Any non-essential UI elements

            Format the output as clean markdown with proper code blocks and headers.
            """,
            verbose=True
        )

        filter = LLMContentFilter(
            provider="openai/gpt-4o",
            api_token=os.getenv('OPENAI_API_KEY'),
            chunk_token_threshold=2 ** 12 * 2,  # 2048 * 2
            instruction="""
            Extract the main educational content while preserving its original wording and substance completely. Your task is to:

            1. Maintain the exact language and terminology used in the main content
            2. Keep all technical explanations, examples, and educational content intact
            3. Preserve the original flow and structure of the core content
            4. Remove only clearly irrelevant elements like:
               - Navigation menus
               - Advertisement sections
               - Cookie notices
               - Footers with site information
               - Sidebars with external links
               - Any UI elements that don't contribute to learning

            The goal is to create a clean markdown version that reads exactly like the original article,
            keeping all valuable content but free from distracting elements. Imagine you're creating
            a perfect reading experience where nothing valuable is lost, but all noise is removed.
            """,
            verbose=True
        )

        # Apply filtering
        filtered_content = filter.filter_content(html, ignore_cache=True)

        # Show results
        print("\nFiltered Content Length:", len(filtered_content))
        print("\nFirst 500 chars of filtered content:")
        if filtered_content:
            print(filtered_content[0][:500])

        # Save on disc the markdown version
        with open("filtered_content.md", "w", encoding="utf-8") as f:
            f.write("\n".join(filtered_content))

        # Show token usage
        filter.show_usage()

if __name__ == "__main__":
    asyncio.run(test_llm_filter())
File diff suppressed because one or more lines are too long
618 docs/examples/quickstart_async.config.py Normal file
@@ -0,0 +1,618 @@
|
||||
import os, sys
|
||||
|
||||
sys.path.append(
|
||||
os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
)
|
||||
|
||||
import asyncio
|
||||
import time
|
||||
import json
|
||||
import re
|
||||
from typing import Dict
|
||||
from bs4 import BeautifulSoup
|
||||
from pydantic import BaseModel, Field
|
||||
from crawl4ai import AsyncWebCrawler, CacheMode, BrowserConfig, CrawlerRunConfig
|
||||
from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator
|
||||
from crawl4ai.content_filter_strategy import PruningContentFilter
|
||||
from crawl4ai.extraction_strategy import (
|
||||
JsonCssExtractionStrategy,
|
||||
LLMExtractionStrategy,
|
||||
)
|
||||
|
||||
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
|
||||
|
||||
print("Crawl4AI: Advanced Web Crawling and Data Extraction")
|
||||
print("GitHub Repository: https://github.com/unclecode/crawl4ai")
|
||||
print("Twitter: @unclecode")
|
||||
print("Website: https://crawl4ai.com")
|
||||
|
||||
|
||||
# Basic Example - Simple Crawl
|
||||
async def simple_crawl():
|
||||
print("\n--- Basic Usage ---")
|
||||
browser_config = BrowserConfig(headless=True)
|
||||
crawler_config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS)
|
||||
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://www.nbcnews.com/business", config=crawler_config
|
||||
)
|
||||
print(result.markdown[:500])
|
||||
|
||||
|
||||
async def clean_content():
|
||||
crawler_config = CrawlerRunConfig(
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
excluded_tags=["nav", "footer", "aside"],
|
||||
remove_overlay_elements=True,
|
||||
markdown_generator=DefaultMarkdownGenerator(
|
||||
content_filter=PruningContentFilter(
|
||||
threshold=0.48, threshold_type="fixed", min_word_threshold=0
|
||||
),
|
||||
options={"ignore_links": True},
|
||||
),
|
||||
)
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://en.wikipedia.org/wiki/Apple",
|
||||
config=crawler_config,
|
||||
)
|
||||
full_markdown_length = len(result.markdown_v2.raw_markdown)
|
||||
fit_markdown_length = len(result.markdown_v2.fit_markdown)
|
||||
print(f"Full Markdown Length: {full_markdown_length}")
|
||||
print(f"Fit Markdown Length: {fit_markdown_length}")
|
||||
|
||||
|
||||
async def link_analysis():
|
||||
crawler_config = CrawlerRunConfig(
|
||||
cache_mode=CacheMode.ENABLED,
|
||||
exclude_external_links=True,
|
||||
exclude_social_media_links=True,
|
||||
)
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://www.nbcnews.com/business",
|
||||
config=crawler_config,
|
||||
)
|
||||
print(f"Found {len(result.links['internal'])} internal links")
|
||||
print(f"Found {len(result.links['external'])} external links")
|
||||
|
||||
for link in result.links["internal"][:5]:
|
||||
print(f"Href: {link['href']}\nText: {link['text']}\n")
|
||||
|
||||
|
||||
# JavaScript Execution Example
|
||||
async def simple_example_with_running_js_code():
|
||||
print("\n--- Executing JavaScript and Using CSS Selectors ---")
|
||||
|
||||
browser_config = BrowserConfig(headless=True, java_script_enabled=True)
|
||||
|
||||
crawler_config = CrawlerRunConfig(
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
js_code="const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More')); loadMoreButton && loadMoreButton.click();",
|
||||
# wait_for="() => { return Array.from(document.querySelectorAll('article.tease-card')).length > 10; }"
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://www.nbcnews.com/business", config=crawler_config
|
||||
)
|
||||
print(result.markdown[:500])
|
||||
|
||||
|
||||
# CSS Selector Example
|
||||
async def simple_example_with_css_selector():
|
||||
print("\n--- Using CSS Selectors ---")
|
||||
browser_config = BrowserConfig(headless=True)
|
||||
crawler_config = CrawlerRunConfig(
|
||||
cache_mode=CacheMode.BYPASS, css_selector=".wide-tease-item__description"
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://www.nbcnews.com/business", config=crawler_config
|
||||
)
|
||||
print(result.markdown[:500])
|
||||
|
||||
|
||||
async def media_handling():
|
||||
crawler_config = CrawlerRunConfig(
|
||||
cache_mode=CacheMode.BYPASS, exclude_external_images=True, screenshot=True
|
||||
)
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://www.nbcnews.com/business", config=crawler_config
|
||||
)
|
||||
for img in result.media["images"][:5]:
|
||||
print(f"Image URL: {img['src']}, Alt: {img['alt']}, Score: {img['score']}")
|
||||
|
||||
|
||||
async def custom_hook_workflow(verbose=True):
    async with AsyncWebCrawler() as crawler:
        # Set a 'before_goto' hook to run custom code just before navigation
        crawler.crawler_strategy.set_hook(
            "before_goto",
            lambda page, context: print("[Hook] Preparing to navigate..."),
        )

        # Perform the crawl operation
        result = await crawler.arun(url="https://crawl4ai.com")
        print(result.markdown_v2.raw_markdown[:500].replace("\n", " -- "))

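# Hedged sketch (not in the original file): the same set_hook() mechanism is used later
# in this file with the "on_execution_started" hook. Assuming both hook names behave as
# shown elsewhere in this diff, registering more than one hook on a crawler looks like
# this; "on_execution_started" only fires when js_code is supplied to the run.
async def custom_hook_workflow_multiple_hooks():
    async with AsyncWebCrawler() as crawler:
        # Runs just before navigation, as in custom_hook_workflow above
        crawler.crawler_strategy.set_hook(
            "before_goto",
            lambda page, context: print("[Hook] Preparing to navigate..."),
        )

        # Runs when JS execution starts, mirroring crawl_dynamic_content_pages_method_1
        async def on_execution_started(page, **kwargs):
            print("[Hook] JavaScript execution started")

        crawler.crawler_strategy.set_hook("on_execution_started", on_execution_started)

        result = await crawler.arun(url="https://crawl4ai.com")
        print(result.markdown[:200])
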
# Proxy Example
async def use_proxy():
    print("\n--- Using a Proxy ---")
    browser_config = BrowserConfig(
        headless=True,
        proxy_config={
            "server": "http://proxy.example.com:8080",
            "username": "username",
            "password": "password",
        },
    )
    crawler_config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS)

    async with AsyncWebCrawler(config=browser_config) as crawler:
        result = await crawler.arun(
            url="https://www.nbcnews.com/business", config=crawler_config
        )
        if result.success:
            print(result.markdown[:500])

# Screenshot Example
async def capture_and_save_screenshot(url: str, output_path: str):
    browser_config = BrowserConfig(headless=True)
    crawler_config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS, screenshot=True)

    async with AsyncWebCrawler(config=browser_config) as crawler:
        result = await crawler.arun(url=url, config=crawler_config)

        if result.success and result.screenshot:
            import base64

            screenshot_data = base64.b64decode(result.screenshot)
            with open(output_path, "wb") as f:
                f.write(screenshot_data)
            print(f"Screenshot saved successfully to {output_path}")
        else:
            print("Failed to capture screenshot")

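# Hedged usage sketch: main() at the bottom of this file calls the helper above with a
# path under __location__ (assumed to be defined near the imports, as it is used
# elsewhere in this file). Run standalone, the same call would look like the commented
# lines below; they are left commented so nothing executes at import time.
#
# asyncio.run(
#     capture_and_save_screenshot(
#         "https://www.example.com",
#         os.path.join(__location__, "tmp/example_screenshot.jpg"),
#     )
# )
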
# LLM Extraction Example
class OpenAIModelFee(BaseModel):
    model_name: str = Field(..., description="Name of the OpenAI model.")
    input_fee: str = Field(..., description="Fee for input token for the OpenAI model.")
    output_fee: str = Field(
        ..., description="Fee for output token for the OpenAI model."
    )


async def extract_structured_data_using_llm(
    provider: str, api_token: str = None, extra_headers: Dict[str, str] = None
):
    print(f"\n--- Extracting Structured Data with {provider} ---")

    if api_token is None and provider != "ollama":
        print(f"API token is required for {provider}. Skipping this example.")
        return

    browser_config = BrowserConfig(headless=True)

    extra_args = {"temperature": 0, "top_p": 0.9, "max_tokens": 2000}
    if extra_headers:
        extra_args["extra_headers"] = extra_headers

    crawler_config = CrawlerRunConfig(
        cache_mode=CacheMode.BYPASS,
        word_count_threshold=1,
        page_timeout=80000,
        extraction_strategy=LLMExtractionStrategy(
            provider=provider,
            api_token=api_token,
            schema=OpenAIModelFee.model_json_schema(),
            extraction_type="schema",
            instruction="""From the crawled content, extract all mentioned model names along with their fees for input and output tokens.
            Do not miss any models in the entire content.""",
            extra_args=extra_args,
        ),
    )

    async with AsyncWebCrawler(config=browser_config) as crawler:
        result = await crawler.arun(
            url="https://openai.com/api/pricing/", config=crawler_config
        )
        print(result.extracted_content)

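# Hedged usage sketch: the older main() preserved further down in this diff exercised
# the helper above with several providers; any of these calls should work the same way
# with the current signature. API keys are read from the environment, and "ollama" is
# the only provider that skips the token check above.
#
# await extract_structured_data_using_llm("openai/gpt-4o", os.getenv("OPENAI_API_KEY"))
# await extract_structured_data_using_llm(
#     "huggingface/meta-llama/Meta-Llama-3.1-8B-Instruct", os.getenv("HUGGINGFACE_API_KEY")
# )
# await extract_structured_data_using_llm("ollama/llama3.2")
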
# CSS Extraction Example
|
||||
async def extract_structured_data_using_css_extractor():
|
||||
print("\n--- Using JsonCssExtractionStrategy for Fast Structured Output ---")
|
||||
schema = {
|
||||
"name": "KidoCode Courses",
|
||||
"baseSelector": "section.charge-methodology .framework-collection-item.w-dyn-item",
|
||||
"fields": [
|
||||
{
|
||||
"name": "section_title",
|
||||
"selector": "h3.heading-50",
|
||||
"type": "text",
|
||||
},
|
||||
{
|
||||
"name": "section_description",
|
||||
"selector": ".charge-content",
|
||||
"type": "text",
|
||||
},
|
||||
{
|
||||
"name": "course_name",
|
||||
"selector": ".text-block-93",
|
||||
"type": "text",
|
||||
},
|
||||
{
|
||||
"name": "course_description",
|
||||
"selector": ".course-content-text",
|
||||
"type": "text",
|
||||
},
|
||||
{
|
||||
"name": "course_icon",
|
||||
"selector": ".image-92",
|
||||
"type": "attribute",
|
||||
"attribute": "src",
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
browser_config = BrowserConfig(headless=True, java_script_enabled=True)
|
||||
|
||||
js_click_tabs = """
|
||||
(async () => {
|
||||
const tabs = document.querySelectorAll("section.charge-methodology .tabs-menu-3 > div");
|
||||
for(let tab of tabs) {
|
||||
tab.scrollIntoView();
|
||||
tab.click();
|
||||
await new Promise(r => setTimeout(r, 500));
|
||||
}
|
||||
})();
|
||||
"""
|
||||
|
||||
crawler_config = CrawlerRunConfig(
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
extraction_strategy=JsonCssExtractionStrategy(schema),
|
||||
js_code=[js_click_tabs],
|
||||
delay_before_return_html=1
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://www.kidocode.com/degrees/technology", config=crawler_config
|
||||
)
|
||||
|
||||
companies = json.loads(result.extracted_content)
|
||||
print(f"Successfully extracted {len(companies)} companies")
|
||||
print(json.dumps(companies[0], indent=2))
|
||||
|
||||
|
||||
# Dynamic Content Examples - Method 1
|
||||
async def crawl_dynamic_content_pages_method_1():
|
||||
print("\n--- Advanced Multi-Page Crawling with JavaScript Execution ---")
|
||||
first_commit = ""
|
||||
|
||||
async def on_execution_started(page, **kwargs):
|
||||
nonlocal first_commit
|
||||
try:
|
||||
while True:
|
||||
await page.wait_for_selector("li.Box-sc-g0xbh4-0 h4")
|
||||
commit = await page.query_selector("li.Box-sc-g0xbh4-0 h4")
|
||||
commit = await commit.evaluate("(element) => element.textContent")
|
||||
commit = re.sub(r"\s+", "", commit)
|
||||
if commit and commit != first_commit:
|
||||
first_commit = commit
|
||||
break
|
||||
await asyncio.sleep(0.5)
|
||||
except Exception as e:
|
||||
print(f"Warning: New content didn't appear after JavaScript execution: {e}")
|
||||
|
||||
browser_config = BrowserConfig(headless=False, java_script_enabled=True)
|
||||
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
crawler.crawler_strategy.set_hook("on_execution_started", on_execution_started)
|
||||
|
||||
url = "https://github.com/microsoft/TypeScript/commits/main"
|
||||
session_id = "typescript_commits_session"
|
||||
all_commits = []
|
||||
|
||||
js_next_page = """
|
||||
const button = document.querySelector('a[data-testid="pagination-next-button"]');
|
||||
if (button) button.click();
|
||||
"""
|
||||
|
||||
for page in range(3):
|
||||
crawler_config = CrawlerRunConfig(
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
css_selector="li.Box-sc-g0xbh4-0",
|
||||
js_code=js_next_page if page > 0 else None,
|
||||
js_only=page > 0,
|
||||
session_id=session_id,
|
||||
)
|
||||
|
||||
result = await crawler.arun(url=url, config=crawler_config)
|
||||
assert result.success, f"Failed to crawl page {page + 1}"
|
||||
|
||||
soup = BeautifulSoup(result.cleaned_html, "html.parser")
|
||||
commits = soup.select("li")
|
||||
all_commits.extend(commits)
|
||||
|
||||
print(f"Page {page + 1}: Found {len(commits)} commits")
|
||||
|
||||
print(f"Successfully crawled {len(all_commits)} commits across 3 pages")
|
||||
|
||||
|
||||
# Dynamic Content Examples - Method 2
|
||||
async def crawl_dynamic_content_pages_method_2():
|
||||
print("\n--- Advanced Multi-Page Crawling with JavaScript Execution ---")
|
||||
|
||||
browser_config = BrowserConfig(headless=False, java_script_enabled=True)
|
||||
|
||||
js_next_page_and_wait = """
|
||||
(async () => {
|
||||
const getCurrentCommit = () => {
|
||||
const commits = document.querySelectorAll('li.Box-sc-g0xbh4-0 h4');
|
||||
return commits.length > 0 ? commits[0].textContent.trim() : null;
|
||||
};
|
||||
|
||||
const initialCommit = getCurrentCommit();
|
||||
const button = document.querySelector('a[data-testid="pagination-next-button"]');
|
||||
if (button) button.click();
|
||||
|
||||
while (true) {
|
||||
await new Promise(resolve => setTimeout(resolve, 100));
|
||||
const newCommit = getCurrentCommit();
|
||||
if (newCommit && newCommit !== initialCommit) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
})();
|
||||
"""
|
||||
|
||||
schema = {
|
||||
"name": "Commit Extractor",
|
||||
"baseSelector": "li.Box-sc-g0xbh4-0",
|
||||
"fields": [
|
||||
{
|
||||
"name": "title",
|
||||
"selector": "h4.markdown-title",
|
||||
"type": "text",
|
||||
"transform": "strip",
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
url = "https://github.com/microsoft/TypeScript/commits/main"
|
||||
session_id = "typescript_commits_session"
|
||||
all_commits = []
|
||||
|
||||
extraction_strategy = JsonCssExtractionStrategy(schema)
|
||||
|
||||
for page in range(3):
|
||||
crawler_config = CrawlerRunConfig(
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
css_selector="li.Box-sc-g0xbh4-0",
|
||||
extraction_strategy=extraction_strategy,
|
||||
js_code=js_next_page_and_wait if page > 0 else None,
|
||||
js_only=page > 0,
|
||||
session_id=session_id,
|
||||
)
|
||||
|
||||
result = await crawler.arun(url=url, config=crawler_config)
|
||||
assert result.success, f"Failed to crawl page {page + 1}"
|
||||
|
||||
commits = json.loads(result.extracted_content)
|
||||
all_commits.extend(commits)
|
||||
print(f"Page {page + 1}: Found {len(commits)} commits")
|
||||
|
||||
print(f"Successfully crawled {len(all_commits)} commits across 3 pages")
|
||||
|
||||
|
||||
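# Hedged note on session cleanup (not in the new version of these examples): the earlier
# revision of crawl_dynamic_content_pages_method_1/2, preserved further down in this
# diff, explicitly closed the shared session after the paging loop. If your crawler
# version still exposes kill_session, the equivalent cleanup after the loop would be:
#
#         await crawler.crawler_strategy.kill_session(session_id)
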
async def cosine_similarity_extraction():
|
||||
crawl_config = CrawlerRunConfig(
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
extraction_strategy=CosineStrategy(
|
||||
word_count_threshold=10,
|
||||
max_dist=0.2, # Maximum distance between two words
|
||||
linkage_method="ward", # Linkage method for hierarchical clustering (ward, complete, average, single)
|
||||
top_k=3, # Number of top keywords to extract
|
||||
sim_threshold=0.3, # Similarity threshold for clustering
|
||||
semantic_filter="McDonald's economic impact, American consumer trends", # Keywords to filter the content semantically using embeddings
|
||||
verbose=True,
|
||||
),
|
||||
)
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://www.nbcnews.com/business/consumer/how-mcdonalds-e-coli-crisis-inflation-politics-reflect-american-story-rcna177156",
|
||||
config=crawl_config,
|
||||
)
|
||||
print(json.loads(result.extracted_content)[:5])
|
||||
|
||||
|
||||
# Browser Comparison
|
||||
async def crawl_custom_browser_type():
|
||||
print("\n--- Browser Comparison ---")
|
||||
|
||||
# Firefox
|
||||
browser_config_firefox = BrowserConfig(browser_type="firefox", headless=True)
|
||||
start = time.time()
|
||||
async with AsyncWebCrawler(config=browser_config_firefox) as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://www.example.com",
|
||||
config=CrawlerRunConfig(cache_mode=CacheMode.BYPASS),
|
||||
)
|
||||
print("Firefox:", time.time() - start)
|
||||
print(result.markdown[:500])
|
||||
|
||||
# WebKit
|
||||
browser_config_webkit = BrowserConfig(browser_type="webkit", headless=True)
|
||||
start = time.time()
|
||||
async with AsyncWebCrawler(config=browser_config_webkit) as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://www.example.com",
|
||||
config=CrawlerRunConfig(cache_mode=CacheMode.BYPASS),
|
||||
)
|
||||
print("WebKit:", time.time() - start)
|
||||
print(result.markdown[:500])
|
||||
|
||||
# Chromium (default)
|
||||
browser_config_chromium = BrowserConfig(browser_type="chromium", headless=True)
|
||||
start = time.time()
|
||||
async with AsyncWebCrawler(config=browser_config_chromium) as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://www.example.com",
|
||||
config=CrawlerRunConfig(cache_mode=CacheMode.BYPASS),
|
||||
)
|
||||
print("Chromium:", time.time() - start)
|
||||
print(result.markdown[:500])
|
||||
|
||||
|
||||
# Anti-Bot and User Simulation
async def crawl_with_user_simulation():
    browser_config = BrowserConfig(
        headless=True,
        user_agent_mode="random",
        user_agent_generator_config={"device_type": "mobile", "os_type": "android"},
    )

    crawler_config = CrawlerRunConfig(
        cache_mode=CacheMode.BYPASS,
        magic=True,
        simulate_user=True,
        override_navigator=True,
    )

    async with AsyncWebCrawler(config=browser_config) as crawler:
        result = await crawler.arun(url="YOUR-URL-HERE", config=crawler_config)
        print(result.markdown)

async def ssl_certification():
    # Configure crawler to fetch SSL certificate
    config = CrawlerRunConfig(
        fetch_ssl_certificate=True,
        cache_mode=CacheMode.BYPASS,  # Bypass cache to always get fresh certificates
    )

    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(url="https://example.com", config=config)

        if result.success and result.ssl_certificate:
            cert = result.ssl_certificate

            # 1. Access certificate properties directly
            print("\nCertificate Information:")
            print(f"Issuer: {cert.issuer.get('CN', '')}")
            print(f"Valid until: {cert.valid_until}")
            print(f"Fingerprint: {cert.fingerprint}")

            # 2. Export certificate in different formats
            cert.to_json(os.path.join(tmp_dir, "certificate.json"))  # For analysis
            print("\nCertificate exported to:")
            print(f"- JSON: {os.path.join(tmp_dir, 'certificate.json')}")

            pem_data = cert.to_pem(
                os.path.join(tmp_dir, "certificate.pem")
            )  # For web servers
            print(f"- PEM: {os.path.join(tmp_dir, 'certificate.pem')}")

            der_data = cert.to_der(
                os.path.join(tmp_dir, "certificate.der")
            )  # For Java apps
            print(f"- DER: {os.path.join(tmp_dir, 'certificate.der')}")

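# Hedged note: ssl_certification() above writes into tmp_dir, which is not defined in
# the excerpt shown here; the full file presumably sets it up near the imports. A
# minimal, self-contained way to provide it (reusing __location__, which this file uses
# elsewhere) would be:
#
# tmp_dir = os.path.join(__location__, "tmp")
# os.makedirs(tmp_dir, exist_ok=True)
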
# Speed Comparison
|
||||
async def speed_comparison():
|
||||
print("\n--- Speed Comparison ---")
|
||||
|
||||
# Firecrawl comparison
|
||||
from firecrawl import FirecrawlApp
|
||||
|
||||
app = FirecrawlApp(api_key=os.environ["FIRECRAWL_API_KEY"])
|
||||
start = time.time()
|
||||
scrape_status = app.scrape_url(
|
||||
"https://www.nbcnews.com/business", params={"formats": ["markdown", "html"]}
|
||||
)
|
||||
end = time.time()
|
||||
print("Firecrawl:")
|
||||
print(f"Time taken: {end - start:.2f} seconds")
|
||||
print(f"Content length: {len(scrape_status['markdown'])} characters")
|
||||
print(f"Images found: {scrape_status['markdown'].count('cldnry.s-nbcnews.com')}")
|
||||
print()
|
||||
|
||||
# Crawl4AI comparisons
|
||||
browser_config = BrowserConfig(headless=True)
|
||||
|
||||
# Simple crawl
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
start = time.time()
|
||||
result = await crawler.arun(
|
||||
url="https://www.nbcnews.com/business",
|
||||
config=CrawlerRunConfig(
|
||||
cache_mode=CacheMode.BYPASS, word_count_threshold=0
|
||||
),
|
||||
)
|
||||
end = time.time()
|
||||
print("Crawl4AI (simple crawl):")
|
||||
print(f"Time taken: {end - start:.2f} seconds")
|
||||
print(f"Content length: {len(result.markdown)} characters")
|
||||
print(f"Images found: {result.markdown.count('cldnry.s-nbcnews.com')}")
|
||||
print()
|
||||
|
||||
# Advanced filtering
|
||||
start = time.time()
|
||||
result = await crawler.arun(
|
||||
url="https://www.nbcnews.com/business",
|
||||
config=CrawlerRunConfig(
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
word_count_threshold=0,
|
||||
markdown_generator=DefaultMarkdownGenerator(
|
||||
content_filter=PruningContentFilter(
|
||||
threshold=0.48, threshold_type="fixed", min_word_threshold=0
|
||||
)
|
||||
),
|
||||
),
|
||||
)
|
||||
end = time.time()
|
||||
print("Crawl4AI (Markdown Plus):")
|
||||
print(f"Time taken: {end - start:.2f} seconds")
|
||||
print(f"Content length: {len(result.markdown_v2.raw_markdown)} characters")
|
||||
print(f"Fit Markdown: {len(result.markdown_v2.fit_markdown)} characters")
|
||||
print(f"Images found: {result.markdown.count('cldnry.s-nbcnews.com')}")
|
||||
print()
|
||||
|
||||
|
||||
# Main execution
async def main():
    # Basic examples
    await simple_crawl()
    await simple_example_with_running_js_code()
    await simple_example_with_css_selector()

    # Advanced examples
    await extract_structured_data_using_css_extractor()
    await extract_structured_data_using_llm(
        "openai/gpt-4o", os.getenv("OPENAI_API_KEY")
    )
    await crawl_dynamic_content_pages_method_1()
    await crawl_dynamic_content_pages_method_2()

    # Browser comparisons
    await crawl_custom_browser_type()

    # Screenshot example
    await capture_and_save_screenshot(
        "https://www.example.com",
        os.path.join(__location__, "tmp/example_screenshot.jpg")
    )


if __name__ == "__main__":
    asyncio.run(main())

@@ -1,6 +1,10 @@
|
||||
import os, sys
|
||||
|
||||
# append parent directory to system path
|
||||
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))); os.environ['FIRECRAWL_API_KEY'] = "fc-84b370ccfad44beabc686b38f1769692";
|
||||
sys.path.append(
|
||||
os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
)
|
||||
os.environ["FIRECRAWL_API_KEY"] = "fc-84b370ccfad44beabc686b38f1769692"
|
||||
|
||||
import asyncio
|
||||
# import nest_asyncio
|
||||
@@ -13,7 +17,9 @@ import re
|
||||
from typing import Dict, List
|
||||
from bs4 import BeautifulSoup
|
||||
from pydantic import BaseModel, Field
|
||||
from crawl4ai import AsyncWebCrawler
|
||||
from crawl4ai import AsyncWebCrawler, CacheMode
|
||||
from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator
|
||||
from crawl4ai.content_filter_strategy import PruningContentFilter
|
||||
from crawl4ai.extraction_strategy import (
|
||||
JsonCssExtractionStrategy,
|
||||
LLMExtractionStrategy,
|
||||
@@ -30,9 +36,12 @@ print("Website: https://crawl4ai.com")
|
||||
async def simple_crawl():
|
||||
print("\n--- Basic Usage ---")
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
result = await crawler.arun(url="https://www.nbcnews.com/business")
|
||||
result = await crawler.arun(
|
||||
url="https://www.nbcnews.com/business", cache_mode=CacheMode.BYPASS
|
||||
)
|
||||
print(result.markdown[:500]) # Print first 500 characters
|
||||
|
||||
|
||||
async def simple_example_with_running_js_code():
|
||||
print("\n--- Executing JavaScript and Using CSS Selectors ---")
|
||||
# New code to handle the wait_for parameter
|
||||
@@ -51,55 +60,59 @@ async def simple_example_with_running_js_code():
|
||||
url="https://www.nbcnews.com/business",
|
||||
js_code=js_code,
|
||||
# wait_for=wait_for,
|
||||
bypass_cache=True,
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
)
|
||||
print(result.markdown[:500]) # Print first 500 characters
|
||||
|
||||
|
||||
async def simple_example_with_css_selector():
|
||||
print("\n--- Using CSS Selectors ---")
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://www.nbcnews.com/business",
|
||||
css_selector=".wide-tease-item__description",
|
||||
bypass_cache=True,
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
)
|
||||
print(result.markdown[:500]) # Print first 500 characters
|
||||
|
||||
|
||||
async def use_proxy():
|
||||
print("\n--- Using a Proxy ---")
|
||||
print(
|
||||
"Note: Replace 'http://your-proxy-url:port' with a working proxy to run this example."
|
||||
)
|
||||
# Uncomment and modify the following lines to use a proxy
|
||||
# async with AsyncWebCrawler(verbose=True, proxy="http://your-proxy-url:port") as crawler:
|
||||
# result = await crawler.arun(
|
||||
# url="https://www.nbcnews.com/business",
|
||||
# bypass_cache=True
|
||||
# )
|
||||
# print(result.markdown[:500]) # Print first 500 characters
|
||||
async with AsyncWebCrawler(
|
||||
verbose=True, proxy="http://your-proxy-url:port"
|
||||
) as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://www.nbcnews.com/business", cache_mode=CacheMode.BYPASS
|
||||
)
|
||||
if result.success:
|
||||
print(result.markdown[:500]) # Print first 500 characters
|
||||
|
||||
|
||||
async def capture_and_save_screenshot(url: str, output_path: str):
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
result = await crawler.arun(
|
||||
url=url,
|
||||
screenshot=True,
|
||||
bypass_cache=True
|
||||
url=url, screenshot=True, cache_mode=CacheMode.BYPASS
|
||||
)
|
||||
|
||||
|
||||
if result.success and result.screenshot:
|
||||
import base64
|
||||
|
||||
|
||||
# Decode the base64 screenshot data
|
||||
screenshot_data = base64.b64decode(result.screenshot)
|
||||
|
||||
|
||||
# Save the screenshot as a JPEG file
|
||||
with open(output_path, 'wb') as f:
|
||||
with open(output_path, "wb") as f:
|
||||
f.write(screenshot_data)
|
||||
|
||||
|
||||
print(f"Screenshot saved successfully to {output_path}")
|
||||
else:
|
||||
print("Failed to capture screenshot")
|
||||
|
||||
|
||||
class OpenAIModelFee(BaseModel):
|
||||
model_name: str = Field(..., description="Name of the OpenAI model.")
|
||||
input_fee: str = Field(..., description="Fee for input token for the OpenAI model.")
|
||||
@@ -107,14 +120,23 @@ class OpenAIModelFee(BaseModel):
|
||||
..., description="Fee for output token for the OpenAI model."
|
||||
)
|
||||
|
||||
async def extract_structured_data_using_llm(provider: str, api_token: str = None, extra_headers: Dict[str, str] = None):
|
||||
|
||||
async def extract_structured_data_using_llm(
|
||||
provider: str, api_token: str = None, extra_headers: Dict[str, str] = None
|
||||
):
|
||||
print(f"\n--- Extracting Structured Data with {provider} ---")
|
||||
|
||||
|
||||
if api_token is None and provider != "ollama":
|
||||
print(f"API token is required for {provider}. Skipping this example.")
|
||||
return
|
||||
|
||||
extra_args = {}
|
||||
# extra_args = {}
|
||||
extra_args = {
|
||||
"temperature": 0,
|
||||
"top_p": 0.9,
|
||||
"max_tokens": 2000,
|
||||
# any other supported parameters for litellm
|
||||
}
|
||||
if extra_headers:
|
||||
extra_args["extra_headers"] = extra_headers
|
||||
|
||||
@@ -125,55 +147,80 @@ async def extract_structured_data_using_llm(provider: str, api_token: str = None
|
||||
extraction_strategy=LLMExtractionStrategy(
|
||||
provider=provider,
|
||||
api_token=api_token,
|
||||
schema=OpenAIModelFee.schema(),
|
||||
schema=OpenAIModelFee.model_json_schema(),
|
||||
extraction_type="schema",
|
||||
instruction="""From the crawled content, extract all mentioned model names along with their fees for input and output tokens.
|
||||
Do not miss any models in the entire content. One extracted model JSON format should look like this:
|
||||
{"model_name": "GPT-4", "input_fee": "US$10.00 / 1M tokens", "output_fee": "US$30.00 / 1M tokens"}.""",
|
||||
extra_args=extra_args
|
||||
extra_args=extra_args,
|
||||
),
|
||||
bypass_cache=True,
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
)
|
||||
print(result.extracted_content)
|
||||
|
||||
|
||||
async def extract_structured_data_using_css_extractor():
|
||||
print("\n--- Using JsonCssExtractionStrategy for Fast Structured Output ---")
|
||||
schema = {
|
||||
"name": "Coinbase Crypto Prices",
|
||||
"baseSelector": ".cds-tableRow-t45thuk",
|
||||
"name": "KidoCode Courses",
|
||||
"baseSelector": "section.charge-methodology .w-tab-content > div",
|
||||
"fields": [
|
||||
{
|
||||
"name": "crypto",
|
||||
"selector": "td:nth-child(1) h2",
|
||||
"name": "section_title",
|
||||
"selector": "h3.heading-50",
|
||||
"type": "text",
|
||||
},
|
||||
{
|
||||
"name": "symbol",
|
||||
"selector": "td:nth-child(1) p",
|
||||
"name": "section_description",
|
||||
"selector": ".charge-content",
|
||||
"type": "text",
|
||||
},
|
||||
{
|
||||
"name": "price",
|
||||
"selector": "td:nth-child(2)",
|
||||
"name": "course_name",
|
||||
"selector": ".text-block-93",
|
||||
"type": "text",
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "course_description",
|
||||
"selector": ".course-content-text",
|
||||
"type": "text",
|
||||
},
|
||||
{
|
||||
"name": "course_icon",
|
||||
"selector": ".image-92",
|
||||
"type": "attribute",
|
||||
"attribute": "src",
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
extraction_strategy = JsonCssExtractionStrategy(schema, verbose=True)
|
||||
async with AsyncWebCrawler(headless=True, verbose=True) as crawler:
|
||||
# Create the JavaScript that handles clicking multiple times
|
||||
js_click_tabs = """
|
||||
(async () => {
|
||||
const tabs = document.querySelectorAll("section.charge-methodology .tabs-menu-3 > div");
|
||||
|
||||
for(let tab of tabs) {
|
||||
// scroll to the tab
|
||||
tab.scrollIntoView();
|
||||
tab.click();
|
||||
// Wait for content to load and animations to complete
|
||||
await new Promise(r => setTimeout(r, 500));
|
||||
}
|
||||
})();
|
||||
"""
|
||||
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://www.coinbase.com/explore",
|
||||
extraction_strategy=extraction_strategy,
|
||||
bypass_cache=True,
|
||||
url="https://www.kidocode.com/degrees/technology",
|
||||
extraction_strategy=JsonCssExtractionStrategy(schema, verbose=True),
|
||||
js_code=[js_click_tabs],
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
)
|
||||
|
||||
assert result.success, "Failed to crawl the page"
|
||||
companies = json.loads(result.extracted_content)
|
||||
print(f"Successfully extracted {len(companies)} companies")
|
||||
print(json.dumps(companies[0], indent=2))
|
||||
|
||||
news_teasers = json.loads(result.extracted_content)
|
||||
print(f"Successfully extracted {len(news_teasers)} news teasers")
|
||||
print(json.dumps(news_teasers[0], indent=2))
|
||||
|
||||
# Advanced Session-Based Crawling with Dynamic Content 🔄
|
||||
async def crawl_dynamic_content_pages_method_1():
|
||||
@@ -203,8 +250,10 @@ async def crawl_dynamic_content_pages_method_1():
|
||||
all_commits = []
|
||||
|
||||
js_next_page = """
|
||||
const button = document.querySelector('a[data-testid="pagination-next-button"]');
|
||||
if (button) button.click();
|
||||
(() => {
|
||||
const button = document.querySelector('a[data-testid="pagination-next-button"]');
|
||||
if (button) button.click();
|
||||
})();
|
||||
"""
|
||||
|
||||
for page in range(3): # Crawl 3 pages
|
||||
@@ -213,7 +262,7 @@ async def crawl_dynamic_content_pages_method_1():
|
||||
session_id=session_id,
|
||||
css_selector="li.Box-sc-g0xbh4-0",
|
||||
js=js_next_page if page > 0 else None,
|
||||
bypass_cache=True,
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
js_only=page > 0,
|
||||
headless=False,
|
||||
)
|
||||
@@ -229,6 +278,7 @@ async def crawl_dynamic_content_pages_method_1():
|
||||
await crawler.crawler_strategy.kill_session(session_id)
|
||||
print(f"Successfully crawled {len(all_commits)} commits across 3 pages")
|
||||
|
||||
|
||||
async def crawl_dynamic_content_pages_method_2():
|
||||
print("\n--- Advanced Multi-Page Crawling with JavaScript Execution ---")
|
||||
|
||||
@@ -282,7 +332,7 @@ async def crawl_dynamic_content_pages_method_2():
|
||||
extraction_strategy=extraction_strategy,
|
||||
js_code=js_next_page_and_wait if page > 0 else None,
|
||||
js_only=page > 0,
|
||||
bypass_cache=True,
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
headless=False,
|
||||
)
|
||||
|
||||
@@ -296,8 +346,11 @@ async def crawl_dynamic_content_pages_method_2():
|
||||
await crawler.crawler_strategy.kill_session(session_id)
|
||||
print(f"Successfully crawled {len(all_commits)} commits across 3 pages")
|
||||
|
||||
|
||||
async def crawl_dynamic_content_pages_method_3():
|
||||
print("\n--- Advanced Multi-Page Crawling with JavaScript Execution using `wait_for` ---")
|
||||
print(
|
||||
"\n--- Advanced Multi-Page Crawling with JavaScript Execution using `wait_for` ---"
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
url = "https://github.com/microsoft/TypeScript/commits/main"
|
||||
@@ -319,7 +372,7 @@ async def crawl_dynamic_content_pages_method_3():
|
||||
const firstCommit = commits[0].textContent.trim();
|
||||
return firstCommit !== window.firstCommit;
|
||||
}"""
|
||||
|
||||
|
||||
schema = {
|
||||
"name": "Commit Extractor",
|
||||
"baseSelector": "li.Box-sc-g0xbh4-0",
|
||||
@@ -343,7 +396,7 @@ async def crawl_dynamic_content_pages_method_3():
|
||||
js_code=js_next_page if page > 0 else None,
|
||||
wait_for=wait_for if page > 0 else None,
|
||||
js_only=page > 0,
|
||||
bypass_cache=True,
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
headless=False,
|
||||
)
|
||||
|
||||
@@ -357,39 +410,53 @@ async def crawl_dynamic_content_pages_method_3():
|
||||
await crawler.crawler_strategy.kill_session(session_id)
|
||||
print(f"Successfully crawled {len(all_commits)} commits across 3 pages")
|
||||
|
||||
|
||||
async def crawl_custom_browser_type():
|
||||
# Use Firefox
|
||||
start = time.time()
|
||||
async with AsyncWebCrawler(browser_type="firefox", verbose=True, headless = True) as crawler:
|
||||
result = await crawler.arun(url="https://www.example.com", bypass_cache=True)
|
||||
async with AsyncWebCrawler(
|
||||
browser_type="firefox", verbose=True, headless=True
|
||||
) as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://www.example.com", cache_mode=CacheMode.BYPASS
|
||||
)
|
||||
print(result.markdown[:500])
|
||||
print("Time taken: ", time.time() - start)
|
||||
|
||||
# Use WebKit
|
||||
start = time.time()
|
||||
async with AsyncWebCrawler(browser_type="webkit", verbose=True, headless = True) as crawler:
|
||||
result = await crawler.arun(url="https://www.example.com", bypass_cache=True)
|
||||
async with AsyncWebCrawler(
|
||||
browser_type="webkit", verbose=True, headless=True
|
||||
) as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://www.example.com", cache_mode=CacheMode.BYPASS
|
||||
)
|
||||
print(result.markdown[:500])
|
||||
print("Time taken: ", time.time() - start)
|
||||
|
||||
# Use Chromium (default)
|
||||
start = time.time()
|
||||
async with AsyncWebCrawler(verbose=True, headless = True) as crawler:
|
||||
result = await crawler.arun(url="https://www.example.com", bypass_cache=True)
|
||||
async with AsyncWebCrawler(verbose=True, headless=True) as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://www.example.com", cache_mode=CacheMode.BYPASS
|
||||
)
|
||||
print(result.markdown[:500])
|
||||
print("Time taken: ", time.time() - start)
|
||||
|
||||
|
||||
async def crawl_with_user_simultion():
|
||||
async with AsyncWebCrawler(verbose=True, headless=True) as crawler:
|
||||
url = "YOUR-URL-HERE"
|
||||
result = await crawler.arun(
|
||||
url=url,
|
||||
bypass_cache=True,
|
||||
simulate_user = True,# Causes a series of random mouse movements and clicks to simulate user interaction
|
||||
override_navigator = True # Overrides the navigator object to make it look like a real user
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
magic=True, # Automatically detects and removes overlays, popups, and other elements that block content
|
||||
# simulate_user = True,# Causes a series of random mouse movements and clicks to simulate user interaction
|
||||
# override_navigator = True # Overrides the navigator object to make it look like a real user
|
||||
)
|
||||
|
||||
print(result.markdown)
|
||||
|
||||
print(result.markdown)
|
||||
|
||||
|
||||
async def speed_comparison():
|
||||
# print("\n--- Speed Comparison ---")
|
||||
@@ -400,18 +467,18 @@ async def speed_comparison():
|
||||
# print()
|
||||
# Simulated Firecrawl performance
|
||||
from firecrawl import FirecrawlApp
|
||||
app = FirecrawlApp(api_key=os.environ['FIRECRAWL_API_KEY'])
|
||||
|
||||
app = FirecrawlApp(api_key=os.environ["FIRECRAWL_API_KEY"])
|
||||
start = time.time()
|
||||
scrape_status = app.scrape_url(
|
||||
'https://www.nbcnews.com/business',
|
||||
params={'formats': ['markdown', 'html']}
|
||||
"https://www.nbcnews.com/business", params={"formats": ["markdown", "html"]}
|
||||
)
|
||||
end = time.time()
|
||||
print("Firecrawl (simulated):")
|
||||
print("Firecrawl:")
|
||||
print(f"Time taken: {end - start:.2f} seconds")
|
||||
print(f"Content length: {len(scrape_status['markdown'])} characters")
|
||||
print(f"Images found: {scrape_status['markdown'].count('cldnry.s-nbcnews.com')}")
|
||||
print()
|
||||
print()
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
# Crawl4AI simple crawl
|
||||
@@ -419,7 +486,7 @@ async def speed_comparison():
|
||||
result = await crawler.arun(
|
||||
url="https://www.nbcnews.com/business",
|
||||
word_count_threshold=0,
|
||||
bypass_cache=True,
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
verbose=False,
|
||||
)
|
||||
end = time.time()
|
||||
@@ -429,6 +496,28 @@ async def speed_comparison():
|
||||
print(f"Images found: {result.markdown.count('cldnry.s-nbcnews.com')}")
|
||||
print()
|
||||
|
||||
# Crawl4AI with advanced content filtering
|
||||
start = time.time()
|
||||
result = await crawler.arun(
|
||||
url="https://www.nbcnews.com/business",
|
||||
word_count_threshold=0,
|
||||
markdown_generator=DefaultMarkdownGenerator(
|
||||
content_filter=PruningContentFilter(
|
||||
threshold=0.48, threshold_type="fixed", min_word_threshold=0
|
||||
)
|
||||
# content_filter=BM25ContentFilter(user_query=None, bm25_threshold=1.0)
|
||||
),
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
verbose=False,
|
||||
)
|
||||
end = time.time()
|
||||
print("Crawl4AI (Markdown Plus):")
|
||||
print(f"Time taken: {end - start:.2f} seconds")
|
||||
print(f"Content length: {len(result.markdown_v2.raw_markdown)} characters")
|
||||
print(f"Fit Markdown: {len(result.markdown_v2.fit_markdown)} characters")
|
||||
print(f"Images found: {result.markdown.count('cldnry.s-nbcnews.com')}")
|
||||
print()
|
||||
|
||||
# Crawl4AI with JavaScript execution
|
||||
start = time.time()
|
||||
result = await crawler.arun(
|
||||
@@ -437,13 +526,20 @@ async def speed_comparison():
|
||||
"const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More')); loadMoreButton && loadMoreButton.click();"
|
||||
],
|
||||
word_count_threshold=0,
|
||||
bypass_cache=True,
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
markdown_generator=DefaultMarkdownGenerator(
|
||||
content_filter=PruningContentFilter(
|
||||
threshold=0.48, threshold_type="fixed", min_word_threshold=0
|
||||
)
|
||||
# content_filter=BM25ContentFilter(user_query=None, bm25_threshold=1.0)
|
||||
),
|
||||
verbose=False,
|
||||
)
|
||||
end = time.time()
|
||||
print("Crawl4AI (with JavaScript execution):")
|
||||
print(f"Time taken: {end - start:.2f} seconds")
|
||||
print(f"Content length: {len(result.markdown)} characters")
|
||||
print(f"Fit Markdown: {len(result.markdown_v2.fit_markdown)} characters")
|
||||
print(f"Images found: {result.markdown.count('cldnry.s-nbcnews.com')}")
|
||||
|
||||
print("\nNote on Speed Comparison:")
|
||||
@@ -456,11 +552,12 @@ async def speed_comparison():
|
||||
print("If you run these tests in an environment with better network conditions,")
|
||||
print("you may observe an even more significant speed advantage for Crawl4AI.")
|
||||
|
||||
|
||||
async def generate_knowledge_graph():
|
||||
class Entity(BaseModel):
|
||||
name: str
|
||||
description: str
|
||||
|
||||
|
||||
class Relationship(BaseModel):
|
||||
entity1: Entity
|
||||
entity2: Entity
|
||||
@@ -472,17 +569,17 @@ async def generate_knowledge_graph():
|
||||
relationships: List[Relationship]
|
||||
|
||||
extraction_strategy = LLMExtractionStrategy(
|
||||
provider='openai/gpt-4o-mini', # Or any other provider, including Ollama and open source models
|
||||
api_token=os.getenv('OPENAI_API_KEY'), # In case of Ollama just pass "no-token"
|
||||
schema=KnowledgeGraph.model_json_schema(),
|
||||
extraction_type="schema",
|
||||
instruction="""Extract entities and relationships from the given text."""
|
||||
provider="openai/gpt-4o-mini", # Or any other provider, including Ollama and open source models
|
||||
api_token=os.getenv("OPENAI_API_KEY"), # In case of Ollama just pass "no-token"
|
||||
schema=KnowledgeGraph.model_json_schema(),
|
||||
extraction_type="schema",
|
||||
instruction="""Extract entities and relationships from the given text.""",
|
||||
)
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
url = "https://paulgraham.com/love.html"
|
||||
result = await crawler.arun(
|
||||
url=url,
|
||||
bypass_cache=True,
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
extraction_strategy=extraction_strategy,
|
||||
# magic=True
|
||||
)
|
||||
@@ -490,51 +587,88 @@ async def generate_knowledge_graph():
|
||||
with open(os.path.join(__location__, "kb.json"), "w") as f:
|
||||
f.write(result.extracted_content)
|
||||
|
||||
|
||||
async def fit_markdown_remove_overlay():
|
||||
async with AsyncWebCrawler(headless = False) as crawler:
|
||||
url = "https://janineintheworld.com/places-to-visit-in-central-mexico"
|
||||
async with AsyncWebCrawler(
|
||||
headless=True, # Set to False to see what is happening
|
||||
verbose=True,
|
||||
user_agent_mode="random",
|
||||
user_agent_generator_config={"device_type": "mobile", "os_type": "android"},
|
||||
) as crawler:
|
||||
result = await crawler.arun(
|
||||
url=url,
|
||||
bypass_cache=True,
|
||||
word_count_threshold = 10,
|
||||
remove_overlay_elements=True,
|
||||
screenshot = True
|
||||
url="https://www.kidocode.com/degrees/technology",
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
markdown_generator=DefaultMarkdownGenerator(
|
||||
content_filter=PruningContentFilter(
|
||||
threshold=0.48, threshold_type="fixed", min_word_threshold=0
|
||||
),
|
||||
options={"ignore_links": True},
|
||||
),
|
||||
# markdown_generator=DefaultMarkdownGenerator(
|
||||
# content_filter=BM25ContentFilter(user_query="", bm25_threshold=1.0),
|
||||
# options={
|
||||
# "ignore_links": True
|
||||
# }
|
||||
# ),
|
||||
)
|
||||
# Save markdown to file
|
||||
with open(os.path.join(__location__, "mexico_places.md"), "w") as f:
|
||||
f.write(result.fit_markdown)
|
||||
|
||||
if result.success:
|
||||
print(len(result.markdown_v2.raw_markdown))
|
||||
print(len(result.markdown_v2.markdown_with_citations))
|
||||
print(len(result.markdown_v2.fit_markdown))
|
||||
|
||||
# Save clean html
|
||||
with open(os.path.join(__location__, "output/cleaned_html.html"), "w") as f:
|
||||
f.write(result.cleaned_html)
|
||||
|
||||
with open(
|
||||
os.path.join(__location__, "output/output_raw_markdown.md"), "w"
|
||||
) as f:
|
||||
f.write(result.markdown_v2.raw_markdown)
|
||||
|
||||
with open(
|
||||
os.path.join(__location__, "output/output_markdown_with_citations.md"),
|
||||
"w",
|
||||
) as f:
|
||||
f.write(result.markdown_v2.markdown_with_citations)
|
||||
|
||||
with open(
|
||||
os.path.join(__location__, "output/output_fit_markdown.md"), "w"
|
||||
) as f:
|
||||
f.write(result.markdown_v2.fit_markdown)
|
||||
|
||||
print("Done")
|
||||
|
||||
|
||||
async def main():
|
||||
await simple_crawl()
|
||||
await simple_example_with_running_js_code()
|
||||
await simple_example_with_css_selector()
|
||||
await use_proxy()
|
||||
await capture_and_save_screenshot("https://www.example.com", os.path.join(__location__, "tmp/example_screenshot.jpg"))
|
||||
await extract_structured_data_using_css_extractor()
|
||||
# await extract_structured_data_using_llm("openai/gpt-4o", os.getenv("OPENAI_API_KEY"))
|
||||
|
||||
# await simple_crawl()
|
||||
# await simple_example_with_running_js_code()
|
||||
# await simple_example_with_css_selector()
|
||||
# # await use_proxy()
|
||||
# await capture_and_save_screenshot("https://www.example.com", os.path.join(__location__, "tmp/example_screenshot.jpg"))
|
||||
# await extract_structured_data_using_css_extractor()
|
||||
|
||||
# LLM extraction examples
|
||||
await extract_structured_data_using_llm()
|
||||
await extract_structured_data_using_llm("huggingface/meta-llama/Meta-Llama-3.1-8B-Instruct", os.getenv("HUGGINGFACE_API_KEY"))
|
||||
await extract_structured_data_using_llm("openai/gpt-4o", os.getenv("OPENAI_API_KEY"))
|
||||
await extract_structured_data_using_llm("ollama/llama3.2")
|
||||
# await extract_structured_data_using_llm()
|
||||
# await extract_structured_data_using_llm("huggingface/meta-llama/Meta-Llama-3.1-8B-Instruct", os.getenv("HUGGINGFACE_API_KEY"))
|
||||
# await extract_structured_data_using_llm("ollama/llama3.2")
|
||||
|
||||
# You always can pass custom headers to the extraction strategy
|
||||
custom_headers = {
|
||||
"Authorization": "Bearer your-custom-token",
|
||||
"X-Custom-Header": "Some-Value"
|
||||
}
|
||||
await extract_structured_data_using_llm(extra_headers=custom_headers)
|
||||
|
||||
# custom_headers = {
|
||||
# "Authorization": "Bearer your-custom-token",
|
||||
# "X-Custom-Header": "Some-Value"
|
||||
# }
|
||||
# await extract_structured_data_using_llm(extra_headers=custom_headers)
|
||||
|
||||
# await crawl_dynamic_content_pages_method_1()
|
||||
# await crawl_dynamic_content_pages_method_2()
|
||||
await crawl_dynamic_content_pages_method_3()
|
||||
|
||||
await crawl_custom_browser_type()
|
||||
|
||||
await speed_comparison()
|
||||
|
||||
# await crawl_custom_browser_type()
|
||||
|
||||
# await speed_comparison()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
@@ -10,15 +10,17 @@ from functools import lru_cache
|
||||
|
||||
console = Console()
|
||||
|
||||
|
||||
@lru_cache()
|
||||
def create_crawler():
|
||||
crawler = WebCrawler(verbose=True)
|
||||
crawler.warmup()
|
||||
return crawler
|
||||
|
||||
|
||||
def print_result(result):
|
||||
# Print each key in one line and just the first 10 characters of each one's value and three dots
|
||||
console.print(f"\t[bold]Result:[/bold]")
|
||||
console.print("\t[bold]Result:[/bold]")
|
||||
for key, value in result.model_dump().items():
|
||||
if isinstance(value, str) and value:
|
||||
console.print(f"\t{key}: [green]{value[:20]}...[/green]")
|
||||
@@ -33,18 +35,27 @@ def cprint(message, press_any_key=False):
|
||||
console.print("Press any key to continue...", style="")
|
||||
input()
|
||||
|
||||
|
||||
def basic_usage(crawler):
|
||||
cprint("🛠️ [bold cyan]Basic Usage: Simply provide a URL and let Crawl4ai do the magic![/bold cyan]")
|
||||
result = crawler.run(url="https://www.nbcnews.com/business", only_text = True)
|
||||
cprint(
|
||||
"🛠️ [bold cyan]Basic Usage: Simply provide a URL and let Crawl4ai do the magic![/bold cyan]"
|
||||
)
|
||||
result = crawler.run(url="https://www.nbcnews.com/business", only_text=True)
|
||||
cprint("[LOG] 📦 [bold yellow]Basic crawl result:[/bold yellow]")
|
||||
print_result(result)
|
||||
|
||||
|
||||
def basic_usage_some_params(crawler):
|
||||
cprint("🛠️ [bold cyan]Basic Usage: Simply provide a URL and let Crawl4ai do the magic![/bold cyan]")
|
||||
result = crawler.run(url="https://www.nbcnews.com/business", word_count_threshold=1, only_text = True)
|
||||
cprint(
|
||||
"🛠️ [bold cyan]Basic Usage: Simply provide a URL and let Crawl4ai do the magic![/bold cyan]"
|
||||
)
|
||||
result = crawler.run(
|
||||
url="https://www.nbcnews.com/business", word_count_threshold=1, only_text=True
|
||||
)
|
||||
cprint("[LOG] 📦 [bold yellow]Basic crawl result:[/bold yellow]")
|
||||
print_result(result)
|
||||
|
||||
|
||||
def screenshot_usage(crawler):
|
||||
cprint("\n📸 [bold cyan]Let's take a screenshot of the page![/bold cyan]")
|
||||
result = crawler.run(url="https://www.nbcnews.com/business", screenshot=True)
|
||||
@@ -55,16 +66,23 @@ def screenshot_usage(crawler):
|
||||
cprint("Screenshot saved to 'screenshot.png'!")
|
||||
print_result(result)
|
||||
|
||||
|
||||
def understanding_parameters(crawler):
|
||||
cprint("\n🧠 [bold cyan]Understanding 'bypass_cache' and 'include_raw_html' parameters:[/bold cyan]")
|
||||
cprint("By default, Crawl4ai caches the results of your crawls. This means that subsequent crawls of the same URL will be much faster! Let's see this in action.")
|
||||
|
||||
cprint(
|
||||
"\n🧠 [bold cyan]Understanding 'bypass_cache' and 'include_raw_html' parameters:[/bold cyan]"
|
||||
)
|
||||
cprint(
|
||||
"By default, Crawl4ai caches the results of your crawls. This means that subsequent crawls of the same URL will be much faster! Let's see this in action."
|
||||
)
|
||||
|
||||
# First crawl (reads from cache)
|
||||
cprint("1️⃣ First crawl (caches the result):", True)
|
||||
start_time = time.time()
|
||||
result = crawler.run(url="https://www.nbcnews.com/business")
|
||||
end_time = time.time()
|
||||
cprint(f"[LOG] 📦 [bold yellow]First crawl took {end_time - start_time} seconds and result (from cache):[/bold yellow]")
|
||||
cprint(
|
||||
f"[LOG] 📦 [bold yellow]First crawl took {end_time - start_time} seconds and result (from cache):[/bold yellow]"
|
||||
)
|
||||
print_result(result)
|
||||
|
||||
# Force to crawl again
|
||||
@@ -72,169 +90,232 @@ def understanding_parameters(crawler):
|
||||
start_time = time.time()
|
||||
result = crawler.run(url="https://www.nbcnews.com/business", bypass_cache=True)
|
||||
end_time = time.time()
|
||||
cprint(f"[LOG] 📦 [bold yellow]Second crawl took {end_time - start_time} seconds and result (forced to crawl):[/bold yellow]")
|
||||
cprint(
|
||||
f"[LOG] 📦 [bold yellow]Second crawl took {end_time - start_time} seconds and result (forced to crawl):[/bold yellow]"
|
||||
)
|
||||
print_result(result)
|
||||
|
||||
|
||||
def add_chunking_strategy(crawler):
|
||||
# Adding a chunking strategy: RegexChunking
|
||||
cprint("\n🧩 [bold cyan]Let's add a chunking strategy: RegexChunking![/bold cyan]", True)
|
||||
cprint("RegexChunking is a simple chunking strategy that splits the text based on a given regex pattern. Let's see it in action!")
|
||||
cprint(
|
||||
"\n🧩 [bold cyan]Let's add a chunking strategy: RegexChunking![/bold cyan]",
|
||||
True,
|
||||
)
|
||||
cprint(
|
||||
"RegexChunking is a simple chunking strategy that splits the text based on a given regex pattern. Let's see it in action!"
|
||||
)
|
||||
result = crawler.run(
|
||||
url="https://www.nbcnews.com/business",
|
||||
chunking_strategy=RegexChunking(patterns=["\n\n"])
|
||||
chunking_strategy=RegexChunking(patterns=["\n\n"]),
|
||||
)
|
||||
cprint("[LOG] 📦 [bold yellow]RegexChunking result:[/bold yellow]")
|
||||
print_result(result)
|
||||
|
||||
# Adding another chunking strategy: NlpSentenceChunking
|
||||
cprint("\n🔍 [bold cyan]Time to explore another chunking strategy: NlpSentenceChunking![/bold cyan]", True)
|
||||
cprint("NlpSentenceChunking uses NLP techniques to split the text into sentences. Let's see how it performs!")
|
||||
cprint(
|
||||
"\n🔍 [bold cyan]Time to explore another chunking strategy: NlpSentenceChunking![/bold cyan]",
|
||||
True,
|
||||
)
|
||||
cprint(
|
||||
"NlpSentenceChunking uses NLP techniques to split the text into sentences. Let's see how it performs!"
|
||||
)
|
||||
result = crawler.run(
|
||||
url="https://www.nbcnews.com/business",
|
||||
chunking_strategy=NlpSentenceChunking()
|
||||
url="https://www.nbcnews.com/business", chunking_strategy=NlpSentenceChunking()
|
||||
)
|
||||
cprint("[LOG] 📦 [bold yellow]NlpSentenceChunking result:[/bold yellow]")
|
||||
print_result(result)
|
||||
|
||||
|
||||
def add_extraction_strategy(crawler):
|
||||
# Adding an extraction strategy: CosineStrategy
|
||||
cprint("\n🧠 [bold cyan]Let's get smarter with an extraction strategy: CosineStrategy![/bold cyan]", True)
|
||||
cprint("CosineStrategy uses cosine similarity to extract semantically similar blocks of text. Let's see it in action!")
|
||||
cprint(
|
||||
"\n🧠 [bold cyan]Let's get smarter with an extraction strategy: CosineStrategy![/bold cyan]",
|
||||
True,
|
||||
)
|
||||
cprint(
|
||||
"CosineStrategy uses cosine similarity to extract semantically similar blocks of text. Let's see it in action!"
|
||||
)
|
||||
result = crawler.run(
|
||||
url="https://www.nbcnews.com/business",
|
||||
extraction_strategy=CosineStrategy(word_count_threshold=10, max_dist=0.2, linkage_method="ward", top_k=3, sim_threshold = 0.3, verbose=True)
|
||||
extraction_strategy=CosineStrategy(
|
||||
word_count_threshold=10,
|
||||
max_dist=0.2,
|
||||
linkage_method="ward",
|
||||
top_k=3,
|
||||
sim_threshold=0.3,
|
||||
verbose=True,
|
||||
),
|
||||
)
|
||||
cprint("[LOG] 📦 [bold yellow]CosineStrategy result:[/bold yellow]")
|
||||
print_result(result)
|
||||
|
||||
|
||||
# Using semantic_filter with CosineStrategy
|
||||
cprint("You can pass other parameters like 'semantic_filter' to the CosineStrategy to extract semantically similar blocks of text. Let's see it in action!")
|
||||
cprint(
|
||||
"You can pass other parameters like 'semantic_filter' to the CosineStrategy to extract semantically similar blocks of text. Let's see it in action!"
|
||||
)
|
||||
result = crawler.run(
|
||||
url="https://www.nbcnews.com/business",
|
||||
extraction_strategy=CosineStrategy(
|
||||
semantic_filter="inflation rent prices",
|
||||
)
|
||||
),
|
||||
)
|
||||
cprint(
|
||||
"[LOG] 📦 [bold yellow]CosineStrategy result with semantic filter:[/bold yellow]"
|
||||
)
|
||||
cprint("[LOG] 📦 [bold yellow]CosineStrategy result with semantic filter:[/bold yellow]")
|
||||
print_result(result)
|
||||
|
||||
|
||||
def add_llm_extraction_strategy(crawler):
|
||||
# Adding an LLM extraction strategy without instructions
|
||||
cprint("\n🤖 [bold cyan]Time to bring in the big guns: LLMExtractionStrategy without instructions![/bold cyan]", True)
|
||||
cprint("LLMExtractionStrategy uses a large language model to extract relevant information from the web page. Let's see it in action!")
|
||||
cprint(
|
||||
"\n🤖 [bold cyan]Time to bring in the big guns: LLMExtractionStrategy without instructions![/bold cyan]",
|
||||
True,
|
||||
)
|
||||
cprint(
|
||||
"LLMExtractionStrategy uses a large language model to extract relevant information from the web page. Let's see it in action!"
|
||||
)
|
||||
result = crawler.run(
|
||||
url="https://www.nbcnews.com/business",
|
||||
extraction_strategy=LLMExtractionStrategy(provider="openai/gpt-4o", api_token=os.getenv('OPENAI_API_KEY'))
|
||||
extraction_strategy=LLMExtractionStrategy(
|
||||
provider="openai/gpt-4o", api_token=os.getenv("OPENAI_API_KEY")
|
||||
),
|
||||
)
|
||||
cprint(
|
||||
"[LOG] 📦 [bold yellow]LLMExtractionStrategy (no instructions) result:[/bold yellow]"
|
||||
)
|
||||
cprint("[LOG] 📦 [bold yellow]LLMExtractionStrategy (no instructions) result:[/bold yellow]")
|
||||
print_result(result)
|
||||
|
||||
|
||||
# Adding an LLM extraction strategy with instructions
|
||||
cprint("\n📜 [bold cyan]Let's make it even more interesting: LLMExtractionStrategy with instructions![/bold cyan]", True)
|
||||
cprint("Let's say we are only interested in financial news. Let's see how LLMExtractionStrategy performs with instructions!")
|
||||
cprint(
|
||||
"\n📜 [bold cyan]Let's make it even more interesting: LLMExtractionStrategy with instructions![/bold cyan]",
|
||||
True,
|
||||
)
|
||||
cprint(
|
||||
"Let's say we are only interested in financial news. Let's see how LLMExtractionStrategy performs with instructions!"
|
||||
)
|
||||
result = crawler.run(
|
||||
url="https://www.nbcnews.com/business",
|
||||
extraction_strategy=LLMExtractionStrategy(
|
||||
provider="openai/gpt-4o",
|
||||
api_token=os.getenv('OPENAI_API_KEY'),
|
||||
instruction="I am interested in only financial news"
|
||||
)
|
||||
api_token=os.getenv("OPENAI_API_KEY"),
|
||||
instruction="I am interested in only financial news",
|
||||
),
|
||||
)
|
||||
cprint(
|
||||
"[LOG] 📦 [bold yellow]LLMExtractionStrategy (with instructions) result:[/bold yellow]"
|
||||
)
|
||||
cprint("[LOG] 📦 [bold yellow]LLMExtractionStrategy (with instructions) result:[/bold yellow]")
|
||||
print_result(result)
|
||||
|
||||
|
||||
result = crawler.run(
|
||||
url="https://www.nbcnews.com/business",
|
||||
extraction_strategy=LLMExtractionStrategy(
|
||||
provider="openai/gpt-4o",
|
||||
api_token=os.getenv('OPENAI_API_KEY'),
|
||||
instruction="Extract only content related to technology"
|
||||
)
|
||||
api_token=os.getenv("OPENAI_API_KEY"),
|
||||
instruction="Extract only content related to technology",
|
||||
),
|
||||
)
|
||||
cprint(
|
||||
"[LOG] 📦 [bold yellow]LLMExtractionStrategy (with technology instruction) result:[/bold yellow]"
|
||||
)
|
||||
cprint("[LOG] 📦 [bold yellow]LLMExtractionStrategy (with technology instruction) result:[/bold yellow]")
|
||||
print_result(result)
|
||||
|
||||
|
||||
def targeted_extraction(crawler):
|
||||
# Using a CSS selector to extract only H2 tags
|
||||
cprint("\n🎯 [bold cyan]Targeted extraction: Let's use a CSS selector to extract only H2 tags![/bold cyan]", True)
|
||||
result = crawler.run(
|
||||
url="https://www.nbcnews.com/business",
|
||||
css_selector="h2"
|
||||
cprint(
|
||||
"\n🎯 [bold cyan]Targeted extraction: Let's use a CSS selector to extract only H2 tags![/bold cyan]",
|
||||
True,
|
||||
)
|
||||
result = crawler.run(url="https://www.nbcnews.com/business", css_selector="h2")
|
||||
cprint("[LOG] 📦 [bold yellow]CSS Selector (H2 tags) result:[/bold yellow]")
|
||||
print_result(result)
|
||||
|
||||
|
||||
def interactive_extraction(crawler):
|
||||
# Passing JavaScript code to interact with the page
|
||||
cprint("\n🖱️ [bold cyan]Let's get interactive: Passing JavaScript code to click 'Load More' button![/bold cyan]", True)
|
||||
cprint("In this example we try to click the 'Load More' button on the page using JavaScript code.")
|
||||
cprint(
|
||||
"\n🖱️ [bold cyan]Let's get interactive: Passing JavaScript code to click 'Load More' button![/bold cyan]",
|
||||
True,
|
||||
)
|
||||
cprint(
|
||||
"In this example we try to click the 'Load More' button on the page using JavaScript code."
|
||||
)
|
||||
js_code = """
|
||||
const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More'));
|
||||
loadMoreButton && loadMoreButton.click();
|
||||
"""
|
||||
# crawler_strategy = LocalSeleniumCrawlerStrategy(js_code=js_code)
|
||||
# crawler = WebCrawler(crawler_strategy=crawler_strategy, always_by_pass_cache=True)
|
||||
result = crawler.run(
|
||||
url="https://www.nbcnews.com/business",
|
||||
js = js_code
|
||||
result = crawler.run(url="https://www.nbcnews.com/business", js=js_code)
|
||||
cprint(
|
||||
"[LOG] 📦 [bold yellow]JavaScript Code (Load More button) result:[/bold yellow]"
|
||||
)
|
||||
cprint("[LOG] 📦 [bold yellow]JavaScript Code (Load More button) result:[/bold yellow]")
|
||||
print_result(result)
|
||||
|
||||
|
||||
def multiple_scrip(crawler):
|
||||
# Passing JavaScript code to interact with the page
|
||||
cprint("\n🖱️ [bold cyan]Let's get interactive: Passing JavaScript code to click 'Load More' button![/bold cyan]", True)
|
||||
cprint("In this example we try to click the 'Load More' button on the page using JavaScript code.")
|
||||
js_code = ["""
|
||||
cprint(
|
||||
"\n🖱️ [bold cyan]Let's get interactive: Passing JavaScript code to click 'Load More' button![/bold cyan]",
|
||||
True,
|
||||
)
|
||||
cprint(
|
||||
"In this example we try to click the 'Load More' button on the page using JavaScript code."
|
||||
)
|
||||
js_code = [
|
||||
"""
|
||||
const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More'));
|
||||
loadMoreButton && loadMoreButton.click();
|
||||
"""] * 2
|
||||
"""
|
||||
] * 2
|
||||
# crawler_strategy = LocalSeleniumCrawlerStrategy(js_code=js_code)
|
||||
# crawler = WebCrawler(crawler_strategy=crawler_strategy, always_by_pass_cache=True)
|
||||
result = crawler.run(
|
||||
url="https://www.nbcnews.com/business",
|
||||
js = js_code
|
||||
result = crawler.run(url="https://www.nbcnews.com/business", js=js_code)
|
||||
cprint(
|
||||
"[LOG] 📦 [bold yellow]JavaScript Code (Load More button) result:[/bold yellow]"
|
||||
)
|
||||
cprint("[LOG] 📦 [bold yellow]JavaScript Code (Load More button) result:[/bold yellow]")
|
||||
print_result(result)
|
||||
|
||||
|
||||
def using_crawler_hooks(crawler):
|
||||
# Example usage of the hooks for authentication and setting a cookie
|
||||
def on_driver_created(driver):
|
||||
print("[HOOK] on_driver_created")
|
||||
# Example customization: maximize the window
|
||||
driver.maximize_window()
|
||||
|
||||
|
||||
# Example customization: logging in to a hypothetical website
|
||||
driver.get('https://example.com/login')
|
||||
|
||||
driver.get("https://example.com/login")
|
||||
|
||||
from selenium.webdriver.support.ui import WebDriverWait
|
||||
from selenium.webdriver.common.by import By
|
||||
from selenium.webdriver.support import expected_conditions as EC
|
||||
|
||||
|
||||
WebDriverWait(driver, 10).until(
|
||||
EC.presence_of_element_located((By.NAME, 'username'))
|
||||
EC.presence_of_element_located((By.NAME, "username"))
|
||||
)
|
||||
driver.find_element(By.NAME, 'username').send_keys('testuser')
|
||||
driver.find_element(By.NAME, 'password').send_keys('password123')
|
||||
driver.find_element(By.NAME, 'login').click()
|
||||
driver.find_element(By.NAME, "username").send_keys("testuser")
|
||||
driver.find_element(By.NAME, "password").send_keys("password123")
|
||||
driver.find_element(By.NAME, "login").click()
|
||||
WebDriverWait(driver, 10).until(
|
||||
EC.presence_of_element_located((By.ID, 'welcome'))
|
||||
EC.presence_of_element_located((By.ID, "welcome"))
|
||||
)
|
||||
# Add a custom cookie
|
||||
driver.add_cookie({'name': 'test_cookie', 'value': 'cookie_value'})
|
||||
return driver
|
||||
|
||||
driver.add_cookie({"name": "test_cookie", "value": "cookie_value"})
|
||||
return driver
|
||||
|
||||
def before_get_url(driver):
|
||||
print("[HOOK] before_get_url")
|
||||
# Example customization: add a custom header
|
||||
# Enable Network domain for sending headers
|
||||
driver.execute_cdp_cmd('Network.enable', {})
|
||||
driver.execute_cdp_cmd("Network.enable", {})
|
||||
# Add a custom header
|
||||
driver.execute_cdp_cmd('Network.setExtraHTTPHeaders', {'headers': {'X-Test-Header': 'test'}})
|
||||
driver.execute_cdp_cmd(
|
||||
"Network.setExtraHTTPHeaders", {"headers": {"X-Test-Header": "test"}}
|
||||
)
|
||||
return driver
|
||||
|
||||
|
||||
def after_get_url(driver):
|
||||
print("[HOOK] after_get_url")
|
||||
# Example customization: log the URL
|
||||
@@ -246,48 +327,59 @@ def using_crawler_hooks(crawler):
|
||||
# Example customization: log the HTML
|
||||
print(len(html))
|
||||
return driver
|
||||
|
||||
cprint("\n🔗 [bold cyan]Using Crawler Hooks: Let's see how we can customize the crawler using hooks![/bold cyan]", True)
|
||||
|
||||
|
||||
cprint(
|
||||
"\n🔗 [bold cyan]Using Crawler Hooks: Let's see how we can customize the crawler using hooks![/bold cyan]",
|
||||
True,
|
||||
)
|
||||
|
||||
crawler_strategy = LocalSeleniumCrawlerStrategy(verbose=True)
|
||||
crawler_strategy.set_hook('on_driver_created', on_driver_created)
|
||||
crawler_strategy.set_hook('before_get_url', before_get_url)
|
||||
crawler_strategy.set_hook('after_get_url', after_get_url)
|
||||
crawler_strategy.set_hook('before_return_html', before_return_html)
|
||||
|
||||
crawler_strategy.set_hook("on_driver_created", on_driver_created)
|
||||
crawler_strategy.set_hook("before_get_url", before_get_url)
|
||||
crawler_strategy.set_hook("after_get_url", after_get_url)
|
||||
crawler_strategy.set_hook("before_return_html", before_return_html)
|
||||
|
||||
crawler = WebCrawler(verbose=True, crawler_strategy=crawler_strategy)
|
||||
crawler.warmup()
|
||||
crawler.warmup()
|
||||
result = crawler.run(url="https://example.com")
|
||||
|
||||
|
||||
cprint("[LOG] 📦 [bold yellow]Crawler Hooks result:[/bold yellow]")
|
||||
print_result(result= result)
|
||||
|
||||
print_result(result=result)
|
||||
|
||||
|
||||
def using_crawler_hooks_dleay_example(crawler):
|
||||
def delay(driver):
|
||||
print("Delaying for 5 seconds...")
|
||||
time.sleep(5)
|
||||
print("Resuming...")
|
||||
|
||||
|
||||
def create_crawler():
|
||||
crawler_strategy = LocalSeleniumCrawlerStrategy(verbose=True)
|
||||
crawler_strategy.set_hook('after_get_url', delay)
|
||||
crawler_strategy.set_hook("after_get_url", delay)
|
||||
crawler = WebCrawler(verbose=True, crawler_strategy=crawler_strategy)
|
||||
crawler.warmup()
|
||||
return crawler
|
||||
|
||||
cprint("\n🔗 [bold cyan]Using Crawler Hooks: Let's add a delay after fetching the url to make sure entire page is fetched.[/bold cyan]")
|
||||
cprint(
|
||||
"\n🔗 [bold cyan]Using Crawler Hooks: Let's add a delay after fetching the url to make sure entire page is fetched.[/bold cyan]"
|
||||
)
|
||||
crawler = create_crawler()
|
||||
result = crawler.run(url="https://google.com", bypass_cache=True)
|
||||
|
||||
result = crawler.run(url="https://google.com", bypass_cache=True)
|
||||
|
||||
cprint("[LOG] 📦 [bold yellow]Crawler Hooks result:[/bold yellow]")
|
||||
print_result(result)
|
||||
|
||||
|
||||
|
||||
|
||||
def main():
|
||||
cprint("🌟 [bold green]Welcome to the Crawl4ai Quickstart Guide! Let's dive into some web crawling fun! 🌐[/bold green]")
|
||||
cprint("⛳️ [bold cyan]First Step: Create an instance of WebCrawler and call the `warmup()` function.[/bold cyan]")
|
||||
cprint("If this is the first time you're running Crawl4ai, this might take a few seconds to load required model files.")
|
||||
cprint(
|
||||
"🌟 [bold green]Welcome to the Crawl4ai Quickstart Guide! Let's dive into some web crawling fun! 🌐[/bold green]"
|
||||
)
|
||||
cprint(
|
||||
"⛳️ [bold cyan]First Step: Create an instance of WebCrawler and call the `warmup()` function.[/bold cyan]"
|
||||
)
|
||||
cprint(
|
||||
"If this is the first time you're running Crawl4ai, this might take a few seconds to load required model files."
|
||||
)
|
||||
|
||||
crawler = create_crawler()
|
||||
|
||||
@@ -295,7 +387,7 @@ def main():
|
||||
basic_usage(crawler)
|
||||
# basic_usage_some_params(crawler)
|
||||
understanding_parameters(crawler)
|
||||
|
||||
|
||||
crawler.always_by_pass_cache = True
|
||||
screenshot_usage(crawler)
|
||||
add_chunking_strategy(crawler)
|
||||
@@ -305,8 +397,10 @@ def main():
|
||||
interactive_extraction(crawler)
|
||||
multiple_scrip(crawler)
|
||||
|
||||
cprint("\n🎉 [bold green]Congratulations! You've made it through the Crawl4ai Quickstart Guide! Now go forth and crawl the web like a pro! 🕸️[/bold green]")
|
||||
cprint(
|
||||
"\n🎉 [bold green]Congratulations! You've made it through the Crawl4ai Quickstart Guide! Now go forth and crawl the web like a pro! 🕸️[/bold green]"
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
||||
|
||||
735
docs/examples/quickstart_v0.ipynb
Normal file
@@ -0,0 +1,735 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "6yLvrXn7yZQI"
|
||||
},
|
||||
"source": [
|
||||
"# Crawl4AI: Advanced Web Crawling and Data Extraction\n",
|
||||
"\n",
|
||||
"Welcome to this interactive notebook showcasing Crawl4AI, an advanced asynchronous web crawling and data extraction library.\n",
|
||||
"\n",
|
||||
"- GitHub Repository: [https://github.com/unclecode/crawl4ai](https://github.com/unclecode/crawl4ai)\n",
|
||||
"- Twitter: [@unclecode](https://twitter.com/unclecode)\n",
|
||||
"- Website: [https://crawl4ai.com](https://crawl4ai.com)\n",
|
||||
"\n",
|
||||
"Let's explore the powerful features of Crawl4AI!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "KIn_9nxFyZQK"
|
||||
},
|
||||
"source": [
|
||||
"## Installation\n",
|
||||
"\n",
|
||||
"First, let's install Crawl4AI from GitHub:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "mSnaxLf3zMog"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!sudo apt-get update && sudo apt-get install -y libwoff1 libopus0 libwebp6 libwebpdemux2 libenchant1c2a libgudev-1.0-0 libsecret-1-0 libhyphen0 libgdk-pixbuf2.0-0 libegl1 libnotify4 libxslt1.1 libevent-2.1-7 libgles2 libvpx6 libxcomposite1 libatk1.0-0 libatk-bridge2.0-0 libepoxy0 libgtk-3-0 libharfbuzz-icu0"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "xlXqaRtayZQK"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install crawl4ai\n",
|
||||
"!pip install nest-asyncio\n",
|
||||
"!playwright install"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "qKCE7TI7yZQL"
|
||||
},
|
||||
"source": [
|
||||
"Now, let's import the necessary libraries:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {
|
||||
"id": "I67tr7aAyZQL"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import asyncio\n",
|
||||
"import nest_asyncio\n",
|
||||
"from crawl4ai import AsyncWebCrawler\n",
|
||||
"from crawl4ai.extraction_strategy import JsonCssExtractionStrategy, LLMExtractionStrategy\n",
|
||||
"import json\n",
|
||||
"import time\n",
|
||||
"from pydantic import BaseModel, Field\n",
|
||||
"\n",
|
||||
"nest_asyncio.apply()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "h7yR_Rt_yZQM"
|
||||
},
|
||||
"source": [
|
||||
"## Basic Usage\n",
|
||||
"\n",
|
||||
"Let's start with a simple crawl example:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/"
|
||||
},
|
||||
"id": "yBh6hf4WyZQM",
|
||||
"outputId": "0f83af5c-abba-4175-ed95-70b7512e6bcc"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[LOG] 🌤️ Warming up the AsyncWebCrawler\n",
|
||||
"[LOG] 🌞 AsyncWebCrawler is ready to crawl\n",
|
||||
"[LOG] 🚀 Content extracted for https://www.nbcnews.com/business, success: True, time taken: 0.05 seconds\n",
|
||||
"[LOG] 🚀 Extraction done for https://www.nbcnews.com/business, time taken: 0.05 seconds.\n",
|
||||
"18102\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"async def simple_crawl():\n",
|
||||
" async with AsyncWebCrawler(verbose=True) as crawler:\n",
|
||||
" result = await crawler.arun(url=\"https://www.nbcnews.com/business\")\n",
|
||||
" print(len(result.markdown))\n",
|
||||
"await simple_crawl()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "9rtkgHI28uI4"
|
||||
},
|
||||
"source": [
|
||||
"💡 By default, **Crawl4AI** caches the result of every URL, so the next time you call it, you’ll get an instant result. But if you want to bypass the cache, just set `bypass_cache=True`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "MzZ0zlJ9yZQM"
|
||||
},
|
||||
"source": [
|
||||
"## Advanced Features\n",
|
||||
"\n",
|
||||
"### Executing JavaScript and Using CSS Selectors"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/"
|
||||
},
|
||||
"id": "gHStF86xyZQM",
|
||||
"outputId": "34d0fb6d-4dec-4677-f76e-85a1f082829b"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[LOG] 🌤️ Warming up the AsyncWebCrawler\n",
|
||||
"[LOG] 🌞 AsyncWebCrawler is ready to crawl\n",
|
||||
"[LOG] 🕸️ Crawling https://www.nbcnews.com/business using AsyncPlaywrightCrawlerStrategy...\n",
|
||||
"[LOG] ✅ Crawled https://www.nbcnews.com/business successfully!\n",
|
||||
"[LOG] 🚀 Crawling done for https://www.nbcnews.com/business, success: True, time taken: 6.06 seconds\n",
|
||||
"[LOG] 🚀 Content extracted for https://www.nbcnews.com/business, success: True, time taken: 0.10 seconds\n",
|
||||
"[LOG] 🔥 Extracting semantic blocks for https://www.nbcnews.com/business, Strategy: AsyncWebCrawler\n",
|
||||
"[LOG] 🚀 Extraction done for https://www.nbcnews.com/business, time taken: 0.11 seconds.\n",
|
||||
"41135\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"async def js_and_css():\n",
|
||||
" async with AsyncWebCrawler(verbose=True) as crawler:\n",
|
||||
" js_code = [\"const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More')); loadMoreButton && loadMoreButton.click();\"]\n",
|
||||
" result = await crawler.arun(\n",
|
||||
" url=\"https://www.nbcnews.com/business\",\n",
|
||||
" js_code=js_code,\n",
|
||||
" # css_selector=\"YOUR_CSS_SELECTOR_HERE\",\n",
|
||||
" bypass_cache=True\n",
|
||||
" )\n",
|
||||
" print(len(result.markdown))\n",
|
||||
"\n",
|
||||
"await js_and_css()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "cqE_W4coyZQM"
|
||||
},
|
||||
"source": [
|
||||
"### Using a Proxy\n",
|
||||
"\n",
|
||||
"Note: You'll need to replace the proxy URL with a working proxy for this example to run successfully."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "QjAyiAGqyZQM"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"async def use_proxy():\n",
|
||||
" async with AsyncWebCrawler(verbose=True, proxy=\"http://your-proxy-url:port\") as crawler:\n",
|
||||
" result = await crawler.arun(\n",
|
||||
" url=\"https://www.nbcnews.com/business\",\n",
|
||||
" bypass_cache=True\n",
|
||||
" )\n",
|
||||
" print(result.markdown[:500]) # Print first 500 characters\n",
|
||||
"\n",
|
||||
"# Uncomment the following line to run the proxy example\n",
|
||||
"# await use_proxy()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "XTZ88lbayZQN"
|
||||
},
|
||||
"source": [
|
||||
"### Extracting Structured Data with OpenAI\n",
|
||||
"\n",
|
||||
"Note: You'll need to set your OpenAI API key as an environment variable for this example to work."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/"
|
||||
},
|
||||
"id": "fIOlDayYyZQN",
|
||||
"outputId": "cb8359cc-dee0-4762-9698-5dfdcee055b8"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[LOG] 🌤️ Warming up the AsyncWebCrawler\n",
|
||||
"[LOG] 🌞 AsyncWebCrawler is ready to crawl\n",
|
||||
"[LOG] 🕸️ Crawling https://openai.com/api/pricing/ using AsyncPlaywrightCrawlerStrategy...\n",
|
||||
"[LOG] ✅ Crawled https://openai.com/api/pricing/ successfully!\n",
|
||||
"[LOG] 🚀 Crawling done for https://openai.com/api/pricing/, success: True, time taken: 3.77 seconds\n",
|
||||
"[LOG] 🚀 Content extracted for https://openai.com/api/pricing/, success: True, time taken: 0.21 seconds\n",
|
||||
"[LOG] 🔥 Extracting semantic blocks for https://openai.com/api/pricing/, Strategy: AsyncWebCrawler\n",
|
||||
"[LOG] Call LLM for https://openai.com/api/pricing/ - block index: 0\n",
|
||||
"[LOG] Call LLM for https://openai.com/api/pricing/ - block index: 1\n",
|
||||
"[LOG] Call LLM for https://openai.com/api/pricing/ - block index: 2\n",
|
||||
"[LOG] Call LLM for https://openai.com/api/pricing/ - block index: 3\n",
|
||||
"[LOG] Extracted 4 blocks from URL: https://openai.com/api/pricing/ block index: 3\n",
|
||||
"[LOG] Call LLM for https://openai.com/api/pricing/ - block index: 4\n",
|
||||
"[LOG] Extracted 5 blocks from URL: https://openai.com/api/pricing/ block index: 0\n",
|
||||
"[LOG] Extracted 1 blocks from URL: https://openai.com/api/pricing/ block index: 4\n",
|
||||
"[LOG] Extracted 8 blocks from URL: https://openai.com/api/pricing/ block index: 1\n",
|
||||
"[LOG] Extracted 12 blocks from URL: https://openai.com/api/pricing/ block index: 2\n",
|
||||
"[LOG] 🚀 Extraction done for https://openai.com/api/pricing/, time taken: 8.55 seconds.\n",
|
||||
"5029\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"from google.colab import userdata\n",
|
||||
"os.environ['OPENAI_API_KEY'] = userdata.get('OPENAI_API_KEY')\n",
|
||||
"\n",
|
||||
"class OpenAIModelFee(BaseModel):\n",
|
||||
" model_name: str = Field(..., description=\"Name of the OpenAI model.\")\n",
|
||||
" input_fee: str = Field(..., description=\"Fee for input token for the OpenAI model.\")\n",
|
||||
" output_fee: str = Field(..., description=\"Fee for output token for the OpenAI model.\")\n",
|
||||
"\n",
|
||||
"async def extract_openai_fees():\n",
|
||||
" async with AsyncWebCrawler(verbose=True) as crawler:\n",
|
||||
" result = await crawler.arun(\n",
|
||||
" url='https://openai.com/api/pricing/',\n",
|
||||
" word_count_threshold=1,\n",
|
||||
" extraction_strategy=LLMExtractionStrategy(\n",
|
||||
" provider=\"openai/gpt-4o\", api_token=os.getenv('OPENAI_API_KEY'),\n",
|
||||
" schema=OpenAIModelFee.schema(),\n",
|
||||
" extraction_type=\"schema\",\n",
|
||||
" instruction=\"\"\"From the crawled content, extract all mentioned model names along with their fees for input and output tokens.\n",
|
||||
" Do not miss any models in the entire content. One extracted model JSON format should look like this:\n",
|
||||
" {\"model_name\": \"GPT-4\", \"input_fee\": \"US$10.00 / 1M tokens\", \"output_fee\": \"US$30.00 / 1M tokens\"}.\"\"\"\n",
|
||||
" ),\n",
|
||||
" bypass_cache=True,\n",
|
||||
" )\n",
|
||||
" print(len(result.extracted_content))\n",
|
||||
"\n",
|
||||
"# Uncomment the following line to run the OpenAI extraction example\n",
|
||||
"await extract_openai_fees()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "BypA5YxEyZQN"
|
||||
},
|
||||
"source": [
|
||||
"### Advanced Multi-Page Crawling with JavaScript Execution"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "tfkcVQ0b7mw-"
|
||||
},
|
||||
"source": [
|
||||
"## Advanced Multi-Page Crawling with JavaScript Execution\n",
|
||||
"\n",
|
||||
"This example demonstrates Crawl4AI's ability to handle complex crawling scenarios, specifically extracting commits from multiple pages of a GitHub repository. The challenge here is that clicking the \"Next\" button doesn't load a new page, but instead uses asynchronous JavaScript to update the content. This is a common hurdle in modern web crawling.\n",
|
||||
"\n",
|
||||
"To overcome this, we use Crawl4AI's custom JavaScript execution to simulate clicking the \"Next\" button, and implement a custom hook to detect when new data has loaded. Our strategy involves comparing the first commit's text before and after \"clicking\" Next, waiting until it changes to confirm new data has rendered. This showcases Crawl4AI's flexibility in handling dynamic content and its ability to implement custom logic for even the most challenging crawling tasks."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/"
|
||||
},
|
||||
"id": "qUBKGpn3yZQN",
|
||||
"outputId": "3e555b6a-ed33-42f4-cce9-499a923fbe17"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[LOG] 🌤️ Warming up the AsyncWebCrawler\n",
|
||||
"[LOG] 🌞 AsyncWebCrawler is ready to crawl\n",
|
||||
"[LOG] 🕸️ Crawling https://github.com/microsoft/TypeScript/commits/main using AsyncPlaywrightCrawlerStrategy...\n",
|
||||
"[LOG] ✅ Crawled https://github.com/microsoft/TypeScript/commits/main successfully!\n",
|
||||
"[LOG] 🚀 Crawling done for https://github.com/microsoft/TypeScript/commits/main, success: True, time taken: 5.16 seconds\n",
|
||||
"[LOG] 🚀 Content extracted for https://github.com/microsoft/TypeScript/commits/main, success: True, time taken: 0.28 seconds\n",
|
||||
"[LOG] 🔥 Extracting semantic blocks for https://github.com/microsoft/TypeScript/commits/main, Strategy: AsyncWebCrawler\n",
|
||||
"[LOG] 🚀 Extraction done for https://github.com/microsoft/TypeScript/commits/main, time taken: 0.28 seconds.\n",
|
||||
"Page 1: Found 35 commits\n",
|
||||
"[LOG] 🕸️ Crawling https://github.com/microsoft/TypeScript/commits/main using AsyncPlaywrightCrawlerStrategy...\n",
|
||||
"[LOG] ✅ Crawled https://github.com/microsoft/TypeScript/commits/main successfully!\n",
|
||||
"[LOG] 🚀 Crawling done for https://github.com/microsoft/TypeScript/commits/main, success: True, time taken: 0.78 seconds\n",
|
||||
"[LOG] 🚀 Content extracted for https://github.com/microsoft/TypeScript/commits/main, success: True, time taken: 0.90 seconds\n",
|
||||
"[LOG] 🔥 Extracting semantic blocks for https://github.com/microsoft/TypeScript/commits/main, Strategy: AsyncWebCrawler\n",
|
||||
"[LOG] 🚀 Extraction done for https://github.com/microsoft/TypeScript/commits/main, time taken: 0.90 seconds.\n",
|
||||
"Page 2: Found 35 commits\n",
|
||||
"[LOG] 🕸️ Crawling https://github.com/microsoft/TypeScript/commits/main using AsyncPlaywrightCrawlerStrategy...\n",
|
||||
"[LOG] ✅ Crawled https://github.com/microsoft/TypeScript/commits/main successfully!\n",
|
||||
"[LOG] 🚀 Crawling done for https://github.com/microsoft/TypeScript/commits/main, success: True, time taken: 2.00 seconds\n",
|
||||
"[LOG] 🚀 Content extracted for https://github.com/microsoft/TypeScript/commits/main, success: True, time taken: 0.74 seconds\n",
|
||||
"[LOG] 🔥 Extracting semantic blocks for https://github.com/microsoft/TypeScript/commits/main, Strategy: AsyncWebCrawler\n",
|
||||
"[LOG] 🚀 Extraction done for https://github.com/microsoft/TypeScript/commits/main, time taken: 0.75 seconds.\n",
|
||||
"Page 3: Found 35 commits\n",
|
||||
"Successfully crawled 105 commits across 3 pages\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import re\n",
|
||||
"from bs4 import BeautifulSoup\n",
|
||||
"\n",
|
||||
"async def crawl_typescript_commits():\n",
|
||||
" first_commit = \"\"\n",
|
||||
" async def on_execution_started(page):\n",
|
||||
" nonlocal first_commit\n",
|
||||
" try:\n",
|
||||
" while True:\n",
|
||||
" await page.wait_for_selector('li.Box-sc-g0xbh4-0 h4')\n",
|
||||
" commit = await page.query_selector('li.Box-sc-g0xbh4-0 h4')\n",
|
||||
" commit = await commit.evaluate('(element) => element.textContent')\n",
|
||||
" commit = re.sub(r'\\s+', '', commit)\n",
|
||||
" if commit and commit != first_commit:\n",
|
||||
" first_commit = commit\n",
|
||||
" break\n",
|
||||
" await asyncio.sleep(0.5)\n",
|
||||
" except Exception as e:\n",
|
||||
" print(f\"Warning: New content didn't appear after JavaScript execution: {e}\")\n",
|
||||
"\n",
|
||||
" async with AsyncWebCrawler(verbose=True) as crawler:\n",
|
||||
" crawler.crawler_strategy.set_hook('on_execution_started', on_execution_started)\n",
|
||||
"\n",
|
||||
" url = \"https://github.com/microsoft/TypeScript/commits/main\"\n",
|
||||
" session_id = \"typescript_commits_session\"\n",
|
||||
" all_commits = []\n",
|
||||
"\n",
|
||||
" js_next_page = \"\"\"\n",
|
||||
" const button = document.querySelector('a[data-testid=\"pagination-next-button\"]');\n",
|
||||
" if (button) button.click();\n",
|
||||
" \"\"\"\n",
|
||||
"\n",
|
||||
" for page in range(3): # Crawl 3 pages\n",
|
||||
" result = await crawler.arun(\n",
|
||||
" url=url,\n",
|
||||
" session_id=session_id,\n",
|
||||
" css_selector=\"li.Box-sc-g0xbh4-0\",\n",
|
||||
" js=js_next_page if page > 0 else None,\n",
|
||||
" bypass_cache=True,\n",
|
||||
" js_only=page > 0\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" assert result.success, f\"Failed to crawl page {page + 1}\"\n",
|
||||
"\n",
|
||||
" soup = BeautifulSoup(result.cleaned_html, 'html.parser')\n",
|
||||
" commits = soup.select(\"li\")\n",
|
||||
" all_commits.extend(commits)\n",
|
||||
"\n",
|
||||
" print(f\"Page {page + 1}: Found {len(commits)} commits\")\n",
|
||||
"\n",
|
||||
" await crawler.crawler_strategy.kill_session(session_id)\n",
|
||||
" print(f\"Successfully crawled {len(all_commits)} commits across 3 pages\")\n",
|
||||
"\n",
|
||||
"await crawl_typescript_commits()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "EJRnYsp6yZQN"
|
||||
},
|
||||
"source": [
|
||||
"### Using JsonCssExtractionStrategy for Fast Structured Output"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "1ZMqIzB_8SYp"
|
||||
},
|
||||
"source": [
|
||||
"The JsonCssExtractionStrategy is a powerful feature of Crawl4AI that allows for precise, structured data extraction from web pages. Here's how it works:\n",
|
||||
"\n",
|
||||
"1. You define a schema that describes the pattern of data you're interested in extracting.\n",
|
||||
"2. The schema includes a base selector that identifies repeating elements on the page.\n",
|
||||
"3. Within the schema, you define fields, each with its own selector and type.\n",
|
||||
"4. These field selectors are applied within the context of each base selector element.\n",
|
||||
"5. The strategy supports nested structures, lists within lists, and various data types.\n",
|
||||
"6. You can even include computed fields for more complex data manipulation.\n",
|
||||
"\n",
|
||||
"This approach allows for highly flexible and precise data extraction, transforming semi-structured web content into clean, structured JSON data. It's particularly useful for extracting consistent data patterns from pages like product listings, news articles, or search results.\n",
|
||||
"\n",
|
||||
"For more details and advanced usage, check out the full documentation on the Crawl4AI website."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/"
|
||||
},
|
||||
"id": "trCMR2T9yZQN",
|
||||
"outputId": "718d36f4-cccf-40f4-8d8c-c3ba73524d16"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[LOG] 🌤️ Warming up the AsyncWebCrawler\n",
|
||||
"[LOG] 🌞 AsyncWebCrawler is ready to crawl\n",
|
||||
"[LOG] 🕸️ Crawling https://www.nbcnews.com/business using AsyncPlaywrightCrawlerStrategy...\n",
|
||||
"[LOG] ✅ Crawled https://www.nbcnews.com/business successfully!\n",
|
||||
"[LOG] 🚀 Crawling done for https://www.nbcnews.com/business, success: True, time taken: 7.00 seconds\n",
|
||||
"[LOG] 🚀 Content extracted for https://www.nbcnews.com/business, success: True, time taken: 0.32 seconds\n",
|
||||
"[LOG] 🔥 Extracting semantic blocks for https://www.nbcnews.com/business, Strategy: AsyncWebCrawler\n",
|
||||
"[LOG] 🚀 Extraction done for https://www.nbcnews.com/business, time taken: 0.48 seconds.\n",
|
||||
"Successfully extracted 11 news teasers\n",
|
||||
"{\n",
|
||||
" \"category\": \"Business News\",\n",
|
||||
" \"headline\": \"NBC ripped up its Olympics playbook for 2024 \\u2014 so far, the new strategy paid off\",\n",
|
||||
" \"summary\": \"The Olympics have long been key to NBCUniversal. Paris marked the 18th Olympic Games broadcast by NBC in the U.S.\",\n",
|
||||
" \"time\": \"13h ago\",\n",
|
||||
" \"image\": {\n",
|
||||
" \"src\": \"https://media-cldnry.s-nbcnews.com/image/upload/t_focal-200x100,f_auto,q_auto:best/rockcms/2024-09/240903-nbc-olympics-ch-1344-c7a486.jpg\",\n",
|
||||
" \"alt\": \"Mike Tirico.\"\n",
|
||||
" },\n",
|
||||
" \"link\": \"https://www.nbcnews.com/business\"\n",
|
||||
"}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"async def extract_news_teasers():\n",
|
||||
" schema = {\n",
|
||||
" \"name\": \"News Teaser Extractor\",\n",
|
||||
" \"baseSelector\": \".wide-tease-item__wrapper\",\n",
|
||||
" \"fields\": [\n",
|
||||
" {\n",
|
||||
" \"name\": \"category\",\n",
|
||||
" \"selector\": \".unibrow span[data-testid='unibrow-text']\",\n",
|
||||
" \"type\": \"text\",\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"name\": \"headline\",\n",
|
||||
" \"selector\": \".wide-tease-item__headline\",\n",
|
||||
" \"type\": \"text\",\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"name\": \"summary\",\n",
|
||||
" \"selector\": \".wide-tease-item__description\",\n",
|
||||
" \"type\": \"text\",\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"name\": \"time\",\n",
|
||||
" \"selector\": \"[data-testid='wide-tease-date']\",\n",
|
||||
" \"type\": \"text\",\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"name\": \"image\",\n",
|
||||
" \"type\": \"nested\",\n",
|
||||
" \"selector\": \"picture.teasePicture img\",\n",
|
||||
" \"fields\": [\n",
|
||||
" {\"name\": \"src\", \"type\": \"attribute\", \"attribute\": \"src\"},\n",
|
||||
" {\"name\": \"alt\", \"type\": \"attribute\", \"attribute\": \"alt\"},\n",
|
||||
" ],\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"name\": \"link\",\n",
|
||||
" \"selector\": \"a[href]\",\n",
|
||||
" \"type\": \"attribute\",\n",
|
||||
" \"attribute\": \"href\",\n",
|
||||
" },\n",
|
||||
" ],\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" extraction_strategy = JsonCssExtractionStrategy(schema, verbose=True)\n",
|
||||
"\n",
|
||||
" async with AsyncWebCrawler(verbose=True) as crawler:\n",
|
||||
" result = await crawler.arun(\n",
|
||||
" url=\"https://www.nbcnews.com/business\",\n",
|
||||
" extraction_strategy=extraction_strategy,\n",
|
||||
" bypass_cache=True,\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" assert result.success, \"Failed to crawl the page\"\n",
|
||||
"\n",
|
||||
" news_teasers = json.loads(result.extracted_content)\n",
|
||||
" print(f\"Successfully extracted {len(news_teasers)} news teasers\")\n",
|
||||
" print(json.dumps(news_teasers[0], indent=2))\n",
|
||||
"\n",
|
||||
"await extract_news_teasers()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "FnyVhJaByZQN"
|
||||
},
|
||||
"source": [
|
||||
"## Speed Comparison\n",
|
||||
"\n",
|
||||
"Let's compare the speed of Crawl4AI with Firecrawl, a paid service. Note that we can't run Firecrawl in this Colab environment, so we'll simulate its performance based on previously recorded data."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "agDD186f3wig"
|
||||
},
|
||||
"source": [
|
||||
"💡 **Note on Speed Comparison:**\n",
|
||||
"\n",
|
||||
"The speed test conducted here is running on Google Colab, where the internet speed and performance can vary and may not reflect optimal conditions. When we call Firecrawl's API, we're seeing its best performance, while Crawl4AI's performance is limited by Colab's network speed.\n",
|
||||
"\n",
|
||||
"For a more accurate comparison, it's recommended to run these tests on your own servers or computers with a stable and fast internet connection. Despite these limitations, Crawl4AI still demonstrates faster performance in this environment.\n",
|
||||
"\n",
|
||||
"If you run these tests locally, you may observe an even more significant speed advantage for Crawl4AI compared to other services."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "F7KwHv8G1LbY"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install firecrawl"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/"
|
||||
},
|
||||
"id": "91813zILyZQN",
|
||||
"outputId": "663223db-ab89-4976-b233-05ceca62b19b"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Firecrawl (simulated):\n",
|
||||
"Time taken: 4.38 seconds\n",
|
||||
"Content length: 41967 characters\n",
|
||||
"Images found: 49\n",
|
||||
"\n",
|
||||
"Crawl4AI (simple crawl):\n",
|
||||
"Time taken: 4.22 seconds\n",
|
||||
"Content length: 18221 characters\n",
|
||||
"Images found: 49\n",
|
||||
"\n",
|
||||
"Crawl4AI (with JavaScript execution):\n",
|
||||
"Time taken: 9.13 seconds\n",
|
||||
"Content length: 34243 characters\n",
|
||||
"Images found: 89\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"from google.colab import userdata\n",
|
||||
"os.environ['FIRECRAWL_API_KEY'] = userdata.get('FIRECRAWL_API_KEY')\n",
|
||||
"import time\n",
|
||||
"from firecrawl import FirecrawlApp\n",
|
||||
"\n",
|
||||
"async def speed_comparison():\n",
|
||||
" # Simulated Firecrawl performance\n",
|
||||
" app = FirecrawlApp(api_key=os.environ['FIRECRAWL_API_KEY'])\n",
|
||||
" start = time.time()\n",
|
||||
" scrape_status = app.scrape_url(\n",
|
||||
" 'https://www.nbcnews.com/business',\n",
|
||||
" params={'formats': ['markdown', 'html']}\n",
|
||||
" )\n",
|
||||
" end = time.time()\n",
|
||||
" print(\"Firecrawl (simulated):\")\n",
|
||||
" print(f\"Time taken: {end - start:.2f} seconds\")\n",
|
||||
" print(f\"Content length: {len(scrape_status['markdown'])} characters\")\n",
|
||||
" print(f\"Images found: {scrape_status['markdown'].count('cldnry.s-nbcnews.com')}\")\n",
|
||||
" print()\n",
|
||||
"\n",
|
||||
" async with AsyncWebCrawler() as crawler:\n",
|
||||
" # Crawl4AI simple crawl\n",
|
||||
" start = time.time()\n",
|
||||
" result = await crawler.arun(\n",
|
||||
" url=\"https://www.nbcnews.com/business\",\n",
|
||||
" word_count_threshold=0,\n",
|
||||
" bypass_cache=True,\n",
|
||||
" verbose=False\n",
|
||||
" )\n",
|
||||
" end = time.time()\n",
|
||||
" print(\"Crawl4AI (simple crawl):\")\n",
|
||||
" print(f\"Time taken: {end - start:.2f} seconds\")\n",
|
||||
" print(f\"Content length: {len(result.markdown)} characters\")\n",
|
||||
" print(f\"Images found: {result.markdown.count('cldnry.s-nbcnews.com')}\")\n",
|
||||
" print()\n",
|
||||
"\n",
|
||||
" # Crawl4AI with JavaScript execution\n",
|
||||
" start = time.time()\n",
|
||||
" result = await crawler.arun(\n",
|
||||
" url=\"https://www.nbcnews.com/business\",\n",
|
||||
" js_code=[\"const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More')); loadMoreButton && loadMoreButton.click();\"],\n",
|
||||
" word_count_threshold=0,\n",
|
||||
" bypass_cache=True,\n",
|
||||
" verbose=False\n",
|
||||
" )\n",
|
||||
" end = time.time()\n",
|
||||
" print(\"Crawl4AI (with JavaScript execution):\")\n",
|
||||
" print(f\"Time taken: {end - start:.2f} seconds\")\n",
|
||||
" print(f\"Content length: {len(result.markdown)} characters\")\n",
|
||||
" print(f\"Images found: {result.markdown.count('cldnry.s-nbcnews.com')}\")\n",
|
||||
"\n",
|
||||
"await speed_comparison()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "OBFFYVJIyZQN"
|
||||
},
|
||||
"source": [
|
||||
"If you run on a local machine with a proper internet speed:\n",
|
||||
"- Simple crawl: Crawl4AI is typically over 3-4 times faster than Firecrawl.\n",
|
||||
"- With JavaScript execution: Even when executing JavaScript to load more content (potentially doubling the number of images found), Crawl4AI is still faster than Firecrawl's simple crawl.\n",
|
||||
"\n",
|
||||
"Please note that actual performance may vary depending on network conditions and the specific content being crawled."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "A6_1RK1_yZQO"
|
||||
},
|
||||
"source": [
|
||||
"## Conclusion\n",
|
||||
"\n",
|
||||
"In this notebook, we've explored the powerful features of Crawl4AI, including:\n",
|
||||
"\n",
|
||||
"1. Basic crawling\n",
|
||||
"2. JavaScript execution and CSS selector usage\n",
|
||||
"3. Proxy support\n",
|
||||
"4. Structured data extraction with OpenAI\n",
|
||||
"5. Advanced multi-page crawling with JavaScript execution\n",
|
||||
"6. Fast structured output using JsonCssExtractionStrategy\n",
|
||||
"7. Speed comparison with other services\n",
|
||||
"\n",
|
||||
"Crawl4AI offers a fast, flexible, and powerful solution for web crawling and data extraction tasks. Its asynchronous architecture and advanced features make it suitable for a wide range of applications, from simple web scraping to complex, multi-page data extraction scenarios.\n",
|
||||
"\n",
|
||||
"For more information and advanced usage, please visit the [Crawl4AI documentation](https://docs.crawl4ai.com/).\n",
|
||||
"\n",
|
||||
"Happy crawling!"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": []
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.13"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
}
|
||||
@@ -11,7 +11,9 @@ from groq import Groq
|
||||
# Import threadpools to run the crawl_url function in a separate thread
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
|
||||
client = AsyncOpenAI(base_url="https://api.groq.com/openai/v1", api_key=os.getenv("GROQ_API_KEY"))
|
||||
client = AsyncOpenAI(
|
||||
base_url="https://api.groq.com/openai/v1", api_key=os.getenv("GROQ_API_KEY")
|
||||
)
|
||||
|
||||
# Instrument the OpenAI client
|
||||
cl.instrument_openai()
|
||||
@@ -25,41 +27,39 @@ settings = {
|
||||
"presence_penalty": 0,
|
||||
}
|
||||
|
||||
|
||||
def extract_urls(text):
|
||||
url_pattern = re.compile(r'(https?://\S+)')
|
||||
url_pattern = re.compile(r"(https?://\S+)")
|
||||
return url_pattern.findall(text)
|
||||
|
||||
|
||||
def crawl_url(url):
|
||||
data = {
|
||||
"urls": [url],
|
||||
"include_raw_html": True,
|
||||
"word_count_threshold": 10,
|
||||
"extraction_strategy": "NoExtractionStrategy",
|
||||
"chunking_strategy": "RegexChunking"
|
||||
"chunking_strategy": "RegexChunking",
|
||||
}
|
||||
response = requests.post("https://crawl4ai.com/crawl", json=data)
|
||||
response_data = response.json()
|
||||
response_data = response_data['results'][0]
|
||||
return response_data['markdown']
|
||||
response_data = response_data["results"][0]
|
||||
return response_data["markdown"]
|
||||
|
||||
|
||||
@cl.on_chat_start
|
||||
async def on_chat_start():
|
||||
cl.user_session.set("session", {
|
||||
"history": [],
|
||||
"context": {}
|
||||
})
|
||||
await cl.Message(
|
||||
content="Welcome to the chat! How can I assist you today?"
|
||||
).send()
|
||||
cl.user_session.set("session", {"history": [], "context": {}})
|
||||
await cl.Message(content="Welcome to the chat! How can I assist you today?").send()
|
||||
|
||||
|
||||
@cl.on_message
|
||||
async def on_message(message: cl.Message):
|
||||
user_session = cl.user_session.get("session")
|
||||
|
||||
|
||||
# Extract URLs from the user's message
|
||||
urls = extract_urls(message.content)
|
||||
|
||||
|
||||
|
||||
futures = []
|
||||
with ThreadPoolExecutor() as executor:
|
||||
for url in urls:
|
||||
@@ -69,16 +69,9 @@ async def on_message(message: cl.Message):
|
||||
|
||||
for url, result in zip(urls, results):
|
||||
ref_number = f"REF_{len(user_session['context']) + 1}"
|
||||
user_session["context"][ref_number] = {
|
||||
"url": url,
|
||||
"content": result
|
||||
}
|
||||
user_session["context"][ref_number] = {"url": url, "content": result}
|
||||
|
||||
|
||||
user_session["history"].append({
|
||||
"role": "user",
|
||||
"content": message.content
|
||||
})
|
||||
user_session["history"].append({"role": "user", "content": message.content})
|
||||
|
||||
# Create a system message that includes the context
|
||||
context_messages = [
|
||||
@@ -95,26 +88,17 @@ async def on_message(message: cl.Message):
|
||||
"If not, there is no need to add a references section. "
|
||||
"At the end of your response, provide a reference section listing the URLs and their REF numbers only if sources from the appendices were used.\n\n"
|
||||
"\n\n".join(context_messages)
|
||||
)
|
||||
),
|
||||
}
|
||||
else:
|
||||
system_message = {
|
||||
"role": "system",
|
||||
"content": "You are a helpful assistant."
|
||||
}
|
||||
|
||||
system_message = {"role": "system", "content": "You are a helpful assistant."}
|
||||
|
||||
msg = cl.Message(content="")
|
||||
await msg.send()
|
||||
|
||||
# Get response from the LLM
|
||||
stream = await client.chat.completions.create(
|
||||
messages=[
|
||||
system_message,
|
||||
*user_session["history"]
|
||||
],
|
||||
stream=True,
|
||||
**settings
|
||||
messages=[system_message, *user_session["history"]], stream=True, **settings
|
||||
)
|
||||
|
||||
assistant_response = ""
|
||||
@@ -124,10 +108,7 @@ async def on_message(message: cl.Message):
|
||||
await msg.stream_token(token)
|
||||
|
||||
# Add assistant message to the history
|
||||
user_session["history"].append({
|
||||
"role": "assistant",
|
||||
"content": assistant_response
|
||||
})
|
||||
user_session["history"].append({"role": "assistant", "content": assistant_response})
|
||||
await msg.update()
|
||||
|
||||
# Append the reference section to the assistant's response
|
||||
@@ -154,10 +135,11 @@ async def on_audio_chunk(chunk: cl.AudioChunk):
|
||||
|
||||
pass
|
||||
|
||||
|
||||
@cl.step(type="tool")
|
||||
async def speech_to_text(audio_file):
|
||||
cli = Groq()
|
||||
|
||||
|
||||
response = await client.audio.transcriptions.create(
|
||||
model="whisper-large-v3", file=audio_file
|
||||
)
|
||||
@@ -172,24 +154,19 @@ async def on_audio_end(elements: list[ElementBased]):
|
||||
audio_buffer.seek(0) # Move the file pointer to the beginning
|
||||
audio_file = audio_buffer.read()
|
||||
audio_mime_type: str = cl.user_session.get("audio_mime_type")
|
||||
|
||||
|
||||
start_time = time.time()
|
||||
whisper_input = (audio_buffer.name, audio_file, audio_mime_type)
|
||||
transcription = await speech_to_text(whisper_input)
|
||||
end_time = time.time()
|
||||
print(f"Transcription took {end_time - start_time} seconds")
|
||||
|
||||
user_msg = cl.Message(
|
||||
author="You",
|
||||
type="user_message",
|
||||
content=transcription
|
||||
)
|
||||
|
||||
user_msg = cl.Message(author="You", type="user_message", content=transcription)
|
||||
await user_msg.send()
|
||||
await on_message(user_msg)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
from chainlit.cli import run_chainlit
|
||||
|
||||
run_chainlit(__file__)
|
||||
|
||||
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
|
||||
import requests, base64, os
|
||||
|
||||
data = {
|
||||
@@ -6,59 +5,50 @@ data = {
|
||||
"screenshot": True,
|
||||
}
|
||||
|
||||
response = requests.post("https://crawl4ai.com/crawl", json=data)
|
||||
result = response.json()['results'][0]
|
||||
response = requests.post("https://crawl4ai.com/crawl", json=data)
|
||||
result = response.json()["results"][0]
|
||||
print(result.keys())
|
||||
# dict_keys(['url', 'html', 'success', 'cleaned_html', 'media',
|
||||
# 'links', 'screenshot', 'markdown', 'extracted_content',
|
||||
# dict_keys(['url', 'html', 'success', 'cleaned_html', 'media',
|
||||
# 'links', 'screenshot', 'markdown', 'extracted_content',
|
||||
# 'metadata', 'error_message'])
|
||||
with open("screenshot.png", "wb") as f:
|
||||
f.write(base64.b64decode(result['screenshot']))
|
||||
|
||||
f.write(base64.b64decode(result["screenshot"]))
|
||||
|
||||
# Example of filtering the content using CSS selectors
|
||||
data = {
|
||||
"urls": [
|
||||
"https://www.nbcnews.com/business"
|
||||
],
|
||||
"urls": ["https://www.nbcnews.com/business"],
|
||||
"css_selector": "article",
|
||||
"screenshot": True,
|
||||
}
|
||||
|
||||
# Example of executing a JS script on the page before extracting the content
|
||||
data = {
|
||||
"urls": [
|
||||
"https://www.nbcnews.com/business"
|
||||
],
|
||||
"urls": ["https://www.nbcnews.com/business"],
|
||||
"screenshot": True,
|
||||
'js' : ["""
|
||||
"js": [
|
||||
"""
|
||||
const loadMoreButton = Array.from(document.querySelectorAll('button')).
|
||||
find(button => button.textContent.includes('Load More'));
|
||||
loadMoreButton && loadMoreButton.click();
|
||||
"""]
|
||||
"""
|
||||
],
|
||||
}
|
||||
|
||||
# Example of using a custom extraction strategy
|
||||
data = {
|
||||
"urls": [
|
||||
"https://www.nbcnews.com/business"
|
||||
],
|
||||
"urls": ["https://www.nbcnews.com/business"],
|
||||
"extraction_strategy": "CosineStrategy",
|
||||
"extraction_strategy_args": {
|
||||
"semantic_filter": "inflation rent prices"
|
||||
},
|
||||
"extraction_strategy_args": {"semantic_filter": "inflation rent prices"},
|
||||
}
|
||||
|
||||
# Example of using LLM to extract content
|
||||
data = {
|
||||
"urls": [
|
||||
"https://www.nbcnews.com/business"
|
||||
],
|
||||
"urls": ["https://www.nbcnews.com/business"],
|
||||
"extraction_strategy": "LLMExtractionStrategy",
|
||||
"extraction_strategy_args": {
|
||||
"provider": "groq/llama3-8b-8192",
|
||||
"api_token": os.environ.get("GROQ_API_KEY"),
|
||||
"instruction": """I am interested in only financial news,
|
||||
and translate them in French."""
|
||||
and translate them in French.""",
|
||||
},
|
||||
}
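# Illustrative usage (an assumption based on the first request in this file): any of
# the `data` payloads above can be submitted the same way, for example:
#
#   response = requests.post("https://crawl4ai.com/crawl", json=data)
#   result = response.json()["results"][0]
#   print(result["extracted_content"])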
|
||||
|
||||
|
||||
135
docs/examples/scraping_strategies_performance.py
Normal file
@@ -0,0 +1,135 @@
|
||||
import time, re
|
||||
from crawl4ai.content_scraping_strategy import WebScrapingStrategy, LXMLWebScrapingStrategy
|
||||
import time
|
||||
import functools
|
||||
from collections import defaultdict
|
||||
|
||||
class TimingStats:
|
||||
def __init__(self):
|
||||
self.stats = defaultdict(lambda: defaultdict(lambda: {"calls": 0, "total_time": 0}))
|
||||
|
||||
def add(self, strategy_name, func_name, elapsed):
|
||||
self.stats[strategy_name][func_name]["calls"] += 1
|
||||
self.stats[strategy_name][func_name]["total_time"] += elapsed
|
||||
|
||||
def report(self):
|
||||
for strategy_name, funcs in self.stats.items():
|
||||
print(f"\n{strategy_name} Timing Breakdown:")
|
||||
print("-" * 60)
|
||||
print(f"{'Function':<30} {'Calls':<10} {'Total(s)':<10} {'Avg(ms)':<10}")
|
||||
print("-" * 60)
|
||||
|
||||
for func, data in sorted(funcs.items(), key=lambda x: x[1]["total_time"], reverse=True):
|
||||
avg_ms = (data["total_time"] / data["calls"]) * 1000
|
||||
print(f"{func:<30} {data['calls']:<10} {data['total_time']:<10.3f} {avg_ms:<10.2f}")
|
||||
|
||||
timing_stats = TimingStats()
|
||||
|
||||
# Modify timing decorator
|
||||
def timing_decorator(strategy_name):
|
||||
def decorator(func):
|
||||
@functools.wraps(func)
|
||||
def wrapper(*args, **kwargs):
|
||||
start = time.time()
|
||||
result = func(*args, **kwargs)
|
||||
elapsed = time.time() - start
|
||||
timing_stats.add(strategy_name, func.__name__, elapsed)
|
||||
return result
|
||||
return wrapper
|
||||
return decorator
|
||||
|
||||
# Modified decorator application
|
||||
def apply_decorators(cls, method_name, strategy_name):
|
||||
try:
|
||||
original_method = getattr(cls, method_name)
|
||||
decorated_method = timing_decorator(strategy_name)(original_method)
|
||||
setattr(cls, method_name, decorated_method)
|
||||
except AttributeError:
|
||||
print(f"Method {method_name} not found in class {cls.__name__}.")
|
||||
|
||||
# Apply to key methods
|
||||
methods_to_profile = [
|
||||
'_scrap',
|
||||
# 'process_element',
|
||||
'_process_element',
|
||||
'process_image',
|
||||
]
|
||||
|
||||
|
||||
# Apply decorators to both strategies
|
||||
for strategy, name in [(WebScrapingStrategy, "Original"), (LXMLWebScrapingStrategy, "LXML")]:
|
||||
for method in methods_to_profile:
|
||||
apply_decorators(strategy, method, name)
|
||||
|
||||
|
||||
def generate_large_html(n_elements=1000):
|
||||
html = ['<!DOCTYPE html><html><head></head><body>']
|
||||
for i in range(n_elements):
|
||||
html.append(f'''
|
||||
<div class="article">
|
||||
<h2>Heading {i}</h2>
|
||||
<div>
|
||||
<div>
|
||||
<p>This is paragraph {i} with some content and a <a href="http://example.com/{i}">link</a></p>
|
||||
</div>
|
||||
</div>
|
||||
<img src="image{i}.jpg" alt="Image {i}">
|
||||
<ul>
|
||||
<li>List item {i}.1</li>
|
||||
<li>List item {i}.2</li>
|
||||
</ul>
|
||||
</div>
|
||||
''')
|
||||
html.append('</body></html>')
|
||||
return ''.join(html)
|
||||
|
||||
def test_scraping():
|
||||
# Initialize both scrapers
|
||||
original_scraper = WebScrapingStrategy()
|
||||
selected_scraper = LXMLWebScrapingStrategy()
|
||||
|
||||
# Generate test HTML
|
||||
print("Generating HTML...")
|
||||
html = generate_large_html(5000)
|
||||
print(f"HTML Size: {len(html)/1024:.2f} KB")
|
||||
|
||||
# Time the scraping
|
||||
print("\nStarting scrape...")
|
||||
start_time = time.time()
|
||||
|
||||
kwargs = {
|
||||
"url": "http://example.com",
|
||||
"html": html,
|
||||
"word_count_threshold": 5,
|
||||
"keep_data_attributes": True
|
||||
}
|
||||
|
||||
t1 = time.perf_counter()
|
||||
result_selected = selected_scraper.scrap(**kwargs)
|
||||
t2 = time.perf_counter()
|
||||
|
||||
result_original = original_scraper.scrap(**kwargs)
|
||||
t3 = time.perf_counter()
|
||||
|
||||
elapsed = t3 - start_time
|
||||
print(f"\nScraping completed in {elapsed:.2f} seconds")
|
||||
|
||||
timing_stats.report()
|
||||
|
||||
# Print stats of LXML output
|
||||
print("\Turbo Output:")
|
||||
print(f"\nExtracted links: {len(result_selected.links.internal) + len(result_selected.links.external)}")
|
||||
print(f"Extracted images: {len(result_selected.media.images)}")
|
||||
print(f"Clean HTML size: {len(result_selected.cleaned_html)/1024:.2f} KB")
|
||||
print(f"Scraping time: {t2 - t1:.2f} seconds")
|
||||
|
||||
# Print stats of original output
|
||||
print("\nOriginal Output:")
|
||||
print(f"\nExtracted links: {len(result_original.links.internal) + len(result_original.links.external)}")
|
||||
print(f"Extracted images: {len(result_original.media.images)}")
|
||||
print(f"Clean HTML size: {len(result_original.cleaned_html)/1024:.2f} KB")
|
||||
print(f"Scraping time: {t3 - t1:.2f} seconds")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
test_scraping()
|
||||
51
docs/examples/ssl_example.py
Normal file
@@ -0,0 +1,51 @@
|
||||
"""Example showing how to work with SSL certificates in Crawl4AI."""
|
||||
|
||||
import asyncio
|
||||
import os
|
||||
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, CacheMode
|
||||
|
||||
# Create tmp directory if it doesn't exist
|
||||
parent_dir = os.path.dirname(
|
||||
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
||||
)
|
||||
tmp_dir = os.path.join(parent_dir, "tmp")
|
||||
os.makedirs(tmp_dir, exist_ok=True)
|
||||
|
||||
|
||||
async def main():
|
||||
# Configure crawler to fetch SSL certificate
|
||||
config = CrawlerRunConfig(
|
||||
fetch_ssl_certificate=True,
|
||||
cache_mode=CacheMode.BYPASS, # Bypass cache to always get fresh certificates
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result = await crawler.arun(url="https://example.com", config=config)
|
||||
|
||||
if result.success and result.ssl_certificate:
|
||||
cert = result.ssl_certificate
|
||||
|
||||
# 1. Access certificate properties directly
|
||||
print("\nCertificate Information:")
|
||||
print(f"Issuer: {cert.issuer.get('CN', '')}")
|
||||
print(f"Valid until: {cert.valid_until}")
|
||||
print(f"Fingerprint: {cert.fingerprint}")
|
||||
|
||||
# 2. Export certificate in different formats
|
||||
cert.to_json(os.path.join(tmp_dir, "certificate.json")) # For analysis
|
||||
print("\nCertificate exported to:")
|
||||
print(f"- JSON: {os.path.join(tmp_dir, 'certificate.json')}")
|
||||
|
||||
pem_data = cert.to_pem(
|
||||
os.path.join(tmp_dir, "certificate.pem")
|
||||
) # For web servers
|
||||
print(f"- PEM: {os.path.join(tmp_dir, 'certificate.pem')}")
|
||||
|
||||
der_data = cert.to_der(
|
||||
os.path.join(tmp_dir, "certificate.der")
|
||||
) # For Java apps
|
||||
print(f"- DER: {os.path.join(tmp_dir, 'certificate.der')}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
225
docs/examples/storage_state_tutorial.md
Normal file
@@ -0,0 +1,225 @@
|
||||
### Using `storage_state` to Pre-Load Cookies and LocalStorage
|
||||
|
||||
Crawl4ai’s `AsyncWebCrawler` lets you preserve and reuse session data, including cookies and localStorage, across multiple runs. By providing a `storage_state`, you can start your crawls already “logged in” or with any other necessary session data—no need to repeat the login flow every time.
|
||||
|
||||
#### What is `storage_state`?
|
||||
|
||||
`storage_state` can be:
|
||||
|
||||
- A dictionary containing cookies and localStorage data.
|
||||
- A path to a JSON file that holds this information.
|
||||
|
||||
When you pass `storage_state` to the crawler, it applies these cookies and localStorage entries before loading any pages. This means your crawler effectively starts in a known authenticated or pre-configured state.
|
||||
|
||||
#### Example Structure
|
||||
|
||||
Here’s an example storage state:
|
||||
|
||||
```json
|
||||
{
|
||||
"cookies": [
|
||||
{
|
||||
"name": "session",
|
||||
"value": "abcd1234",
|
||||
"domain": "example.com",
|
||||
"path": "/",
|
||||
"expires": 1675363572.037711,
|
||||
"httpOnly": false,
|
||||
"secure": false,
|
||||
"sameSite": "None"
|
||||
}
|
||||
],
|
||||
"origins": [
|
||||
{
|
||||
"origin": "https://example.com",
|
||||
"localStorage": [
|
||||
{ "name": "token", "value": "my_auth_token" },
|
||||
{ "name": "refreshToken", "value": "my_refresh_token" }
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
This JSON sets a `session` cookie and two localStorage entries (`token` and `refreshToken`) for `https://example.com`.
|
||||
|
||||
---
|
||||
|
||||
### Passing `storage_state` as a Dictionary
|
||||
|
||||
You can directly provide the data as a dictionary:
|
||||
|
||||
```python
|
||||
import asyncio
|
||||
from crawl4ai import AsyncWebCrawler
|
||||
|
||||
async def main():
|
||||
storage_dict = {
|
||||
"cookies": [
|
||||
{
|
||||
"name": "session",
|
||||
"value": "abcd1234",
|
||||
"domain": "example.com",
|
||||
"path": "/",
|
||||
"expires": 1675363572.037711,
|
||||
"httpOnly": False,
|
||||
"secure": False,
|
||||
"sameSite": "None"
|
||||
}
|
||||
],
|
||||
"origins": [
|
||||
{
|
||||
"origin": "https://example.com",
|
||||
"localStorage": [
|
||||
{"name": "token", "value": "my_auth_token"},
|
||||
{"name": "refreshToken", "value": "my_refresh_token"}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
async with AsyncWebCrawler(
|
||||
headless=True,
|
||||
storage_state=storage_dict
|
||||
) as crawler:
|
||||
result = await crawler.arun(url='https://example.com/protected')
|
||||
if result.success:
|
||||
print("Crawl succeeded with pre-loaded session data!")
|
||||
print("Page HTML length:", len(result.html))
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Passing `storage_state` as a File
|
||||
|
||||
If you prefer a file-based approach, save the JSON above to `mystate.json` and reference it:
|
||||
|
||||
```python
|
||||
import asyncio
|
||||
from crawl4ai import AsyncWebCrawler
|
||||
|
||||
async def main():
|
||||
async with AsyncWebCrawler(
|
||||
headless=True,
|
||||
storage_state="mystate.json" # Uses a JSON file instead of a dictionary
|
||||
) as crawler:
|
||||
result = await crawler.arun(url='https://example.com/protected')
|
||||
if result.success:
|
||||
print("Crawl succeeded with pre-loaded session data!")
|
||||
print("Page HTML length:", len(result.html))
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Using `storage_state` to Avoid Repeated Logins (Sign In Once, Use Later)
|
||||
|
||||
A common scenario is needing to log in to a site (entering a username/password, etc.) to access protected pages. Doing so on every crawl is cumbersome. Instead, you can:
|
||||
|
||||
1. Perform the login once in a hook.
|
||||
2. After login completes, export the resulting `storage_state` to a file.
|
||||
3. On subsequent runs, provide that `storage_state` to skip the login step.
|
||||
|
||||
**Step-by-Step Example:**
|
||||
|
||||
**First Run (Perform Login and Save State):**
|
||||
|
||||
```python
import asyncio
from crawl4ai import AsyncWebCrawler, CacheMode
from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator

async def on_browser_created_hook(browser):
    # Access the default context and create a page
    context = browser.contexts[0]
    page = await context.new_page()

    # Navigate to the login page
    await page.goto("https://example.com/login", wait_until="domcontentloaded")

    # Fill in credentials and submit
    await page.fill("input[name='username']", "myuser")
    await page.fill("input[name='password']", "mypassword")
    await page.click("button[type='submit']")
    await page.wait_for_load_state("networkidle")

    # Now the site sets tokens in localStorage and cookies
    # Export this state to a file so we can reuse it
    await context.storage_state(path="my_storage_state.json")
    await page.close()

async def main():
    # First run: perform login and export the storage_state
    async with AsyncWebCrawler(
        headless=True,
        verbose=True,
        hooks={"on_browser_created": on_browser_created_hook},
        use_persistent_context=True,
        user_data_dir="./my_user_data"
    ) as crawler:

        # After on_browser_created_hook runs, we have storage_state saved to my_storage_state.json
        result = await crawler.arun(
            url='https://example.com/protected-page',
            cache_mode=CacheMode.BYPASS,
            markdown_generator=DefaultMarkdownGenerator(options={"ignore_links": True}),
        )
        print("First run result success:", result.success)
        if result.success:
            print("Protected page HTML length:", len(result.html))

if __name__ == "__main__":
    asyncio.run(main())
```

**Second Run (Reuse Saved State, No Login Needed):**

```python
import asyncio
from crawl4ai import AsyncWebCrawler, CacheMode
from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator

async def main():
    # Second run: no need to hook on_browser_created this time.
    # Just provide the previously saved storage state.
    async with AsyncWebCrawler(
        headless=True,
        verbose=True,
        use_persistent_context=True,
        user_data_dir="./my_user_data",
        storage_state="my_storage_state.json"  # Reuse previously exported state
    ) as crawler:

        # Now the crawler starts already logged in
        result = await crawler.arun(
            url='https://example.com/protected-page',
            cache_mode=CacheMode.BYPASS,
            markdown_generator=DefaultMarkdownGenerator(options={"ignore_links": True}),
        )
        print("Second run result success:", result.success)
        if result.success:
            print("Protected page HTML length:", len(result.html))

if __name__ == "__main__":
    asyncio.run(main())
```

**What’s Happening Here?**

- During the first run, the `on_browser_created_hook` logs into the site.
- After logging in, the crawler exports the current session (cookies, localStorage, etc.) to `my_storage_state.json`.
- On subsequent runs, passing `storage_state="my_storage_state.json"` starts the browser context with these tokens already in place, skipping the login steps.

**Sign Out Scenario:**
If the website allows you to sign out by clearing tokens or by navigating to a sign-out URL, you can also run a script that uses `on_browser_created_hook` or `arun` to simulate signing out, then export the resulting `storage_state` again. That would give you a baseline “logged out” state to start fresh from next time.
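
For instance, a minimal sketch of such a sign-out run might look like this, assuming the site exposes a sign-out URL and that the `hooks` and `storage_state` arguments shown above can be combined; `https://example.com/logout` and `logged_out_state.json` are placeholder names:

```python
import asyncio
from crawl4ai import AsyncWebCrawler

async def on_browser_created_logout_hook(browser):
    # Start from the signed-in context, visit the (hypothetical) sign-out URL,
    # then export the now logged-out storage state for later runs.
    context = browser.contexts[0]
    page = await context.new_page()
    await page.goto("https://example.com/logout", wait_until="domcontentloaded")
    await context.storage_state(path="logged_out_state.json")
    await page.close()

async def main():
    async with AsyncWebCrawler(
        headless=True,
        hooks={"on_browser_created": on_browser_created_logout_hook},
        storage_state="my_storage_state.json"  # begin from the signed-in state
    ) as crawler:
        await crawler.arun(url="https://example.com")

if __name__ == "__main__":
    asyncio.run(main())
```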

---

### Conclusion

By using `storage_state`, you can skip repetitive actions like logging in and jump straight into crawling protected content. Whether you provide a file path or a dictionary, this feature helps maintain state between crawls and simplifies your data-extraction pipelines.

@@ -1,39 +1,41 @@
 import os
 import time
 import json
 from crawl4ai.web_crawler import WebCrawler
 from crawl4ai.chunking_strategy import *
 from crawl4ai.extraction_strategy import *
 from crawl4ai.crawler_strategy import *

-url = r'https://marketplace.visualstudio.com/items?itemName=Unclecode.groqopilot'
+url = r"https://marketplace.visualstudio.com/items?itemName=Unclecode.groqopilot"

 crawler = WebCrawler()
 crawler.warmup()

 from pydantic import BaseModel, Field


 class PageSummary(BaseModel):
     title: str = Field(..., description="Title of the page.")
     summary: str = Field(..., description="Summary of the page.")
     brief_summary: str = Field(..., description="Brief summary of the page.")
     keywords: list = Field(..., description="Keywords assigned to the page.")


 result = crawler.run(
     url=url,
     word_count_threshold=1,
-    extraction_strategy= LLMExtractionStrategy(
-        provider= "openai/gpt-4o", api_token = os.getenv('OPENAI_API_KEY'),
+    extraction_strategy=LLMExtractionStrategy(
+        provider="openai/gpt-4o",
+        api_token=os.getenv("OPENAI_API_KEY"),
         schema=PageSummary.model_json_schema(),
         extraction_type="schema",
-        apply_chunking =False,
-        instruction="From the crawled content, extract the following details: "\
-        "1. Title of the page "\
-        "2. Summary of the page, which is a detailed summary "\
-        "3. Brief summary of the page, which is a paragraph text "\
-        "4. Keywords assigned to the page, which is a list of keywords. "\
-        'The extracted JSON format should look like this: '\
-        '{ "title": "Page Title", "summary": "Detailed summary of the page.", "brief_summary": "Brief summary in a paragraph.", "keywords": ["keyword1", "keyword2", "keyword3"] }'
+        apply_chunking=False,
+        instruction="From the crawled content, extract the following details: "
+        "1. Title of the page "
+        "2. Summary of the page, which is a detailed summary "
+        "3. Brief summary of the page, which is a paragraph text "
+        "4. Keywords assigned to the page, which is a list of keywords. "
+        "The extracted JSON format should look like this: "
+        '{ "title": "Page Title", "summary": "Detailed summary of the page.", "brief_summary": "Brief summary in a paragraph.", "keywords": ["keyword1", "keyword2", "keyword3"] }',
     ),
     bypass_cache=True,
 )

@@ -1,281 +0,0 @@
|
||||
from openai import AsyncOpenAI
|
||||
from chainlit.types import ThreadDict
|
||||
import chainlit as cl
|
||||
from chainlit.input_widget import Select, Switch, Slider
|
||||
client = AsyncOpenAI()
|
||||
|
||||
# Instrument the OpenAI client
|
||||
cl.instrument_openai()
|
||||
|
||||
settings = {
|
||||
"model": "gpt-3.5-turbo",
|
||||
"temperature": 0.5,
|
||||
"max_tokens": 500,
|
||||
"top_p": 1,
|
||||
"frequency_penalty": 0,
|
||||
"presence_penalty": 0,
|
||||
}
|
||||
|
||||
@cl.action_callback("action_button")
|
||||
async def on_action(action: cl.Action):
|
||||
print("The user clicked on the action button!")
|
||||
|
||||
return "Thank you for clicking on the action button!"
|
||||
|
||||
@cl.set_chat_profiles
|
||||
async def chat_profile():
|
||||
return [
|
||||
cl.ChatProfile(
|
||||
name="GPT-3.5",
|
||||
markdown_description="The underlying LLM model is **GPT-3.5**.",
|
||||
icon="https://picsum.photos/200",
|
||||
),
|
||||
cl.ChatProfile(
|
||||
name="GPT-4",
|
||||
markdown_description="The underlying LLM model is **GPT-4**.",
|
||||
icon="https://picsum.photos/250",
|
||||
),
|
||||
]
|
||||
|
||||
@cl.on_chat_start
|
||||
async def on_chat_start():
|
||||
|
||||
settings = await cl.ChatSettings(
|
||||
[
|
||||
Select(
|
||||
id="Model",
|
||||
label="OpenAI - Model",
|
||||
values=["gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-4", "gpt-4-32k"],
|
||||
initial_index=0,
|
||||
),
|
||||
Switch(id="Streaming", label="OpenAI - Stream Tokens", initial=True),
|
||||
Slider(
|
||||
id="Temperature",
|
||||
label="OpenAI - Temperature",
|
||||
initial=1,
|
||||
min=0,
|
||||
max=2,
|
||||
step=0.1,
|
||||
),
|
||||
Slider(
|
||||
id="SAI_Steps",
|
||||
label="Stability AI - Steps",
|
||||
initial=30,
|
||||
min=10,
|
||||
max=150,
|
||||
step=1,
|
||||
description="Amount of inference steps performed on image generation.",
|
||||
),
|
||||
Slider(
|
||||
id="SAI_Cfg_Scale",
|
||||
label="Stability AI - Cfg_Scale",
|
||||
initial=7,
|
||||
min=1,
|
||||
max=35,
|
||||
step=0.1,
|
||||
description="Influences how strongly your generation is guided to match your prompt.",
|
||||
),
|
||||
Slider(
|
||||
id="SAI_Width",
|
||||
label="Stability AI - Image Width",
|
||||
initial=512,
|
||||
min=256,
|
||||
max=2048,
|
||||
step=64,
|
||||
tooltip="Measured in pixels",
|
||||
),
|
||||
Slider(
|
||||
id="SAI_Height",
|
||||
label="Stability AI - Image Height",
|
||||
initial=512,
|
||||
min=256,
|
||||
max=2048,
|
||||
step=64,
|
||||
tooltip="Measured in pixels",
|
||||
),
|
||||
]
|
||||
).send()
|
||||
|
||||
chat_profile = cl.user_session.get("chat_profile")
|
||||
await cl.Message(
|
||||
content=f"starting chat using the {chat_profile} chat profile"
|
||||
).send()
|
||||
|
||||
print("A new chat session has started!")
|
||||
cl.user_session.set("session", {
|
||||
"history": [],
|
||||
"context": []
|
||||
})
|
||||
|
||||
image = cl.Image(url="https://c.tenor.com/uzWDSSLMCmkAAAAd/tenor.gif", name="cat image", display="inline")
|
||||
|
||||
# Attach the image to the message
|
||||
await cl.Message(
|
||||
content="You are such a good girl, aren't you?!",
|
||||
elements=[image],
|
||||
).send()
|
||||
|
||||
text_content = "Hello, this is a text element."
|
||||
elements = [
|
||||
cl.Text(name="simple_text", content=text_content, display="inline")
|
||||
]
|
||||
|
||||
await cl.Message(
|
||||
content="Check out this text element!",
|
||||
elements=elements,
|
||||
).send()
|
||||
|
||||
elements = [
|
||||
cl.Audio(path="./assets/audio.mp3", display="inline"),
|
||||
]
|
||||
await cl.Message(
|
||||
content="Here is an audio file",
|
||||
elements=elements,
|
||||
).send()
|
||||
|
||||
await cl.Avatar(
|
||||
name="Tool 1",
|
||||
url="https://avatars.githubusercontent.com/u/128686189?s=400&u=a1d1553023f8ea0921fba0debbe92a8c5f840dd9&v=4",
|
||||
).send()
|
||||
|
||||
await cl.Message(
|
||||
content="This message should not have an avatar!", author="Tool 0"
|
||||
).send()
|
||||
|
||||
await cl.Message(
|
||||
content="This message should have an avatar!", author="Tool 1"
|
||||
).send()
|
||||
|
||||
elements = [
|
||||
cl.File(
|
||||
name="quickstart.py",
|
||||
path="./quickstart.py",
|
||||
display="inline",
|
||||
),
|
||||
]
|
||||
|
||||
await cl.Message(
|
||||
content="This message has a file element", elements=elements
|
||||
).send()
|
||||
|
||||
# Sending an action button within a chatbot message
|
||||
actions = [
|
||||
cl.Action(name="action_button", value="example_value", description="Click me!")
|
||||
]
|
||||
|
||||
await cl.Message(content="Interact with this action button:", actions=actions).send()
|
||||
|
||||
# res = await cl.AskActionMessage(
|
||||
# content="Pick an action!",
|
||||
# actions=[
|
||||
# cl.Action(name="continue", value="continue", label="✅ Continue"),
|
||||
# cl.Action(name="cancel", value="cancel", label="❌ Cancel"),
|
||||
# ],
|
||||
# ).send()
|
||||
|
||||
# if res and res.get("value") == "continue":
|
||||
# await cl.Message(
|
||||
# content="Continue!",
|
||||
# ).send()
|
||||
|
||||
# import plotly.graph_objects as go
|
||||
# fig = go.Figure(
|
||||
# data=[go.Bar(y=[2, 1, 3])],
|
||||
# layout_title_text="An example figure",
|
||||
# )
|
||||
# elements = [cl.Plotly(name="chart", figure=fig, display="inline")]
|
||||
|
||||
# await cl.Message(content="This message has a chart", elements=elements).send()
|
||||
|
||||
# Sending a pdf with the local file path
|
||||
# elements = [
|
||||
# cl.Pdf(name="pdf1", display="inline", path="./pdf1.pdf")
|
||||
# ]
|
||||
|
||||
# cl.Message(content="Look at this local pdf!", elements=elements).send()
|
||||
|
||||
@cl.on_settings_update
|
||||
async def setup_agent(settings):
|
||||
print("on_settings_update", settings)
|
||||
|
||||
@cl.on_stop
|
||||
def on_stop():
|
||||
print("The user wants to stop the task!")
|
||||
|
||||
@cl.on_chat_end
|
||||
def on_chat_end():
|
||||
print("The user disconnected!")
|
||||
|
||||
|
||||
@cl.on_chat_resume
|
||||
async def on_chat_resume(thread: ThreadDict):
|
||||
print("The user resumed a previous chat session!")
|
||||
|
||||
|
||||
|
||||
|
||||
# @cl.on_message
|
||||
async def on_message(message: cl.Message):
|
||||
cl.user_session.get("session")["history"].append({
|
||||
"role": "user",
|
||||
"content": message.content
|
||||
})
|
||||
response = await client.chat.completions.create(
|
||||
messages=[
|
||||
{
|
||||
"content": "You are a helpful bot",
|
||||
"role": "system"
|
||||
},
|
||||
*cl.user_session.get("session")["history"]
|
||||
],
|
||||
**settings
|
||||
)
|
||||
|
||||
|
||||
# Add assistant message to the history
|
||||
cl.user_session.get("session")["history"].append({
|
||||
"role": "assistant",
|
||||
"content": response.choices[0].message.content
|
||||
})
|
||||
|
||||
# msg.content = response.choices[0].message.content
|
||||
# await msg.update()
|
||||
|
||||
# await cl.Message(content=response.choices[0].message.content).send()
|
||||
|
||||
@cl.on_message
|
||||
async def on_message(message: cl.Message):
|
||||
cl.user_session.get("session")["history"].append({
|
||||
"role": "user",
|
||||
"content": message.content
|
||||
})
|
||||
|
||||
msg = cl.Message(content="")
|
||||
await msg.send()
|
||||
|
||||
stream = await client.chat.completions.create(
|
||||
messages=[
|
||||
{
|
||||
"content": "You are a helpful bot",
|
||||
"role": "system"
|
||||
},
|
||||
*cl.user_session.get("session")["history"]
|
||||
],
|
||||
stream=True,
|
||||
**settings
|
||||
)
|
||||
|
||||
async for part in stream:
|
||||
if token := part.choices[0].delta.content or "":
|
||||
await msg.stream_token(token)
|
||||
|
||||
# Add assistant message to the history
|
||||
cl.user_session.get("session")["history"].append({
|
||||
"role": "assistant",
|
||||
"content": msg.content
|
||||
})
|
||||
await msg.update()
|
||||
|
||||
if __name__ == "__main__":
|
||||
from chainlit.cli import run_chainlit
|
||||
run_chainlit(__file__)
|
||||
@@ -1,238 +0,0 @@
|
||||
# Make sure to install the required packages: chainlit and groq
|
||||
import os, time
|
||||
from openai import AsyncOpenAI
|
||||
import chainlit as cl
|
||||
import re
|
||||
import requests
|
||||
from io import BytesIO
|
||||
from chainlit.element import ElementBased
|
||||
from groq import Groq
|
||||
|
||||
# Import threadpools to run the crawl_url function in a separate thread
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
|
||||
client = AsyncOpenAI(base_url="https://api.groq.com/openai/v1", api_key=os.getenv("GROQ_API_KEY"))
|
||||
|
||||
# Instrument the OpenAI client
|
||||
cl.instrument_openai()
|
||||
|
||||
settings = {
|
||||
"model": "llama3-8b-8192",
|
||||
"temperature": 0.5,
|
||||
"max_tokens": 500,
|
||||
"top_p": 1,
|
||||
"frequency_penalty": 0,
|
||||
"presence_penalty": 0,
|
||||
}
|
||||
|
||||
def extract_urls(text):
|
||||
url_pattern = re.compile(r'(https?://\S+)')
|
||||
return url_pattern.findall(text)
|
||||
|
||||
def crawl_url(url):
|
||||
data = {
|
||||
"urls": [url],
|
||||
"include_raw_html": True,
|
||||
"word_count_threshold": 10,
|
||||
"extraction_strategy": "NoExtractionStrategy",
|
||||
"chunking_strategy": "RegexChunking"
|
||||
}
|
||||
response = requests.post("https://crawl4ai.com/crawl", json=data)
|
||||
response_data = response.json()
|
||||
response_data = response_data['results'][0]
|
||||
return response_data['markdown']
|
||||
|
||||
@cl.on_chat_start
|
||||
async def on_chat_start():
|
||||
cl.user_session.set("session", {
|
||||
"history": [],
|
||||
"context": {}
|
||||
})
|
||||
await cl.Message(
|
||||
content="Welcome to the chat! How can I assist you today?"
|
||||
).send()
|
||||
|
||||
@cl.on_message
|
||||
async def on_message(message: cl.Message):
|
||||
user_session = cl.user_session.get("session")
|
||||
|
||||
# Extract URLs from the user's message
|
||||
urls = extract_urls(message.content)
|
||||
|
||||
|
||||
futures = []
|
||||
with ThreadPoolExecutor() as executor:
|
||||
for url in urls:
|
||||
futures.append(executor.submit(crawl_url, url))
|
||||
|
||||
results = [future.result() for future in futures]
|
||||
|
||||
for url, result in zip(urls, results):
|
||||
ref_number = f"REF_{len(user_session['context']) + 1}"
|
||||
user_session["context"][ref_number] = {
|
||||
"url": url,
|
||||
"content": result
|
||||
}
|
||||
|
||||
# for url in urls:
|
||||
# # Crawl the content of each URL and add it to the session context with a reference number
|
||||
# ref_number = f"REF_{len(user_session['context']) + 1}"
|
||||
# crawled_content = crawl_url(url)
|
||||
# user_session["context"][ref_number] = {
|
||||
# "url": url,
|
||||
# "content": crawled_content
|
||||
# }
|
||||
|
||||
user_session["history"].append({
|
||||
"role": "user",
|
||||
"content": message.content
|
||||
})
|
||||
|
||||
# Create a system message that includes the context
|
||||
context_messages = [
|
||||
f'<appendix ref="{ref}">\n{data["content"]}\n</appendix>'
|
||||
for ref, data in user_session["context"].items()
|
||||
]
|
||||
if context_messages:
|
||||
system_message = {
|
||||
"role": "system",
|
||||
"content": (
|
||||
"You are a helpful bot. Use the following context for answering questions. "
|
||||
"Refer to the sources using the REF number in square brackets, e.g., [1], only if the source is given in the appendices below.\n\n"
|
||||
"If the question requires any information from the provided appendices or context, refer to the sources. "
|
||||
"If not, there is no need to add a references section. "
|
||||
"At the end of your response, provide a reference section listing the URLs and their REF numbers only if sources from the appendices were used.\n\n"
|
||||
"\n\n".join(context_messages)
|
||||
)
|
||||
}
|
||||
else:
|
||||
system_message = {
|
||||
"role": "system",
|
||||
"content": "You are a helpful assistant."
|
||||
}
|
||||
|
||||
|
||||
msg = cl.Message(content="")
|
||||
await msg.send()
|
||||
|
||||
# Get response from the LLM
|
||||
stream = await client.chat.completions.create(
|
||||
messages=[
|
||||
system_message,
|
||||
*user_session["history"]
|
||||
],
|
||||
stream=True,
|
||||
**settings
|
||||
)
|
||||
|
||||
assistant_response = ""
|
||||
async for part in stream:
|
||||
if token := part.choices[0].delta.content:
|
||||
assistant_response += token
|
||||
await msg.stream_token(token)
|
||||
|
||||
# Add assistant message to the history
|
||||
user_session["history"].append({
|
||||
"role": "assistant",
|
||||
"content": assistant_response
|
||||
})
|
||||
await msg.update()
|
||||
|
||||
# Append the reference section to the assistant's response
|
||||
reference_section = "\n\nReferences:\n"
|
||||
for ref, data in user_session["context"].items():
|
||||
reference_section += f"[{ref.split('_')[1]}]: {data['url']}\n"
|
||||
|
||||
msg.content += reference_section
|
||||
await msg.update()
|
||||
|
||||
|
||||
@cl.on_audio_chunk
|
||||
async def on_audio_chunk(chunk: cl.AudioChunk):
|
||||
if chunk.isStart:
|
||||
buffer = BytesIO()
|
||||
# This is required for whisper to recognize the file type
|
||||
buffer.name = f"input_audio.{chunk.mimeType.split('/')[1]}"
|
||||
# Initialize the session for a new audio stream
|
||||
cl.user_session.set("audio_buffer", buffer)
|
||||
cl.user_session.set("audio_mime_type", chunk.mimeType)
|
||||
|
||||
# Write the chunks to a buffer and transcribe the whole audio at the end
|
||||
cl.user_session.get("audio_buffer").write(chunk.data)
|
||||
|
||||
pass
|
||||
|
||||
@cl.step(type="tool")
|
||||
async def speech_to_text(audio_file):
|
||||
cli = Groq()
|
||||
|
||||
# response = cli.audio.transcriptions.create(
|
||||
# file=audio_file, #(filename, file.read()),
|
||||
# model="whisper-large-v3",
|
||||
# )
|
||||
|
||||
response = await client.audio.transcriptions.create(
|
||||
model="whisper-large-v3", file=audio_file
|
||||
)
|
||||
|
||||
return response.text
|
||||
|
||||
|
||||
@cl.on_audio_end
|
||||
async def on_audio_end(elements: list[ElementBased]):
|
||||
# Get the audio buffer from the session
|
||||
audio_buffer: BytesIO = cl.user_session.get("audio_buffer")
|
||||
audio_buffer.seek(0) # Move the file pointer to the beginning
|
||||
audio_file = audio_buffer.read()
|
||||
audio_mime_type: str = cl.user_session.get("audio_mime_type")
|
||||
|
||||
# input_audio_el = cl.Audio(
|
||||
# mime=audio_mime_type, content=audio_file, name=audio_buffer.name
|
||||
# )
|
||||
# await cl.Message(
|
||||
# author="You",
|
||||
# type="user_message",
|
||||
# content="",
|
||||
# elements=[input_audio_el, *elements]
|
||||
# ).send()
|
||||
|
||||
# answer_message = await cl.Message(content="").send()
|
||||
|
||||
|
||||
start_time = time.time()
|
||||
whisper_input = (audio_buffer.name, audio_file, audio_mime_type)
|
||||
transcription = await speech_to_text(whisper_input)
|
||||
end_time = time.time()
|
||||
print(f"Transcription took {end_time - start_time} seconds")
|
||||
|
||||
user_msg = cl.Message(
|
||||
author="You",
|
||||
type="user_message",
|
||||
content=transcription
|
||||
)
|
||||
await user_msg.send()
|
||||
await on_message(user_msg)
|
||||
|
||||
# images = [file for file in elements if "image" in file.mime]
|
||||
|
||||
# text_answer = await generate_text_answer(transcription, images)
|
||||
|
||||
# output_name, output_audio = await text_to_speech(text_answer, audio_mime_type)
|
||||
|
||||
# output_audio_el = cl.Audio(
|
||||
# name=output_name,
|
||||
# auto_play=True,
|
||||
# mime=audio_mime_type,
|
||||
# content=output_audio,
|
||||
# )
|
||||
|
||||
# answer_message.elements = [output_audio_el]
|
||||
|
||||
# answer_message.content = transcription
|
||||
# await answer_message.update()
|
||||
|
||||
if __name__ == "__main__":
|
||||
from chainlit.cli import run_chainlit
|
||||
run_chainlit(__file__)
|
||||
|
||||
|
||||
docs/examples/tutorial_dynamic_clicks.md (new file, +117 lines)
@@ -0,0 +1,117 @@
# Tutorial: Clicking Buttons to Load More Content with Crawl4AI

## Introduction

When scraping dynamic websites, it’s common to encounter “Load More” or “Next” buttons that must be clicked to reveal new content. Crawl4AI provides a straightforward way to handle these situations using JavaScript execution and waiting conditions. In this tutorial, we’ll cover two approaches:

1. **Step-by-step (Session-based) Approach:** Multiple calls to `arun()` to progressively load more content.
2. **Single-call Approach:** Execute a more complex JavaScript snippet inside a single `arun()` call to handle all clicks at once before the extraction.

## Prerequisites

- A working installation of Crawl4AI
- Basic familiarity with Python’s `async`/`await` syntax

## Step-by-Step Approach

Use a session ID to maintain state across multiple `arun()` calls:

```python
import asyncio
from crawl4ai import AsyncWebCrawler, CacheMode

js_code = [
    # This JS finds the “Next” button and clicks it
    "const nextButton = document.querySelector('button.next'); nextButton && nextButton.click();"
]

wait_for_condition = "css:.new-content-class"

async def main():
    async with AsyncWebCrawler(headless=True, verbose=True) as crawler:
        # 1. Load the initial page
        result_initial = await crawler.arun(
            url="https://example.com",
            cache_mode=CacheMode.BYPASS,
            session_id="my_session"
        )

        # 2. Click the 'Next' button and wait for new content
        result_next = await crawler.arun(
            url="https://example.com",
            session_id="my_session",
            js_code=js_code,
            wait_for=wait_for_condition,
            js_only=True,
            cache_mode=CacheMode.BYPASS
        )

        # `result_next` now contains the updated HTML after clicking 'Next'

if __name__ == "__main__":
    asyncio.run(main())
```

**Key Points:**
- **`session_id`**: Keeps the same browser context open.
- **`js_code`**: Executes JavaScript in the context of the already loaded page.
- **`wait_for`**: Ensures the crawler waits until new content is fully loaded.
- **`js_only=True`**: Runs the JS in the current session without reloading the page.

By repeating the `arun()` call multiple times and modifying the `js_code` (e.g., clicking different modules or pages), you can iteratively load all the desired content.
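
For instance, a minimal sketch of that iterative pattern might look like the following; the `button.next` selector, the `css:.new-content-class` wait condition, and the `max_pages` cap are placeholder values to adapt to the target site:

```python
import asyncio
from crawl4ai import AsyncWebCrawler, CacheMode

async def crawl_all_pages(max_pages: int = 10):
    js_click_next = [
        "const nextButton = document.querySelector('button.next'); nextButton && nextButton.click();"
    ]
    pages_html = []
    async with AsyncWebCrawler(headless=True) as crawler:
        # Initial load establishes the session
        result = await crawler.arun(
            url="https://example.com",
            session_id="my_session",
            cache_mode=CacheMode.BYPASS
        )
        pages_html.append(result.html)

        # Keep clicking 'Next' in the same session until the cap is reached
        for _ in range(max_pages - 1):
            result = await crawler.arun(
                url="https://example.com",
                session_id="my_session",
                js_code=js_click_next,
                wait_for="css:.new-content-class",
                js_only=True,
                cache_mode=CacheMode.BYPASS
            )
            if not result.success:
                break
            pages_html.append(result.html)
    return pages_html

if __name__ == "__main__":
    asyncio.run(crawl_all_pages())
```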

## Single-call Approach

If the page allows it, you can run a single `arun()` call with a more elaborate JavaScript snippet that:
- Iterates over all the modules or "Next" buttons
- Clicks them one by one
- Waits for content updates between each click
- Returns control to Crawl4AI for extraction once done.

Example snippet:

```python
import asyncio
from crawl4ai import AsyncWebCrawler, CacheMode

js_code = [
    # Example JS that clicks multiple modules:
    """
    (async () => {
        const modules = document.querySelectorAll('.module-item');
        for (let i = 0; i < modules.length; i++) {
            modules[i].scrollIntoView();
            modules[i].click();
            // Wait for each module’s content to load, adjust 100ms as needed
            await new Promise(r => setTimeout(r, 100));
        }
    })();
    """
]

async def main():
    async with AsyncWebCrawler(headless=True, verbose=True) as crawler:
        result = await crawler.arun(
            url="https://example.com",
            js_code=js_code,
            wait_for="css:.final-loaded-content-class",
            cache_mode=CacheMode.BYPASS
        )
        # `result` now contains all content after all modules have been clicked in one go.

if __name__ == "__main__":
    asyncio.run(main())
```

**Key Points:**
- All interactions (clicks and waits) happen before the extraction.
- Ideal for pages where all steps can be done in a single pass.

## Choosing the Right Approach

- **Step-by-Step (Session-based)**:
  - Good when you need fine-grained control or must dynamically check conditions before clicking the next page.
  - Useful if the page requires multiple conditions checked at runtime.

- **Single-call**:
  - Perfect if the sequence of interactions is known in advance.
  - Cleaner code if the page’s structure is consistent and predictable.

## Conclusion

Crawl4AI makes it easy to handle dynamic content:
- Use session IDs and multiple `arun()` calls for stepwise crawling.
- Or pack all actions into one `arun()` call if the interactions are well-defined upfront.

This flexibility ensures you can handle a wide range of dynamic web pages efficiently.