Compare commits
167 Commits
v0.5.0.pos...codex/add-
Commit SHA1s:

6029097114, 897e017361, a3e9ef91ad, 76dd86d1b3, 206a9dfabd, aaf05910eb, a0555d5fa6, 38ebcbb304, 9b5ccac76e, 87d4b0fff4,
bd5a9ac632, 6650b2f34a, 5cc58f9bb3, baf7f6a6f5, 94e9959fe0, 7c2fd5202e, ee01b81f3e, 0e5d672763, cd2b490b40, 50f0b83fcd,
9499164d3c, 2140d9aca4, ccec40ed17, ad4dfb21e1, 7784b2468e, 146f9d415f, 37fd80e4b9, 949a93982e, c4f5651199, b0aa8bc9f7,
c98ffe2130, 4812f08a73, f3ebb38edf, 0007aea204, b5c25731e6, 5297e362f3, a58c8000aa, b27bb367e8, d2648eaa39, c2902fd200,
16b2318242, 907cba194f, 3bf78ff47a, 921e0c46b6, fd899f66aa, 30ec4f571f, 7db6b468d9, eed7f88f29, 94d486579c, 5206c6f2d6,
230f22da86, 793668a413, 82aa53aa59, cd7ff6f9c1, c56974cf59, dcc265458c, ecec53a8c1, 7d8e81fb2e, 9fc5d315af, d84508b4d5,
022f5c9e25, 3179d6ad0c, b2f3cb0dfa, 18e8227dfb, 7c358a1aee, 108b2a8bfb, 66ac07b4f3, a2061bf31e, 6f7ab9c927, 9038e9acbd,
02e627e0bd, 5b66208a7e, 591f55edc7, e1d9e2489c, b1693b1c21, 49d904ca0a, ca9351252a, 935d9d39f8, f8213c32b9, 14894b4d70,
7155778eac, 4133e5460d, 73fda8a6ec, 86df20234b, 179921a131, 9e16a4bb26, c5cac2b459, 555455d710, 765f856ed4, 757e3177ed,
d8357e80d2, ef1f0c4102, 1119f2f5b5, bb02398086, 3ff7eec8f3, d8cbeff386, 64f20ab44a, 57e0423b3a, c635f6b9a2, 7be5427283,
7f93e88379, 40d4dd36c9, d8f38f2298, 5c88d1310d, 4a20d7f7c2, 585e5e5973, e3111d0a32, 2f0e217751, 6405cf0a6f, 6eed4adc65,
bdd9db579a, 1107fa1d62, efa73257c5, 8c08521301, 462d5765e2, 6eeb2e4076, 0094cac675, 4ab0893ffb, e01d1e73e1, 471d110c5e,
f89113377a, 6740e87b4d, 8b761f232b, e0c2a7c284, ac2f9ae533, eedda1ae5c, 8cecbec7a7, 6432ff1257, 4359b12003, 5358ac0fc2,
529a79725e, 9109ecd8fc, 84883be513, 79328e4292, a24799918c, a31d7b86be, 7884a98be7, c190ba816d, a3954dd4c6, 6e3c048328,
b750542e6d, cbb8755972, dc36997a08, 1630fbdafe, 341b7a5f2a, 9547bada3a, 9d69fce834, c6a605ccce, 4aeb7ef9ad, a68cbb232b,
e1b3bfe6fb, f78c46446b, 1b72880007, 29f7915b79, 2327db6fdc, fd02dc782d, 3a234ec950, 9e89d27fcd, b3ec7ce960, baee4949d3,
14fe5ef873, fc425023f5, 504207faa6, f14e4a4b67, 1e819cdb26, 5edfea279d, 7c1705712d
**.github/workflows/main.yml** (vendored, new file, +35 lines)

@@ -0,0 +1,35 @@
name: Discord GitHub Notifications

on:
  issues:
    types: [opened]
  issue_comment:
    types: [created]
  pull_request:
    types: [opened]
  discussion:
    types: [created]

jobs:
  notify-discord:
    runs-on: ubuntu-latest
    steps:
      - name: Set webhook based on event type
        id: set-webhook
        run: |
          if [ "${{ github.event_name }}" == "discussion" ]; then
            echo "webhook=${{ secrets.DISCORD_DISCUSSIONS_WEBHOOK }}" >> $GITHUB_OUTPUT
          else
            echo "webhook=${{ secrets.DISCORD_WEBHOOK }}" >> $GITHUB_OUTPUT
          fi

      - name: Discord Notification
        uses: Ilshidur/action-discord@master
        env:
          DISCORD_WEBHOOK: ${{ steps.set-webhook.outputs.webhook }}
        with:
          args: |
            ${{ github.event_name == 'issues' && format('📣 New issue created: **{0}** by {1} - {2}', github.event.issue.title, github.event.issue.user.login, github.event.issue.html_url) ||
            github.event_name == 'issue_comment' && format('💬 New comment on issue **{0}** by {1} - {2}', github.event.issue.title, github.event.comment.user.login, github.event.comment.html_url) ||
            github.event_name == 'pull_request' && format('🔄 New PR opened: **{0}** by {1} - {2}', github.event.pull_request.title, github.event.pull_request.user.login, github.event.pull_request.html_url) ||
            format('💬 New discussion started: **{0}** by {1} - {2}', github.event.discussion.title, github.event.discussion.user.login, github.event.discussion.html_url) }}
**.gitignore** (vendored, 10 changed lines)

@@ -255,3 +255,13 @@ continue_config.json
.llm.env
.private/

CLAUDE_MONITOR.md
CLAUDE.md

tests/**/test_site
tests/**/reports
tests/**/benchmark_reports

docs/**/data
.codecat/
**CHANGELOG.md** (139 changed lines)

@@ -5,6 +5,145 @@ All notable changes to Crawl4AI will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [0.6.2] - 2025-05-02

### Added
- New `RegexExtractionStrategy` for fast pattern-based extraction without requiring LLM
- Built-in patterns for emails, URLs, phone numbers, dates, and more
- Support for custom regex patterns
- `generate_pattern` utility for LLM-assisted pattern creation (one-time use)
- Added `fit_html` as a top-level field in `CrawlResult` for optimized HTML extraction
- Added support for network response body capture in network request tracking
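A minimal sketch of how the new strategy plugs into a crawl. The built-in pattern flag names (`Email`, `Url`) and the exact shape of `extracted_content` are assumptions based on the bullets above, so check them against the released 0.6.2 API before relying on them.

```python
import asyncio
import json

from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, RegexExtractionStrategy


async def main():
    # Combine built-in patterns (flag names assumed from the list above).
    strategy = RegexExtractionStrategy(
        pattern=RegexExtractionStrategy.Email | RegexExtractionStrategy.Url
    )
    config = CrawlerRunConfig(extraction_strategy=strategy)

    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(url="https://example.com/contact", config=config)
        # extracted_content is typically a JSON string of matched items.
        print(json.loads(result.extracted_content))


asyncio.run(main())
```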
### Changed
- Updated documentation for no-LLM extraction strategies
- Enhanced API reference to include RegexExtractionStrategy examples and usage
- Improved HTML preprocessing with optimized performance for extraction strategies

## [0.6.1] - 2025-04-24

### Added
- New dedicated `tables` field in `CrawlResult` model for better table extraction handling
- Updated crypto_analysis_example.py to use the new tables field with backward compatibility

### Changed
- Improved playground UI in Docker deployment with better endpoint handling and UI feedback
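Because older results only exposed tables under `result.media["tables"]`, a small helper can smooth the transition; a hedged sketch that only assumes the field names mentioned in the entry above.

```python
def get_tables(result):
    """Prefer the dedicated CrawlResult.tables field (0.6.1+), falling back to
    the older media["tables"] location for results from earlier versions."""
    tables = getattr(result, "tables", None)
    if tables:
        return tables
    return (result.media or {}).get("tables", [])
```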
## [0.6.0] - 2025-04-22

### Added
- Browser pooling with page pre-warming and fine-grained **geolocation, locale, and timezone** controls
- Crawler pool manager (SDK + Docker API) for smarter resource allocation
- Network & console log capture plus MHTML snapshot export
- **Table extractor**: turn HTML `<table>`s into DataFrames or CSV with one flag
- High-volume stress-test framework in `tests/memory` and API load scripts
- MCP protocol endpoints with socket & SSE support; playground UI scaffold
- Docs v2 revamp: TOC, GitHub badge, copy-code buttons, Docker API demo
- "Ask AI" helper button *(work-in-progress, shipping soon)*
- New examples: geo-location usage, network/console capture, Docker API, markdown source selection, crypto analysis
- Expanded automated test suites for browser, Docker, MCP and memory benchmarks

### Changed
- Consolidated and renamed browser strategies; legacy docker strategy modules removed
- `ProxyConfig` moved to `async_configs`
- Server migrated to pool-based crawler management
- FastAPI validators replace custom query validation
- Docker build now uses Chromium base image
- Large-scale repo tidy-up (≈36 k insertions, ≈5 k deletions)

### Fixed
- Async crawler session leak, duplicate-visit handling, URL normalisation
- Target-element regressions in scraping strategies
- Logged-URL readability, encoded-URL decoding, middle truncation for long URLs
- Closed issues: #701, #733, #756, #774, #804, #822, #839, #841, #842, #843, #867, #902, #911

### Removed
- Obsolete modules under `crawl4ai/browser/*` superseded by the new pooled browser layer

### Deprecated
- Old markdown generator names now alias `DefaultMarkdownGenerator` and emit warnings

---

#### Upgrade notes
1. Update any direct imports from `crawl4ai/browser/*` to the new pooled browser modules
2. If you override `AsyncPlaywrightCrawlerStrategy.get_page`, adopt the new signature
3. Rebuild Docker images to pull the new Chromium layer
4. Switch to `DefaultMarkdownGenerator` (or silence the deprecation warning)
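For upgrade note 4 the switch is a one-line change; a sketch assuming the usual `markdown_generator` slot on `CrawlerRunConfig`.

```python
from crawl4ai import CrawlerRunConfig, DefaultMarkdownGenerator

# Use the canonical generator name instead of a deprecated alias.
config = CrawlerRunConfig(markdown_generator=DefaultMarkdownGenerator())
```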
---

`121 files changed, ≈36 223 insertions, ≈4 975 deletions`
### [Feature] 2025-04-21
- Implemented MCP protocol for machine-to-machine communication
- Added WebSocket and SSE transport for MCP server
- Exposed server endpoints via MCP protocol
- Created tests for MCP socket and SSE communication
- Enhanced Docker server with file handling and intelligent search
- Added PDF and screenshot endpoints with file saving capability
- Added JavaScript execution endpoint for page interaction
- Implemented advanced context search with BM25 and code chunking
- Added file path output support for generated assets
- Improved server endpoints and API surface
- Added intelligent context search with query filtering
- Added syntax-aware code function chunking
- Implemented efficient HTML processing pipeline
- Added support for controlling browser geolocation via new GeolocationConfig class
- Added locale and timezone configuration options to CrawlerRunConfig
- Added example script demonstrating geolocation and locale usage
- Added documentation for location-based identity features

### [Refactor] 2025-04-20
- Replaced crawler_manager.py with simpler crawler_pool.py implementation
- Added global page semaphore for hard concurrency cap
- Implemented browser pool with idle cleanup
- Added playground UI for testing and stress testing
- Updated API handlers to use pooled crawlers
- Enhanced logging levels and symbols
- Added memory tests and stress test utilities

### [Added] 2025-04-17
- Added content source selection feature for markdown generation
- New `content_source` parameter allows choosing between `cleaned_html`, `raw_html`, and `fit_html`
- Provides flexibility in how HTML content is processed before markdown conversion
- Added examples and documentation for the new feature
- Includes backward compatibility with default `cleaned_html` behavior
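A short sketch of the content source selection described in the [Added] 2025-04-17 entry; it assumes the parameter sits on `DefaultMarkdownGenerator`, as the JOURNAL.md entry later in this diff describes.

```python
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, DefaultMarkdownGenerator

# Build markdown from the raw page HTML instead of the cleaned HTML.
# Omitting content_source keeps the default "cleaned_html" behaviour.
md_generator = DefaultMarkdownGenerator(content_source="raw_html")
config = CrawlerRunConfig(markdown_generator=md_generator)
```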
## Version 0.5.0.post5 (2025-03-14)

### Added

- *(crawler)* Add experimental parameters dictionary to CrawlerRunConfig to support beta features
- *(tables)* Add comprehensive table detection and extraction functionality with scoring system
- *(monitor)* Add real-time crawler monitoring system with memory management
- *(content)* Add target_elements parameter for selective content extraction
- *(browser)* Add standalone CDP browser launch capability
- *(schema)* Add preprocess_html_for_schema utility for better HTML cleaning
- *(api)* Add special handling for single URL requests in Docker API
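A hedged sketch of the `target_elements` parameter added here; the selectors are illustrative and the exact scoping behaviour should be confirmed in the docs.

```python
from crawl4ai import CrawlerRunConfig

# Limit content extraction to specific regions of the page (CSS selectors).
config = CrawlerRunConfig(
    target_elements=["article.main-content", "section.comments"],
)
```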
### Changed

- *(filters)* Add reverse option to URLPatternFilter for inverting filter logic
- *(browser)* Make CSP nonce headers optional via experimental config
- *(browser)* Remove default cookie injection from page initialization
- *(crawler)* Optimize response handling for single-URL processing
- *(api)* Refactor crawl request handling to streamline processing
- *(config)* Update default provider to gpt-4o
- *(cache)* Change default cache_mode from aggressive to bypass in examples

### Fixed

- *(browser)* Clean up browser context creation code
- *(api)* Improve code formatting in API handler

### Breaking Changes

- WebScrapingStrategy no longer returns 'scraped_html' in its output dictionary
- Table extraction logic has been modified to better handle thead/tbody structures
- Default cookie injection has been removed from page initialization

## Version 0.5.0 (2025-03-02)

### Added
**Dockerfile** (70 changed lines)

@@ -1,4 +1,9 @@
|
||||
FROM python:3.10-slim
|
||||
FROM python:3.12-slim-bookworm AS build
|
||||
|
||||
# C4ai version
|
||||
ARG C4AI_VER=0.6.0
|
||||
ENV C4AI_VERSION=$C4AI_VER
|
||||
LABEL c4ai.version=$C4AI_VER
|
||||
|
||||
# Set build arguments
|
||||
ARG APP_HOME=/app
|
||||
@@ -17,14 +22,14 @@ ENV PYTHONFAULTHANDLER=1 \
|
||||
REDIS_HOST=localhost \
|
||||
REDIS_PORT=6379
|
||||
|
||||
ARG PYTHON_VERSION=3.10
|
||||
ARG PYTHON_VERSION=3.12
|
||||
ARG INSTALL_TYPE=default
|
||||
ARG ENABLE_GPU=false
|
||||
ARG TARGETARCH
|
||||
|
||||
LABEL maintainer="unclecode"
|
||||
LABEL description="🔥🕷️ Crawl4AI: Open-source LLM Friendly Web Crawler & scraper"
|
||||
LABEL version="1.0"
|
||||
LABEL version="1.0"
|
||||
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
build-essential \
|
||||
@@ -38,8 +43,17 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
libjpeg-dev \
|
||||
redis-server \
|
||||
supervisor \
|
||||
xvfb \
|
||||
x11vnc \
|
||||
fluxbox \
|
||||
websockify \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Install noVNC for browser-based VNC access
|
||||
RUN git clone --depth 1 https://github.com/novnc/noVNC /opt/novnc \
|
||||
&& git clone --depth 1 https://github.com/novnc/websockify /opt/novnc/utils/websockify
|
||||
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
libglib2.0-0 \
|
||||
libnss3 \
|
||||
@@ -62,11 +76,16 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
libcairo2 \
|
||||
libasound2 \
|
||||
libatspi2.0-0 \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN apt-get update && apt-get dist-upgrade -y \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN if [ "$ENABLE_GPU" = "true" ] && [ "$TARGETARCH" = "amd64" ] ; then \
|
||||
apt-get update && apt-get install -y --no-install-recommends \
|
||||
nvidia-cuda-toolkit \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/* ; \
|
||||
else \
|
||||
echo "Skipping NVIDIA CUDA Toolkit installation (unsupported platform or GPU disabled)"; \
|
||||
@@ -76,16 +95,24 @@ RUN if [ "$TARGETARCH" = "arm64" ]; then \
|
||||
echo "🦾 Installing ARM-specific optimizations"; \
|
||||
apt-get update && apt-get install -y --no-install-recommends \
|
||||
libopenblas-dev \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/*; \
|
||||
elif [ "$TARGETARCH" = "amd64" ]; then \
|
||||
echo "🖥️ Installing AMD64-specific optimizations"; \
|
||||
apt-get update && apt-get install -y --no-install-recommends \
|
||||
libomp-dev \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/*; \
|
||||
else \
|
||||
echo "Skipping platform-specific optimizations (unsupported platform)"; \
|
||||
fi
|
||||
|
||||
# Create a non-root user and group
|
||||
RUN groupadd -r appuser && useradd --no-log-init -r -g appuser appuser
|
||||
|
||||
# Create and set permissions for appuser home directory
|
||||
RUN mkdir -p /home/appuser && chown -R appuser:appuser /home/appuser
|
||||
|
||||
WORKDIR ${APP_HOME}
|
||||
|
||||
RUN echo '#!/bin/bash\n\
|
||||
@@ -103,6 +130,7 @@ fi' > /tmp/install.sh && chmod +x /tmp/install.sh
|
||||
|
||||
COPY . /tmp/project/
|
||||
|
||||
# Copy supervisor config first (might need root later, but okay for now)
|
||||
COPY deploy/docker/supervisord.conf .
|
||||
|
||||
COPY deploy/docker/requirements.txt .
|
||||
@@ -131,16 +159,34 @@ RUN if [ "$INSTALL_TYPE" = "all" ] ; then \
|
||||
else \
|
||||
pip install "/tmp/project" ; \
|
||||
fi
|
||||
|
||||
|
||||
RUN pip install --no-cache-dir --upgrade pip && \
|
||||
/tmp/install.sh && \
|
||||
python -c "import crawl4ai; print('✅ crawl4ai is ready to rock!')" && \
|
||||
python -c "from playwright.sync_api import sync_playwright; print('✅ Playwright is feeling dramatic!')"
|
||||
|
||||
RUN playwright install --with-deps chromium
|
||||
|
||||
RUN crawl4ai-setup
|
||||
|
||||
RUN playwright install --with-deps
|
||||
|
||||
RUN mkdir -p /home/appuser/.cache/ms-playwright \
|
||||
&& cp -r /root/.cache/ms-playwright/chromium-* /home/appuser/.cache/ms-playwright/ \
|
||||
&& chown -R appuser:appuser /home/appuser/.cache/ms-playwright
|
||||
|
||||
RUN crawl4ai-doctor
|
||||
|
||||
# Copy application code
|
||||
COPY deploy/docker/* ${APP_HOME}/
|
||||
|
||||
# copy the playground + any future static assets
|
||||
COPY deploy/docker/static ${APP_HOME}/static
|
||||
|
||||
# Change ownership of the application directory to the non-root user
|
||||
RUN chown -R appuser:appuser ${APP_HOME}
|
||||
|
||||
# give permissions to redis persistence dirs if used
|
||||
RUN mkdir -p /var/lib/redis /var/log/redis && chown -R appuser:appuser /var/lib/redis /var/log/redis
|
||||
|
||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
|
||||
CMD bash -c '\
|
||||
MEM=$(free -m | awk "/^Mem:/{print \$2}"); \
|
||||
@@ -149,8 +195,14 @@ HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
|
||||
exit 1; \
|
||||
fi && \
|
||||
redis-cli ping > /dev/null && \
|
||||
curl -f http://localhost:8000/health || exit 1'
|
||||
curl -f http://localhost:11235/health || exit 1'
|
||||
|
||||
EXPOSE 6379
|
||||
CMD ["supervisord", "-c", "supervisord.conf"]
|
||||
|
||||
# Switch to the non-root user before starting the application
|
||||
USER appuser
|
||||
|
||||
# Set environment variables to production
|
||||
ENV PYTHON_ENV=production
|
||||
|
||||
# Start the application using supervisord
|
||||
CMD ["supervisord", "-c", "supervisord.conf"]
|
||||
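Once a container built from this Dockerfile is running, the same health endpoint the `HEALTHCHECK` above now probes (port 11235 instead of 8000) can be checked from the host. A sketch; the tag and response format are assumptions.

```python
import requests

# After: docker run -d -p 11235:11235 --shm-size=1g unclecode/crawl4ai:<tag>
resp = requests.get("http://localhost:11235/health", timeout=5)
resp.raise_for_status()
print(resp.status_code, resp.text)
```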
**JOURNAL.md** (new file, +339 lines)

@@ -0,0 +1,339 @@
|
||||
# Development Journal
|
||||
|
||||
This journal tracks significant feature additions, bug fixes, and architectural decisions in the crawl4ai project. It serves as both documentation and a historical record of the project's evolution.
|
||||
|
||||
## [2025-04-17] Added Content Source Selection for Markdown Generation
|
||||
|
||||
**Feature:** Configurable content source for markdown generation
|
||||
|
||||
**Changes Made:**
|
||||
1. Added `content_source: str = "cleaned_html"` parameter to `MarkdownGenerationStrategy` class
|
||||
2. Updated `DefaultMarkdownGenerator` to accept and pass the content source parameter
|
||||
3. Renamed the `cleaned_html` parameter to `input_html` in the `generate_markdown` method
|
||||
4. Modified `AsyncWebCrawler.aprocess_html` to select the appropriate HTML source based on the generator's config
|
||||
5. Added `preprocess_html_for_schema` import in `async_webcrawler.py`
|
||||
|
||||
**Implementation Details:**
|
||||
- Added a new `content_source` parameter to specify which HTML input to use for markdown generation
|
||||
- Options include: "cleaned_html" (default), "raw_html", and "fit_html"
|
||||
- Used a dictionary dispatch pattern in `aprocess_html` to select the appropriate HTML source
|
||||
- Added proper error handling with fallback to cleaned_html if content source selection fails
|
||||
- Ensured backward compatibility by defaulting to "cleaned_html" option
|
||||
|
||||
**Files Modified:**
|
||||
- `crawl4ai/markdown_generation_strategy.py`: Added content_source parameter and updated the method signature
|
||||
- `crawl4ai/async_webcrawler.py`: Added HTML source selection logic and updated imports
|
||||
|
||||
**Examples:**
|
||||
- Created `docs/examples/content_source_example.py` demonstrating how to use the new parameter
|
||||
|
||||
**Challenges:**
|
||||
- Maintaining backward compatibility while reorganizing the parameter flow
|
||||
- Ensuring proper error handling for all content source options
|
||||
- Making the change with minimal code modifications
|
||||
|
||||
**Why This Feature:**
|
||||
The content source selection feature allows users to choose which HTML content to use as input for markdown generation:
|
||||
1. "cleaned_html" - Uses the post-processed HTML after scraping strategy (original behavior)
|
||||
2. "raw_html" - Uses the original raw HTML directly from the web page
|
||||
3. "fit_html" - Uses the preprocessed HTML optimized for schema extraction
|
||||
|
||||
This feature provides greater flexibility in how users generate markdown, enabling them to:
|
||||
- Capture more detailed content from the original HTML when needed
|
||||
- Use schema-optimized HTML when working with structured data
|
||||
- Choose the approach that best suits their specific use case
|
||||
## [2025-04-17] Implemented High Volume Stress Testing Solution for SDK
|
||||
|
||||
**Feature:** Comprehensive stress testing framework using `arun_many` and the dispatcher system to evaluate performance, concurrency handling, and identify potential issues under high-volume crawling scenarios.
|
||||
|
||||
**Changes Made:**
|
||||
1. Created a dedicated stress testing framework in the `benchmarking/` (or similar) directory.
|
||||
2. Implemented local test site generation (`SiteGenerator`) with configurable heavy HTML pages.
|
||||
3. Added basic memory usage tracking (`SimpleMemoryTracker`) using platform-specific commands (avoiding `psutil` dependency for this specific test).
|
||||
4. Utilized `CrawlerMonitor` from `crawl4ai` for rich terminal UI and real-time monitoring of test progress and dispatcher activity.
|
||||
5. Implemented detailed result summary saving (JSON) and memory sample logging (CSV).
|
||||
6. Developed `run_benchmark.py` to orchestrate tests with predefined configurations.
|
||||
7. Created `run_all.sh` as a simple wrapper for `run_benchmark.py`.
|
||||
|
||||
**Implementation Details:**
|
||||
- Generates a local test site with configurable pages containing heavy text and image content.
|
||||
- Uses Python's built-in `http.server` for local serving, minimizing network variance.
|
||||
- Leverages `crawl4ai`'s `arun_many` method for processing URLs.
|
||||
- Utilizes `MemoryAdaptiveDispatcher` to manage concurrency via the `max_sessions` parameter (note: memory adaptation features require `psutil`, not used by `SimpleMemoryTracker`).
|
||||
- Tracks memory usage via `SimpleMemoryTracker`, recording samples throughout test execution to a CSV file.
|
||||
- Uses `CrawlerMonitor` (which uses the `rich` library) for clear terminal visualization and progress reporting directly from the dispatcher.
|
||||
- Stores detailed final metrics in a JSON summary file.
|
||||
|
||||
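The bullets above describe the mechanism the stress test exercises; below is a minimal sketch of driving `arun_many` through a `MemoryAdaptiveDispatcher`, with the concurrency cap (`max_session_permit`) that the `--max-sessions` flag ultimately controls. The local URLs are illustrative only.

```python
import asyncio

from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, CacheMode, MemoryAdaptiveDispatcher


async def crawl_batch(urls):
    dispatcher = MemoryAdaptiveDispatcher(max_session_permit=16)  # hard concurrency cap
    config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS)

    async with AsyncWebCrawler() as crawler:
        results = await crawler.arun_many(urls=urls, config=config, dispatcher=dispatcher)
        ok = sum(1 for r in results if r.success)
        print(f"{ok}/{len(results)} pages crawled successfully")


asyncio.run(crawl_batch([f"http://localhost:8000/page_{i}.html" for i in range(100)]))
```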
**Files Created/Updated:**
|
||||
- `stress_test_sdk.py`: Main stress testing implementation using `arun_many`.
|
||||
- `benchmark_report.py`: (Assumed) Report generator for comparing test results.
|
||||
- `run_benchmark.py`: Test runner script with predefined configurations.
|
||||
- `run_all.sh`: Simple bash script wrapper for `run_benchmark.py`.
|
||||
- `USAGE.md`: Comprehensive documentation on usage and interpretation (updated).
|
||||
|
||||
**Testing Approach:**
|
||||
- Creates a controlled, reproducible test environment with a local HTTP server.
|
||||
- Processes URLs using `arun_many`, allowing the dispatcher to manage concurrency up to `max_sessions`.
|
||||
- Optionally logs per-batch summaries (when not in streaming mode) after processing chunks.
|
||||
- Supports different test sizes via `run_benchmark.py` configurations.
|
||||
- Records memory samples via platform commands for basic trend analysis.
|
||||
- Includes cleanup functionality for the test environment.
|
||||
|
||||
**Challenges:**
|
||||
- Ensuring proper cleanup of HTTP server processes.
|
||||
- Getting reliable memory tracking across platforms without adding heavy dependencies (`psutil`) to this specific test script.
|
||||
- Designing `run_benchmark.py` to correctly pass arguments to `stress_test_sdk.py`.
|
||||
|
||||
**Why This Feature:**
|
||||
The high volume stress testing solution addresses critical needs for ensuring Crawl4AI's `arun_many` reliability:
|
||||
1. Provides a reproducible way to evaluate performance under concurrent load.
|
||||
2. Allows testing the dispatcher's concurrency control (`max_session_permit`) and queue management.
|
||||
3. Enables performance tuning by observing throughput (`URLs/sec`) under different `max_sessions` settings.
|
||||
4. Creates a controlled environment for testing `arun_many` behavior.
|
||||
5. Supports continuous integration by providing deterministic test conditions for `arun_many`.
|
||||
|
||||
**Design Decisions:**
|
||||
- Chose local site generation for reproducibility and isolation from network issues.
|
||||
- Utilized the built-in `CrawlerMonitor` for real-time feedback, leveraging its `rich` integration.
|
||||
- Implemented optional per-batch logging in `stress_test_sdk.py` (when not streaming) to provide chunk-level summaries alongside the continuous monitor.
|
||||
- Adopted `arun_many` with a `MemoryAdaptiveDispatcher` as the core mechanism for parallel execution, reflecting the intended SDK usage.
|
||||
- Created `run_benchmark.py` to simplify running standard test configurations.
|
||||
- Used `SimpleMemoryTracker` to provide basic memory insights without requiring `psutil` for this particular test runner.
|
||||
|
||||
**Future Enhancements to Consider:**
|
||||
- Create a separate test variant that *does* use `psutil` to specifically stress the memory-adaptive features of the dispatcher.
|
||||
- Add support for generated JavaScript content.
|
||||
- Add support for Docker-based testing with explicit memory limits.
|
||||
- Enhance `benchmark_report.py` to provide more sophisticated analysis of performance and memory trends from the generated JSON/CSV files.
|
||||
|
||||
---
|
||||
|
||||
## [2025-04-17] Refined Stress Testing System Parameters and Execution
|
||||
|
||||
**Changes Made:**
|
||||
1. Corrected `run_benchmark.py` and `stress_test_sdk.py` to use `--max-sessions` instead of the incorrect `--workers` parameter, accurately reflecting dispatcher configuration.
|
||||
2. Updated `run_benchmark.py` argument handling to correctly pass all relevant custom parameters (including `--stream`, `--monitor-mode`, etc.) to `stress_test_sdk.py`.
|
||||
3. (Assuming changes in `benchmark_report.py`) Applied dark theme to benchmark reports for better readability.
|
||||
4. (Assuming changes in `benchmark_report.py`) Improved visualization code to eliminate matplotlib warnings.
|
||||
5. Updated `run_benchmark.py` to provide clickable `file://` links to generated reports in the terminal output.
|
||||
6. Updated `USAGE.md` with comprehensive parameter descriptions reflecting the final script arguments.
|
||||
7. Updated `run_all.sh` wrapper to correctly invoke `run_benchmark.py` with flexible arguments.
|
||||
|
||||
**Details of Changes:**
|
||||
|
||||
1. **Parameter Correction (`--max-sessions`)**:
|
||||
* Identified the fundamental misunderstanding where `--workers` was used incorrectly.
|
||||
* Refactored `stress_test_sdk.py` to accept `--max-sessions` and configure the `MemoryAdaptiveDispatcher`'s `max_session_permit` accordingly.
|
||||
* Updated `run_benchmark.py` argument parsing and command construction to use `--max-sessions`.
|
||||
* Updated `TEST_CONFIGS` in `run_benchmark.py` to use `max_sessions`.
|
||||
|
||||
2. **Argument Handling (`run_benchmark.py`)**:
|
||||
* Improved logic to collect all command-line arguments provided to `run_benchmark.py`.
|
||||
* Ensured all relevant arguments (like `--stream`, `--monitor-mode`, `--port`, `--use-rate-limiter`, etc.) are correctly forwarded when calling `stress_test_sdk.py` as a subprocess.
|
||||
|
||||
3. **Dark Theme & Visualization Fixes (Assumed in `benchmark_report.py`)**:
|
||||
* (Describes changes assumed to be made in the separate reporting script).
|
||||
|
||||
4. **Clickable Links (`run_benchmark.py`)**:
|
||||
* Added logic to find the latest HTML report and PNG chart in the `benchmark_reports` directory after `benchmark_report.py` runs.
|
||||
* Used `pathlib` to generate correct `file://` URLs for terminal output.
|
||||
|
||||
5. **Documentation Improvements (`USAGE.md`)**:
|
||||
* Rewrote sections to explain `arun_many`, dispatchers, and `--max-sessions`.
|
||||
* Updated parameter tables for all scripts (`stress_test_sdk.py`, `run_benchmark.py`).
|
||||
* Clarified the difference between batch and streaming modes and their effect on logging.
|
||||
* Updated examples to use correct arguments.
|
||||
|
||||
**Files Modified:**
|
||||
- `stress_test_sdk.py`: Changed `--workers` to `--max-sessions`, added new arguments, used `arun_many`.
|
||||
- `run_benchmark.py`: Changed argument handling, updated configs, calls `stress_test_sdk.py`.
|
||||
- `run_all.sh`: Updated to call `run_benchmark.py` correctly.
|
||||
- `USAGE.md`: Updated documentation extensively.
|
||||
- `benchmark_report.py`: (Assumed modifications for dark theme and viz fixes).
|
||||
|
||||
**Testing:**
|
||||
- Verified that `--max-sessions` correctly limits concurrency via the `CrawlerMonitor` output.
|
||||
- Confirmed that custom arguments passed to `run_benchmark.py` are forwarded to `stress_test_sdk.py`.
|
||||
- Validated clickable links work in supporting terminals.
|
||||
- Ensured documentation matches the final script parameters and behavior.
|
||||
|
||||
**Why These Changes:**
|
||||
These refinements correct the fundamental approach of the stress test to align with `crawl4ai`'s actual architecture and intended usage:
|
||||
1. Ensures the test evaluates the correct components (`arun_many`, `MemoryAdaptiveDispatcher`).
|
||||
2. Makes test configurations more accurate and flexible.
|
||||
3. Improves the usability of the testing framework through better argument handling and documentation.
|
||||
|
||||
|
||||
**Future Enhancements to Consider:**
|
||||
- Add support for generated JavaScript content to test JS rendering performance
|
||||
- Implement more sophisticated memory analysis like generational garbage collection tracking
|
||||
- Add support for Docker-based testing with memory limits to force OOM conditions
|
||||
- Create visualization tools for analyzing memory usage patterns across test runs
|
||||
- Add benchmark comparisons between different crawler versions or configurations
|
||||
|
||||
## [2025-04-17] Fixed Issues in Stress Testing System
|
||||
|
||||
**Changes Made:**
|
||||
1. Fixed custom parameter handling in run_benchmark.py
|
||||
2. Applied dark theme to benchmark reports for better readability
|
||||
3. Improved visualization code to eliminate matplotlib warnings
|
||||
4. Added clickable links to generated reports in terminal output
|
||||
5. Enhanced documentation with comprehensive parameter descriptions
|
||||
|
||||
**Details of Changes:**
|
||||
|
||||
1. **Custom Parameter Handling Fix**
|
||||
- Identified bug where custom URL count was being ignored in run_benchmark.py
|
||||
- Rewrote argument handling to use a custom args dictionary
|
||||
- Properly passed parameters to the test_simple_stress.py command
|
||||
- Added better UI indication of custom parameters in use
|
||||
|
||||
2. **Dark Theme Implementation**
|
||||
- Added complete dark theme to HTML benchmark reports
|
||||
- Applied dark styling to all visualization components
|
||||
- Used Nord-inspired color palette for charts and graphs
|
||||
- Improved contrast and readability for data visualization
|
||||
- Updated text colors and backgrounds for better eye comfort
|
||||
|
||||
3. **Matplotlib Warning Fixes**
|
||||
- Resolved warnings related to improper use of set_xticklabels()
|
||||
- Implemented correct x-axis positioning for bar charts
|
||||
- Ensured proper alignment of bar labels and data points
|
||||
- Updated plotting code to use modern matplotlib practices
|
||||
|
||||
4. **Documentation Improvements**
|
||||
- Created comprehensive USAGE.md with detailed instructions
|
||||
- Added parameter documentation for all scripts
|
||||
- Included examples for all common use cases
|
||||
- Provided detailed explanations for interpreting results
|
||||
- Added troubleshooting guide for common issues
|
||||
|
||||
**Files Modified:**
|
||||
- `tests/memory/run_benchmark.py`: Fixed custom parameter handling
|
||||
- `tests/memory/benchmark_report.py`: Added dark theme and fixed visualization warnings
|
||||
- `tests/memory/run_all.sh`: Added clickable links to reports
|
||||
- `tests/memory/USAGE.md`: Created comprehensive documentation
|
||||
|
||||
**Testing:**
|
||||
- Verified that custom URL counts are now correctly used
|
||||
- Confirmed dark theme is properly applied to all report elements
|
||||
- Checked that matplotlib warnings are no longer appearing
|
||||
- Validated clickable links to reports work in terminals that support them
|
||||
|
||||
**Why These Changes:**
|
||||
These improvements address several usability issues with the stress testing system:
|
||||
1. Better parameter handling ensures test configurations work as expected
|
||||
2. Dark theme reduces eye strain during extended test review sessions
|
||||
3. Fixing visualization warnings improves code quality and output clarity
|
||||
4. Enhanced documentation makes the system more accessible for future use
|
||||
|
||||
**Future Enhancements:**
|
||||
- Add additional visualization options for different types of analysis
|
||||
- Implement theme toggle to support both light and dark preferences
|
||||
- Add export options for embedding reports in other documentation
|
||||
- Create dedicated CI/CD integration templates for automated testing
|
||||
|
||||
## [2025-04-09] Added MHTML Capture Feature
|
||||
|
||||
**Feature:** MHTML snapshot capture of crawled pages
|
||||
|
||||
**Changes Made:**
|
||||
1. Added `capture_mhtml: bool = False` parameter to `CrawlerRunConfig` class
|
||||
2. Added `mhtml: Optional[str] = None` field to `CrawlResult` model
|
||||
3. Added `mhtml_data: Optional[str] = None` field to `AsyncCrawlResponse` class
|
||||
4. Implemented `capture_mhtml()` method in `AsyncPlaywrightCrawlerStrategy` class to capture MHTML via CDP
|
||||
5. Modified the crawler to capture MHTML when enabled and pass it to the result
|
||||
|
||||
**Implementation Details:**
|
||||
- MHTML capture uses Chrome DevTools Protocol (CDP) via Playwright's CDP session API
|
||||
- The implementation waits for page to fully load before capturing MHTML content
|
||||
- Enhanced waiting for JavaScript content with requestAnimationFrame for better JS content capture
|
||||
- We ensure all browser resources are properly cleaned up after capture
|
||||
|
||||
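A minimal sketch using the parameter and field named in this entry (`capture_mhtml` on `CrawlerRunConfig`, `mhtml` on the result); writing the snapshot to disk is just one possible use.

```python
import asyncio

from crawl4ai import AsyncWebCrawler, CrawlerRunConfig


async def snapshot(url):
    config = CrawlerRunConfig(capture_mhtml=True)
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(url=url, config=config)
        if result.mhtml:  # stays None unless capture_mhtml=True
            with open("snapshot.mhtml", "w", encoding="utf-8") as f:
                f.write(result.mhtml)


asyncio.run(snapshot("https://example.com"))
```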
**Files Modified:**
|
||||
- `crawl4ai/models.py`: Added the mhtml field to CrawlResult
|
||||
- `crawl4ai/async_configs.py`: Added capture_mhtml parameter to CrawlerRunConfig
|
||||
- `crawl4ai/async_crawler_strategy.py`: Implemented MHTML capture logic
|
||||
- `crawl4ai/async_webcrawler.py`: Added mapping from AsyncCrawlResponse.mhtml_data to CrawlResult.mhtml
|
||||
|
||||
**Testing:**
|
||||
- Created comprehensive tests in `tests/20241401/test_mhtml.py` covering:
|
||||
- Capturing MHTML when enabled
|
||||
- Ensuring mhtml is None when disabled explicitly
|
||||
- Ensuring mhtml is None by default
|
||||
- Capturing MHTML on JavaScript-enabled pages
|
||||
|
||||
**Challenges:**
|
||||
- Had to improve page loading detection to ensure JavaScript content was fully rendered
|
||||
- Tests needed to be run independently due to Playwright browser instance management
|
||||
- Modified test expected content to match actual MHTML output
|
||||
|
||||
**Why This Feature:**
|
||||
The MHTML capture feature allows users to capture complete web pages including all resources (CSS, images, etc.) in a single file. This is valuable for:
|
||||
1. Offline viewing of captured pages
|
||||
2. Creating permanent snapshots of web content for archival
|
||||
3. Ensuring consistent content for later analysis, even if the original site changes
|
||||
|
||||
**Future Enhancements to Consider:**
|
||||
- Add option to save MHTML to file
|
||||
- Support for filtering what resources get included in MHTML
|
||||
- Add support for specifying MHTML capture options
|
||||
|
||||
## [2025-04-10] Added Network Request and Console Message Capturing
|
||||
|
||||
**Feature:** Comprehensive capturing of network requests/responses and browser console messages during crawling
|
||||
|
||||
**Changes Made:**
|
||||
1. Added `capture_network_requests: bool = False` and `capture_console_messages: bool = False` parameters to `CrawlerRunConfig` class
|
||||
2. Added `network_requests: Optional[List[Dict[str, Any]]] = None` and `console_messages: Optional[List[Dict[str, Any]]] = None` fields to both `AsyncCrawlResponse` and `CrawlResult` models
|
||||
3. Implemented event listeners in `AsyncPlaywrightCrawlerStrategy._crawl_web()` to capture browser network events and console messages
|
||||
4. Added proper event listener cleanup in the finally block to prevent resource leaks
|
||||
5. Modified the crawler flow to pass captured data from AsyncCrawlResponse to CrawlResult
|
||||
|
||||
**Implementation Details:**
|
||||
- Network capture uses Playwright event listeners (`request`, `response`, and `requestfailed`) to record all network activity
|
||||
- Console capture uses Playwright event listeners (`console` and `pageerror`) to record console messages and errors
|
||||
- Each network event includes metadata like URL, headers, status, and timing information
|
||||
- Each console message includes type, text content, and source location when available
|
||||
- All captured events include timestamps for chronological analysis
|
||||
- Error handling ensures even failed capture attempts won't crash the main crawling process
|
||||
|
||||
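A sketch using the parameter and field names from this entry; the keys inside each captured event dict are not spelled out beyond what the bullets above describe.

```python
import asyncio

from crawl4ai import AsyncWebCrawler, CrawlerRunConfig


async def main():
    config = CrawlerRunConfig(
        capture_network_requests=True,
        capture_console_messages=True,
    )
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(url="https://example.com", config=config)
        print(len(result.network_requests or []), "network events captured")
        print(len(result.console_messages or []), "console messages captured")


asyncio.run(main())
```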
**Files Modified:**
|
||||
- `crawl4ai/models.py`: Added new fields to AsyncCrawlResponse and CrawlResult
|
||||
- `crawl4ai/async_configs.py`: Added new configuration parameters to CrawlerRunConfig
|
||||
- `crawl4ai/async_crawler_strategy.py`: Implemented capture logic using event listeners
|
||||
- `crawl4ai/async_webcrawler.py`: Added data transfer from AsyncCrawlResponse to CrawlResult
|
||||
|
||||
**Documentation:**
|
||||
- Created detailed documentation in `docs/md_v2/advanced/network-console-capture.md`
|
||||
- Added feature to site navigation in `mkdocs.yml`
|
||||
- Updated CrawlResult documentation in `docs/md_v2/api/crawl-result.md`
|
||||
- Created comprehensive example in `docs/examples/network_console_capture_example.py`
|
||||
|
||||
**Testing:**
|
||||
- Created `tests/general/test_network_console_capture.py` with tests for:
|
||||
- Verifying capture is disabled by default
|
||||
- Testing network request capturing
|
||||
- Testing console message capturing
|
||||
- Ensuring both capture types can be enabled simultaneously
|
||||
- Checking correct content is captured in expected formats
|
||||
|
||||
**Challenges:**
|
||||
- Initial implementation had synchronous/asynchronous mismatches in event handlers
|
||||
- Needed to fix type of property access vs. method calls in handlers
|
||||
- Required careful cleanup of event listeners to prevent memory leaks
|
||||
|
||||
**Why This Feature:**
|
||||
The network and console capture feature provides deep visibility into web page activity, enabling:
|
||||
1. Debugging complex web applications by seeing all network requests and errors
|
||||
2. Security analysis to detect unexpected third-party requests and data flows
|
||||
3. Performance profiling to identify slow-loading resources
|
||||
4. API discovery in single-page applications
|
||||
5. Comprehensive analysis of web application behavior
|
||||
|
||||
**Future Enhancements to Consider:**
|
||||
- Option to filter captured events by type, domain, or content
|
||||
- Support for capturing response bodies (with size limits)
|
||||
- Aggregate statistics calculation for performance metrics
|
||||
- Integration with visualization tools for network waterfall analysis
|
||||
- Exporting captures in HAR format for use with external tools
|
||||
**README.md** (142 changed lines)

@@ -21,9 +21,9 @@
|
||||
|
||||
Crawl4AI is the #1 trending GitHub repository, actively maintained by a vibrant community. It delivers blazing-fast, AI-ready web crawling tailored for LLMs, AI agents, and data pipelines. Open source, flexible, and built for real-time performance, Crawl4AI empowers developers with unmatched speed, precision, and deployment ease.
|
||||
|
||||
[✨ Check out latest update v0.5.0](#-recent-updates)
|
||||
[✨ Check out latest update v0.6.0](#-recent-updates)
|
||||
|
||||
🎉 **Version 0.5.0 is out!** This major release introduces Deep Crawling with BFS/DFS/BestFirst strategies, Memory-Adaptive Dispatcher, Multiple Crawling Strategies (Playwright and HTTP), Docker Deployment with FastAPI, Command-Line Interface (CLI), and more! [Read the release notes →](https://docs.crawl4ai.com/blog)
|
||||
🎉 **Version 0.6.0 is now available!** This release candidate introduces World-aware Crawling with geolocation and locale settings, Table-to-DataFrame extraction, Browser pooling with pre-warming, Network and console traffic capture, MCP integration for AI tools, and a completely revamped Docker deployment! [Read the release notes →](https://docs.crawl4ai.com/blog)
|
||||
|
||||
<details>
|
||||
<summary>🤓 <strong>My Personal Story</strong></summary>
|
||||
@@ -253,24 +253,29 @@ pip install -e ".[all]" # Install all optional features
|
||||
<details>
|
||||
<summary>🐳 <strong>Docker Deployment</strong></summary>
|
||||
|
||||
> 🚀 **Major Changes Coming!** We're developing a completely new Docker implementation that will make deployment even more efficient and seamless. The current Docker setup is being deprecated in favor of this new solution.
|
||||
> 🚀 **Now Available!** Our completely redesigned Docker implementation is here! This new solution makes deployment more efficient and seamless than ever.
|
||||
|
||||
### Current Docker Support
|
||||
### New Docker Features
|
||||
|
||||
The existing Docker implementation is being deprecated and will be replaced soon. If you still need to use Docker with the current version:
|
||||
The new Docker implementation includes:
|
||||
- **Browser pooling** with page pre-warming for faster response times
|
||||
- **Interactive playground** to test and generate request code
|
||||
- **MCP integration** for direct connection to AI tools like Claude Code
|
||||
- **Comprehensive API endpoints** including HTML extraction, screenshots, PDF generation, and JavaScript execution
|
||||
- **Multi-architecture support** with automatic detection (AMD64/ARM64)
|
||||
- **Optimized resources** with improved memory management
|
||||
|
||||
- 📚 [Deprecated Docker Setup](./docs/deprecated/docker-deployment.md) - Instructions for the current Docker implementation
|
||||
- ⚠️ Note: This setup will be replaced in the next major release
|
||||
### Getting Started
|
||||
|
||||
### What's Coming Next?
|
||||
```bash
|
||||
# Pull and run the latest release candidate
|
||||
docker pull unclecode/crawl4ai:0.6.0-rN # Use your favorite revision number
|
||||
docker run -d -p 11235:11235 --name crawl4ai --shm-size=1g unclecode/crawl4ai:0.6.0-rN # Use your favorite revision number
|
||||
|
||||
Our new Docker implementation will bring:
|
||||
- Improved performance and resource efficiency
|
||||
- Streamlined deployment process
|
||||
- Better integration with Crawl4AI features
|
||||
- Enhanced scalability options
|
||||
# Visit the playground at http://localhost:11235/playground
|
||||
```
|
||||
|
||||
Stay connected with our [GitHub repository](https://github.com/unclecode/crawl4ai) for updates!
|
||||
For complete documentation, see our [Docker Deployment Guide](https://docs.crawl4ai.com/core/docker-deployment/).
|
||||
|
||||
</details>
|
||||
|
||||
@@ -420,7 +425,7 @@ if __name__ == "__main__":
|
||||
```python
|
||||
import os
|
||||
import asyncio
|
||||
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode, LlmConfig
|
||||
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode, LLMConfig
|
||||
from crawl4ai.extraction_strategy import LLMExtractionStrategy
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
@@ -436,7 +441,7 @@ async def main():
|
||||
extraction_strategy=LLMExtractionStrategy(
|
||||
# Here you can use any provider that Litellm library supports, for instance: ollama/qwen2
|
||||
# provider="ollama/qwen2", api_token="no-token",
|
||||
llmConfig = LlmConfig(provider="openai/gpt-4o", api_token=os.getenv('OPENAI_API_KEY')),
|
||||
llm_config = LLMConfig(provider="openai/gpt-4o", api_token=os.getenv('OPENAI_API_KEY')),
|
||||
schema=OpenAIModelFee.schema(),
|
||||
extraction_type="schema",
|
||||
instruction="""From the crawled content, extract all mentioned model names along with their fees for input and output tokens.
|
||||
@@ -500,31 +505,92 @@ async def test_news_crawl():
|
||||
|
||||
## ✨ Recent Updates
|
||||
|
||||
### Version 0.5.0 Major Release Highlights
|
||||
### Version 0.6.0 Release Highlights
|
||||
|
||||
- **🚀 Deep Crawling System**: Explore websites beyond initial URLs with three strategies:
|
||||
- **BFS Strategy**: Breadth-first search explores websites level by level
|
||||
- **DFS Strategy**: Depth-first search explores each branch deeply before backtracking
|
||||
- **BestFirst Strategy**: Uses scoring functions to prioritize which URLs to crawl next
|
||||
- **Page Limiting**: Control the maximum number of pages to crawl with `max_pages` parameter
|
||||
- **Score Thresholds**: Filter URLs based on relevance scores
|
||||
- **⚡ Memory-Adaptive Dispatcher**: Dynamically adjusts concurrency based on system memory with built-in rate limiting
|
||||
- **🔄 Multiple Crawling Strategies**:
|
||||
- **AsyncPlaywrightCrawlerStrategy**: Browser-based crawling with JavaScript support (Default)
|
||||
- **AsyncHTTPCrawlerStrategy**: Fast, lightweight HTTP-only crawler for simple tasks
|
||||
- **🐳 Docker Deployment**: Easy deployment with FastAPI server and streaming/non-streaming endpoints
|
||||
- **💻 Command-Line Interface**: New `crwl` CLI provides convenient terminal access to all features with intuitive commands and configuration options
|
||||
- **👤 Browser Profiler**: Create and manage persistent browser profiles to save authentication states, cookies, and settings for seamless crawling of protected content
|
||||
- **🧠 Crawl4AI Coding Assistant**: AI-powered coding assistant to answer your question for Crawl4ai, and generate proper code for crawling.
|
||||
- **🏎️ LXML Scraping Mode**: Fast HTML parsing using the `lxml` library for improved performance
|
||||
- **🌐 Proxy Rotation**: Built-in support for proxy switching with `RoundRobinProxyStrategy`
|
||||
- **🌎 World-aware Crawling**: Set geolocation, language, and timezone for authentic locale-specific content:
|
||||
```python
|
||||
crun_cfg = CrawlerRunConfig(
|
||||
url="https://browserleaks.com/geo", # test page that shows your location
|
||||
locale="en-US", # Accept-Language & UI locale
|
||||
timezone_id="America/Los_Angeles", # JS Date()/Intl timezone
|
||||
geolocation=GeolocationConfig( # override GPS coords
|
||||
latitude=34.0522,
|
||||
longitude=-118.2437,
|
||||
accuracy=10.0,
|
||||
)
|
||||
)
|
||||
```
|
||||
|
||||
- **📊 Table-to-DataFrame Extraction**: Extract HTML tables directly to CSV or pandas DataFrames:
|
||||
```python
|
||||
crawler = AsyncWebCrawler(config=browser_config)
|
||||
await crawler.start()
|
||||
|
||||
try:
|
||||
# Set up scraping parameters
|
||||
crawl_config = CrawlerRunConfig(
|
||||
table_score_threshold=8, # Strict table detection
|
||||
)
|
||||
|
||||
# Execute market data extraction
|
||||
results: List[CrawlResult] = await crawler.arun(
|
||||
url="https://coinmarketcap.com/?page=1", config=crawl_config
|
||||
)
|
||||
|
||||
# Process results
|
||||
raw_df = pd.DataFrame()
|
||||
for result in results:
|
||||
if result.success and result.media["tables"]:
|
||||
raw_df = pd.DataFrame(
|
||||
result.media["tables"][0]["rows"],
|
||||
columns=result.media["tables"][0]["headers"],
|
||||
)
|
||||
break
|
||||
print(raw_df.head())
|
||||
|
||||
finally:
|
||||
await crawler.stop()
|
||||
```
|
||||
|
||||
- **🚀 Browser Pooling**: Pages launch hot with pre-warmed browser instances for lower latency and memory usage
|
||||
|
||||
- **🕸️ Network and Console Capture**: Full traffic logs and MHTML snapshots for debugging:
|
||||
```python
|
||||
crawler_config = CrawlerRunConfig(
|
||||
capture_network=True,
|
||||
capture_console=True,
|
||||
mhtml=True
|
||||
)
|
||||
```
|
||||
|
||||
- **🔌 MCP Integration**: Connect to AI tools like Claude Code through the Model Context Protocol
|
||||
```bash
|
||||
# Add Crawl4AI to Claude Code
|
||||
claude mcp add --transport sse c4ai-sse http://localhost:11235/mcp/sse
|
||||
```
|
||||
|
||||
- **🖥️ Interactive Playground**: Test configurations and generate API requests with the built-in web interface at `http://localhost:11235/playground`
|
||||
|
||||
- **🐳 Revamped Docker Deployment**: Streamlined multi-architecture Docker image with improved resource efficiency
|
||||
|
||||
- **📱 Multi-stage Build System**: Optimized Dockerfile with platform-specific performance enhancements
|
||||
|
||||
Read the full details in our [0.6.0 Release Notes](https://docs.crawl4ai.com/blog/releases/0.6.0.html) or check the [CHANGELOG](https://github.com/unclecode/crawl4ai/blob/main/CHANGELOG.md).
|
||||
|
||||
### Previous Version: 0.5.0 Major Release Highlights
|
||||
|
||||
- **🚀 Deep Crawling System**: Explore websites beyond initial URLs with BFS, DFS, and BestFirst strategies
|
||||
- **⚡ Memory-Adaptive Dispatcher**: Dynamically adjusts concurrency based on system memory
|
||||
- **🔄 Multiple Crawling Strategies**: Browser-based and lightweight HTTP-only crawlers
|
||||
- **💻 Command-Line Interface**: New `crwl` CLI provides convenient terminal access
|
||||
- **👤 Browser Profiler**: Create and manage persistent browser profiles
|
||||
- **🧠 Crawl4AI Coding Assistant**: AI-powered coding assistant
|
||||
- **🏎️ LXML Scraping Mode**: Fast HTML parsing using the `lxml` library
|
||||
- **🌐 Proxy Rotation**: Built-in support for proxy switching
|
||||
- **🤖 LLM Content Filter**: Intelligent markdown generation using LLMs
|
||||
- **📄 PDF Processing**: Extract text, images, and metadata from PDF files
|
||||
- **🔗 URL Redirection Tracking**: Automatically follow and record HTTP redirects
|
||||
- **🤖 LLM Schema Generation**: Easily create extraction schemas with LLM assistance
|
||||
- **🔍 robots.txt Compliance**: Respect website crawling rules
|
||||
|
||||
Read the full details in our [0.5.0 Release Notes](https://docs.crawl4ai.com/blog/releases/0.5.0.html) or check the [CHANGELOG](https://github.com/unclecode/crawl4ai/blob/main/CHANGELOG.md).
|
||||
Read the full details in our [0.5.0 Release Notes](https://docs.crawl4ai.com/blog/releases/0.5.0.html).
|
||||
|
||||
## Version Numbering in Crawl4AI
|
||||
|
||||
@@ -540,7 +606,7 @@ We use different suffixes to indicate development stages:
|
||||
- `dev` (0.4.3dev1): Development versions, unstable
|
||||
- `a` (0.4.3a1): Alpha releases, experimental features
|
||||
- `b` (0.4.3b1): Beta releases, feature complete but needs testing
|
||||
- `rc` (0.4.3rc1): Release candidates, potential final version
|
||||
- `rc` (0.4.3): Release candidates, potential final version
|
||||
|
||||
#### Installation
|
||||
- Regular installation (stable version):
|
||||
|
||||
@@ -2,7 +2,8 @@
|
||||
import warnings
|
||||
|
||||
from .async_webcrawler import AsyncWebCrawler, CacheMode
|
||||
from .async_configs import BrowserConfig, CrawlerRunConfig, HTTPCrawlerConfig
|
||||
from .async_configs import BrowserConfig, CrawlerRunConfig, HTTPCrawlerConfig, LLMConfig, ProxyConfig, GeolocationConfig
|
||||
|
||||
from .content_scraping_strategy import (
|
||||
ContentScrapingStrategy,
|
||||
WebScrapingStrategy,
|
||||
@@ -22,6 +23,8 @@ from .extraction_strategy import (
|
||||
CosineStrategy,
|
||||
JsonCssExtractionStrategy,
|
||||
JsonXPathExtractionStrategy,
|
||||
JsonLxmlExtractionStrategy,
|
||||
RegexExtractionStrategy
|
||||
)
|
||||
from .chunking_strategy import ChunkingStrategy, RegexChunking
|
||||
from .markdown_generation_strategy import DefaultMarkdownGenerator
|
||||
@@ -31,13 +34,12 @@ from .content_filter_strategy import (
|
||||
LLMContentFilter,
|
||||
RelevantContentFilter,
|
||||
)
|
||||
from .models import CrawlResult, MarkdownGenerationResult
|
||||
from .models import CrawlResult, MarkdownGenerationResult, DisplayMode
|
||||
from .components.crawler_monitor import CrawlerMonitor
|
||||
from .async_dispatcher import (
|
||||
MemoryAdaptiveDispatcher,
|
||||
SemaphoreDispatcher,
|
||||
RateLimiter,
|
||||
CrawlerMonitor,
|
||||
DisplayMode,
|
||||
BaseDispatcher,
|
||||
)
|
||||
from .docker_client import Crawl4aiDockerClient
|
||||
@@ -47,8 +49,9 @@ from .deep_crawling import (
|
||||
DeepCrawlStrategy,
|
||||
BFSDeepCrawlStrategy,
|
||||
FilterChain,
|
||||
ContentTypeFilter,
|
||||
URLPatternFilter,
|
||||
DomainFilter,
|
||||
ContentTypeFilter,
|
||||
URLFilter,
|
||||
FilterStats,
|
||||
SEOFilter,
|
||||
@@ -68,11 +71,14 @@ __all__ = [
|
||||
"AsyncLogger",
|
||||
"AsyncWebCrawler",
|
||||
"BrowserProfiler",
|
||||
"LLMConfig",
|
||||
"GeolocationConfig",
|
||||
"DeepCrawlStrategy",
|
||||
"BFSDeepCrawlStrategy",
|
||||
"BestFirstCrawlingStrategy",
|
||||
"DFSDeepCrawlStrategy",
|
||||
"FilterChain",
|
||||
"URLPatternFilter",
|
||||
"ContentTypeFilter",
|
||||
"DomainFilter",
|
||||
"FilterStats",
|
||||
@@ -99,6 +105,8 @@ __all__ = [
|
||||
"CosineStrategy",
|
||||
"JsonCssExtractionStrategy",
|
||||
"JsonXPathExtractionStrategy",
|
||||
"JsonLxmlExtractionStrategy",
|
||||
"RegexExtractionStrategy",
|
||||
"ChunkingStrategy",
|
||||
"RegexChunking",
|
||||
"DefaultMarkdownGenerator",
|
||||
@@ -116,6 +124,7 @@ __all__ = [
|
||||
"Crawl4aiDockerClient",
|
||||
"ProxyRotationStrategy",
|
||||
"RoundRobinProxyStrategy",
|
||||
"ProxyConfig"
|
||||
]
|
||||
|
||||
|
||||
|
||||
**crawl4ai/_version.py**

@@ -1,2 +1,3 @@
|
||||
# crawl4ai/_version.py
|
||||
__version__ = "0.5.0.post1"
|
||||
__version__ = "0.6.3"
|
||||
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
import os
|
||||
from .config import (
|
||||
DEFAULT_PROVIDER,
|
||||
DEFAULT_PROVIDER_API_KEY,
|
||||
MIN_WORD_THRESHOLD,
|
||||
IMAGE_DESCRIPTION_MIN_WORD_THRESHOLD,
|
||||
PROVIDER_MODELS,
|
||||
PROVIDER_MODELS_PREFIXES,
|
||||
SCREENSHOT_HEIGHT_TRESHOLD,
|
||||
PAGE_TIMEOUT,
|
||||
IMAGE_SCORE_THRESHOLD,
|
||||
@@ -11,19 +13,24 @@ from .config import (
|
||||
)
|
||||
|
||||
from .user_agent_generator import UAGen, ValidUAGenerator # , OnlineUAGenerator
|
||||
from .extraction_strategy import ExtractionStrategy
|
||||
from .extraction_strategy import ExtractionStrategy, LLMExtractionStrategy
|
||||
from .chunking_strategy import ChunkingStrategy, RegexChunking
|
||||
from .markdown_generation_strategy import MarkdownGenerationStrategy
|
||||
|
||||
from .markdown_generation_strategy import MarkdownGenerationStrategy, DefaultMarkdownGenerator
|
||||
from .content_scraping_strategy import ContentScrapingStrategy, WebScrapingStrategy
|
||||
from .deep_crawling import DeepCrawlStrategy
|
||||
from typing import Union, List
|
||||
|
||||
from .cache_context import CacheMode
|
||||
from .proxy_strategy import ProxyRotationStrategy
|
||||
|
||||
from typing import Union, List
|
||||
import inspect
|
||||
from typing import Any, Dict, Optional
|
||||
from enum import Enum
|
||||
|
||||
# from .proxy_strategy import ProxyConfig
|
||||
|
||||
|
||||
|
||||
def to_serializable_dict(obj: Any, ignore_default_value : bool = False) -> Dict:
|
||||
"""
|
||||
@@ -113,23 +120,25 @@ def from_serializable_dict(data: Any) -> Any:
|
||||
# Handle typed data
|
||||
if isinstance(data, dict) and "type" in data:
|
||||
# Handle plain dictionaries
|
||||
if data["type"] == "dict":
|
||||
if data["type"] == "dict" and "value" in data:
|
||||
return {k: from_serializable_dict(v) for k, v in data["value"].items()}
|
||||
|
||||
# Import from crawl4ai for class instances
|
||||
import crawl4ai
|
||||
|
||||
cls = getattr(crawl4ai, data["type"])
|
||||
if hasattr(crawl4ai, data["type"]):
|
||||
cls = getattr(crawl4ai, data["type"])
|
||||
|
||||
# Handle Enum
|
||||
if issubclass(cls, Enum):
|
||||
return cls(data["params"])
|
||||
# Handle Enum
|
||||
if issubclass(cls, Enum):
|
||||
return cls(data["params"])
|
||||
|
||||
# Handle class instances
|
||||
constructor_args = {
|
||||
k: from_serializable_dict(v) for k, v in data["params"].items()
|
||||
}
|
||||
return cls(**constructor_args)
|
||||
if "params" in data:
|
||||
# Handle class instances
|
||||
constructor_args = {
|
||||
k: from_serializable_dict(v) for k, v in data["params"].items()
|
||||
}
|
||||
return cls(**constructor_args)
|
||||
|
||||
# Handle lists
|
||||
if isinstance(data, list):
|
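For reference, a hedged round-trip sketch of the typed-dict format handled above; the exact payload shape and the module path are assumptions inferred from the branches in from_serializable_dict:
from crawl4ai import CacheMode
from crawl4ai.async_configs import to_serializable_dict, from_serializable_dict  # module path assumed
payload = to_serializable_dict(CacheMode.BYPASS)   # assumed shape: {"type": "CacheMode", "params": <enum value>}
restored = from_serializable_dict(payload)         # resolved via getattr(crawl4ai, ...), then cls(data["params"]) for Enums
assert restored is CacheMode.BYPASS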
||||
@@ -150,6 +159,166 @@ def is_empty_value(value: Any) -> bool:
|
||||
return True
|
||||
return False
|
||||
|
||||
class GeolocationConfig:
|
||||
def __init__(
|
||||
self,
|
||||
latitude: float,
|
||||
longitude: float,
|
||||
accuracy: Optional[float] = 0.0
|
||||
):
|
||||
"""Configuration class for geolocation settings.
|
||||
|
||||
Args:
|
||||
latitude: Latitude coordinate (e.g., 37.7749)
|
||||
longitude: Longitude coordinate (e.g., -122.4194)
|
||||
accuracy: Accuracy in meters. Default: 0.0
|
||||
"""
|
||||
self.latitude = latitude
|
||||
self.longitude = longitude
|
||||
self.accuracy = accuracy
|
||||
|
||||
@staticmethod
|
||||
def from_dict(geo_dict: Dict) -> "GeolocationConfig":
|
||||
"""Create a GeolocationConfig from a dictionary."""
|
||||
return GeolocationConfig(
|
||||
latitude=geo_dict.get("latitude"),
|
||||
longitude=geo_dict.get("longitude"),
|
||||
accuracy=geo_dict.get("accuracy", 0.0)
|
||||
)
|
||||
|
||||
def to_dict(self) -> Dict:
|
||||
"""Convert to dictionary representation."""
|
||||
return {
|
||||
"latitude": self.latitude,
|
||||
"longitude": self.longitude,
|
||||
"accuracy": self.accuracy
|
||||
}
|
||||
|
||||
def clone(self, **kwargs) -> "GeolocationConfig":
|
||||
"""Create a copy of this configuration with updated values.
|
||||
|
||||
Args:
|
||||
**kwargs: Key-value pairs of configuration options to update
|
||||
|
||||
Returns:
|
||||
GeolocationConfig: A new instance with the specified updates
|
||||
"""
|
||||
config_dict = self.to_dict()
|
||||
config_dict.update(kwargs)
|
||||
return GeolocationConfig.from_dict(config_dict)
|
||||
|
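A minimal usage sketch of GeolocationConfig as defined above; the coordinates are placeholder values, not anything mandated by the library:
geo = GeolocationConfig(latitude=37.7749, longitude=-122.4194, accuracy=10.0)  # hypothetical San Francisco fix
print(geo.to_dict())                                   # {"latitude": 37.7749, "longitude": -122.4194, "accuracy": 10.0}
berlin = geo.clone(latitude=52.52, longitude=13.405)   # copy with updated coordinates, accuracy carried over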
||||
|
||||
class ProxyConfig:
|
||||
def __init__(
|
||||
self,
|
||||
server: str,
|
||||
username: Optional[str] = None,
|
||||
password: Optional[str] = None,
|
||||
ip: Optional[str] = None,
|
||||
):
|
||||
"""Configuration class for a single proxy.
|
||||
|
||||
Args:
|
||||
server: Proxy server URL (e.g., "http://127.0.0.1:8080")
|
||||
username: Optional username for proxy authentication
|
||||
password: Optional password for proxy authentication
|
||||
ip: Optional IP address for verification purposes
|
||||
"""
|
||||
self.server = server
|
||||
self.username = username
|
||||
self.password = password
|
||||
|
||||
# Extract IP from server if not explicitly provided
|
||||
self.ip = ip or self._extract_ip_from_server()
|
||||
|
||||
def _extract_ip_from_server(self) -> Optional[str]:
|
||||
"""Extract IP address from server URL."""
|
||||
try:
|
||||
# Simple extraction assuming http://ip:port format
|
||||
if "://" in self.server:
|
||||
parts = self.server.split("://")[1].split(":")
|
||||
return parts[0]
|
||||
else:
|
||||
parts = self.server.split(":")
|
||||
return parts[0]
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def from_string(proxy_str: str) -> "ProxyConfig":
|
||||
"""Create a ProxyConfig from a string in the format 'ip:port:username:password'."""
|
||||
parts = proxy_str.split(":")
|
||||
if len(parts) == 4: # ip:port:username:password
|
||||
ip, port, username, password = parts
|
||||
return ProxyConfig(
|
||||
server=f"http://{ip}:{port}",
|
||||
username=username,
|
||||
password=password,
|
||||
ip=ip
|
||||
)
|
||||
elif len(parts) == 2: # ip:port only
|
||||
ip, port = parts
|
||||
return ProxyConfig(
|
||||
server=f"http://{ip}:{port}",
|
||||
ip=ip
|
||||
)
|
||||
else:
|
||||
raise ValueError(f"Invalid proxy string format: {proxy_str}")
|
||||
|
||||
@staticmethod
|
||||
def from_dict(proxy_dict: Dict) -> "ProxyConfig":
|
||||
"""Create a ProxyConfig from a dictionary."""
|
||||
return ProxyConfig(
|
||||
server=proxy_dict.get("server"),
|
||||
username=proxy_dict.get("username"),
|
||||
password=proxy_dict.get("password"),
|
||||
ip=proxy_dict.get("ip")
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def from_env(env_var: str = "PROXIES") -> List["ProxyConfig"]:
|
||||
"""Load proxies from environment variable.
|
||||
|
||||
Args:
|
||||
env_var: Name of environment variable containing comma-separated proxy strings
|
||||
|
||||
Returns:
|
||||
List of ProxyConfig objects
|
||||
"""
|
||||
proxies = []
|
||||
try:
|
||||
proxy_list = os.getenv(env_var, "").split(",")
|
||||
for proxy in proxy_list:
|
||||
if not proxy:
|
||||
continue
|
||||
proxies.append(ProxyConfig.from_string(proxy))
|
||||
except Exception as e:
|
||||
print(f"Error loading proxies from environment: {e}")
|
||||
return proxies
|
||||
|
||||
def to_dict(self) -> Dict:
|
||||
"""Convert to dictionary representation."""
|
||||
return {
|
||||
"server": self.server,
|
||||
"username": self.username,
|
||||
"password": self.password,
|
||||
"ip": self.ip
|
||||
}
|
||||
|
||||
def clone(self, **kwargs) -> "ProxyConfig":
|
||||
"""Create a copy of this configuration with updated values.
|
||||
|
||||
Args:
|
||||
**kwargs: Key-value pairs of configuration options to update
|
||||
|
||||
Returns:
|
||||
ProxyConfig: A new instance with the specified updates
|
||||
"""
|
||||
config_dict = self.to_dict()
|
||||
config_dict.update(kwargs)
|
||||
return ProxyConfig.from_dict(config_dict)
|
||||
|
||||
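A short sketch of the three construction paths defined above (string, dict, environment variable); all addresses and credentials are placeholders:
import os

p1 = ProxyConfig.from_string("192.168.1.10:8080:user:pass")   # ip:port:username:password
p2 = ProxyConfig.from_string("192.168.1.10:8080")             # ip:port only, no auth
print(p1.server, p1.ip)                                        # http://192.168.1.10:8080 192.168.1.10

os.environ["PROXIES"] = "10.0.0.1:3128,10.0.0.2:3128:user:pass"
pool = ProxyConfig.from_env("PROXIES")                         # list of ProxyConfig, one per comma-separated entry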
|
||||
|
||||
class BrowserConfig:
|
||||
"""
|
||||
@@ -164,6 +333,12 @@ class BrowserConfig:
|
||||
Default: "chromium".
|
||||
headless (bool): Whether to run the browser in headless mode (no visible GUI).
|
||||
Default: True.
|
||||
browser_mode (str): Determines how the browser should be initialized:
|
||||
"builtin" - use the builtin CDP browser running in background
|
||||
"dedicated" - create a new dedicated browser instance each time
|
||||
"cdp" - use explicit CDP settings provided in cdp_url
|
||||
"docker" - run browser in Docker container with isolation
|
||||
Default: "dedicated"
|
||||
use_managed_browser (bool): Launch the browser using a managed approach (e.g., via CDP), allowing
|
||||
advanced manipulation. Default: False.
|
||||
cdp_url (str): URL for the Chrome DevTools Protocol (CDP) endpoint. Default: "ws://localhost:9222/devtools/browser/".
|
||||
@@ -178,7 +353,7 @@ class BrowserConfig:
|
||||
is "chromium". Default: "chromium".
|
||||
proxy (Optional[str]): Proxy server URL (e.g., "http://username:password@proxy:port"). If None, no proxy is used.
|
||||
Default: None.
|
||||
proxy_config (dict or None): Detailed proxy configuration, e.g. {"server": "...", "username": "..."}.
|
||||
proxy_config (ProxyConfig or dict or None): Detailed proxy configuration, e.g. {"server": "...", "username": "..."}.
|
||||
If None, no additional proxy config. Default: None.
|
||||
viewport_width (int): Default viewport width for pages. Default: 1080.
|
||||
viewport_height (int): Default viewport height for pages. Default: 600.
|
||||
@@ -190,7 +365,7 @@ class BrowserConfig:
|
||||
Default: False.
|
||||
downloads_path (str or None): Directory to store downloaded files. If None and accept_downloads is True,
|
||||
a default path will be created. Default: None.
|
||||
storage_state (str or dict or None): Path or object describing storage state (cookies, localStorage).
|
||||
storage_state (str or dict or None): An in-memory storage state (cookies, localStorage).
|
||||
Default: None.
|
||||
ignore_https_errors (bool): Ignore HTTPS certificate errors. Default: True.
|
||||
java_script_enabled (bool): Enable JavaScript execution in pages. Default: True.
|
||||
@@ -216,6 +391,7 @@ class BrowserConfig:
|
||||
self,
|
||||
browser_type: str = "chromium",
|
||||
headless: bool = True,
|
||||
browser_mode: str = "dedicated",
|
||||
use_managed_browser: bool = False,
|
||||
cdp_url: str = None,
|
||||
use_persistent_context: bool = False,
|
||||
@@ -223,7 +399,7 @@ class BrowserConfig:
|
||||
chrome_channel: str = "chromium",
|
||||
channel: str = "chromium",
|
||||
proxy: str = None,
|
||||
proxy_config: dict = None,
|
||||
proxy_config: Union[ProxyConfig, dict, None] = None,
|
||||
viewport_width: int = 1080,
|
||||
viewport_height: int = 600,
|
||||
viewport: dict = None,
|
||||
@@ -251,7 +427,8 @@ class BrowserConfig:
|
||||
host: str = "localhost",
|
||||
):
|
||||
self.browser_type = browser_type
|
||||
self.headless = headless
|
||||
self.headless = headless
|
||||
self.browser_mode = browser_mode
|
||||
self.use_managed_browser = use_managed_browser
|
||||
self.cdp_url = cdp_url
|
||||
self.use_persistent_context = use_persistent_context
|
||||
@@ -263,6 +440,8 @@ class BrowserConfig:
|
||||
self.chrome_channel = ""
|
||||
self.proxy = proxy
|
||||
self.proxy_config = proxy_config
|
||||
|
||||
|
||||
self.viewport_width = viewport_width
|
||||
self.viewport_height = viewport_height
|
||||
self.viewport = viewport
|
||||
@@ -285,6 +464,7 @@ class BrowserConfig:
|
||||
self.sleep_on_close = sleep_on_close
|
||||
self.verbose = verbose
|
||||
self.debugging_port = debugging_port
|
||||
self.host = host
|
||||
|
||||
fa_user_agenr_generator = ValidUAGenerator()
|
||||
if self.user_agent_mode == "random":
|
||||
@@ -297,6 +477,22 @@ class BrowserConfig:
|
||||
self.browser_hint = UAGen.generate_client_hints(self.user_agent)
|
||||
self.headers.setdefault("sec-ch-ua", self.browser_hint)
|
||||
|
||||
# Set appropriate browser management flags based on browser_mode
|
||||
if self.browser_mode == "builtin":
|
||||
# Builtin mode uses managed browser connecting to builtin CDP endpoint
|
||||
self.use_managed_browser = True
|
||||
# cdp_url will be set later by browser_manager
|
||||
elif self.browser_mode == "docker":
|
||||
# Docker mode uses managed browser with CDP to connect to browser in container
|
||||
self.use_managed_browser = True
|
||||
# cdp_url will be set later by docker browser strategy
|
||||
elif self.browser_mode == "custom" and self.cdp_url:
|
||||
# Custom mode with explicit CDP URL
|
||||
self.use_managed_browser = True
|
||||
elif self.browser_mode == "dedicated":
|
||||
# Dedicated mode uses a new browser instance each time
|
||||
pass
|
||||
|
||||
# If persistent context is requested, ensure managed browser is enabled
|
||||
if self.use_persistent_context:
|
||||
self.use_managed_browser = True
|
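A small sketch of what the branching above does to the flags, assuming BrowserConfig is imported from crawl4ai; only the modes handled in the code flip use_managed_browser automatically:
from crawl4ai import BrowserConfig

builtin = BrowserConfig(browser_mode="builtin", headless=True)
assert builtin.use_managed_browser is True        # set by the "builtin" branch; cdp_url is filled in later by the browser manager

dedicated = BrowserConfig(browser_mode="dedicated")
assert dedicated.use_managed_browser is False     # default instance-per-crawl behaviour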
||||
@@ -306,6 +502,7 @@ class BrowserConfig:
|
||||
return BrowserConfig(
|
||||
browser_type=kwargs.get("browser_type", "chromium"),
|
||||
headless=kwargs.get("headless", True),
|
||||
browser_mode=kwargs.get("browser_mode", "dedicated"),
|
||||
use_managed_browser=kwargs.get("use_managed_browser", False),
|
||||
cdp_url=kwargs.get("cdp_url"),
|
||||
use_persistent_context=kwargs.get("use_persistent_context", False),
|
||||
@@ -313,7 +510,7 @@ class BrowserConfig:
|
||||
chrome_channel=kwargs.get("chrome_channel", "chromium"),
|
||||
channel=kwargs.get("channel", "chromium"),
|
||||
proxy=kwargs.get("proxy"),
|
||||
proxy_config=kwargs.get("proxy_config"),
|
||||
proxy_config=kwargs.get("proxy_config", None),
|
||||
viewport_width=kwargs.get("viewport_width", 1080),
|
||||
viewport_height=kwargs.get("viewport_height", 600),
|
||||
accept_downloads=kwargs.get("accept_downloads", False),
|
||||
@@ -333,12 +530,15 @@ class BrowserConfig:
|
||||
text_mode=kwargs.get("text_mode", False),
|
||||
light_mode=kwargs.get("light_mode", False),
|
||||
extra_args=kwargs.get("extra_args", []),
|
||||
debugging_port=kwargs.get("debugging_port", 9222),
|
||||
host=kwargs.get("host", "localhost"),
|
||||
)
|
||||
|
||||
def to_dict(self):
|
||||
return {
|
||||
result = {
|
||||
"browser_type": self.browser_type,
|
||||
"headless": self.headless,
|
||||
"browser_mode": self.browser_mode,
|
||||
"use_managed_browser": self.use_managed_browser,
|
||||
"cdp_url": self.cdp_url,
|
||||
"use_persistent_context": self.use_persistent_context,
|
||||
@@ -365,8 +565,12 @@ class BrowserConfig:
|
||||
"sleep_on_close": self.sleep_on_close,
|
||||
"verbose": self.verbose,
|
||||
"debugging_port": self.debugging_port,
|
||||
"host": self.host,
|
||||
}
|
||||
|
||||
|
||||
return result
|
||||
|
||||
def clone(self, **kwargs):
|
||||
"""Create a copy of this configuration with updated values.
|
||||
|
||||
@@ -497,6 +701,15 @@ class CrawlerRunConfig():
|
||||
Default: False.
|
||||
css_selector (str or None): CSS selector to extract a specific portion of the page.
|
||||
Default: None.
|
||||
|
||||
target_elements (list of str or None): List of CSS selectors for specific elements for Markdown generation
|
||||
and structured data extraction. When you set this, only the contents
|
||||
of these elements are processed for extraction and Markdown generation.
|
||||
If you do not set any value, the entire page is processed.
|
||||
The difference from css_selector is that css_selector shrinks
|
||||
the initial raw HTML down to the selected elements, while target_elements only affects
|
||||
extraction and Markdown generation (a short usage sketch follows this docstring).
|
||||
Default: None
|
||||
excluded_tags (list of str or None): List of HTML tags to exclude from processing.
|
||||
Default: None.
|
||||
excluded_selector (str or None): CSS selector to exclude from processing.
|
||||
@@ -513,9 +726,17 @@ class CrawlerRunConfig():
|
||||
Default: "lxml".
|
||||
scraping_strategy (ContentScrapingStrategy): Scraping strategy to use.
|
||||
Default: WebScrapingStrategy.
|
||||
proxy_config (dict or None): Detailed proxy configuration, e.g. {"server": "...", "username": "..."}.
|
||||
proxy_config (ProxyConfig or dict or None): Detailed proxy configuration, e.g. {"server": "...", "username": "..."}.
|
||||
If None, no additional proxy config. Default: None.
|
||||
|
||||
# Browser Location and Identity Parameters
|
||||
locale (str or None): Locale to use for the browser context (e.g., "en-US").
|
||||
Default: None.
|
||||
timezone_id (str or None): Timezone identifier to use for the browser context (e.g., "America/New_York").
|
||||
Default: None.
|
||||
geolocation (GeolocationConfig or None): Geolocation configuration for the browser.
|
||||
Default: None.
|
||||
|
||||
# SSL Parameters
|
||||
fetch_ssl_certificate: bool = False,
|
||||
# Caching Parameters
|
||||
@@ -593,6 +814,8 @@ class CrawlerRunConfig():
|
||||
Default: IMAGE_SCORE_THRESHOLD (e.g., 3).
|
||||
exclude_external_images (bool): If True, exclude all external images from processing.
|
||||
Default: False.
|
||||
table_score_threshold (int): Minimum score threshold for processing a table.
|
||||
Default: 7.
|
||||
|
||||
# Link and Domain Handling Parameters
|
||||
exclude_social_media_domains (list of str): List of domains to exclude for social media links.
|
||||
@@ -634,6 +857,12 @@ class CrawlerRunConfig():
|
||||
user_agent_generator_config (dict or None): Configuration for user agent generation if user_agent_mode is set.
|
||||
Default: None.
|
||||
|
||||
# Experimental Parameters
|
||||
experimental (dict): Dictionary containing experimental parameters that are in beta phase.
|
||||
This allows passing temporary features that are not yet fully integrated
|
||||
into the main parameter set.
|
||||
Default: None.
|
||||
|
||||
url: str = None # This is not a compulsory parameter
|
||||
"""
|
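A hedged sketch of the css_selector / target_elements distinction documented above; the selectors are placeholders:
from crawl4ai import CrawlerRunConfig

# css_selector trims the raw HTML itself (comma-separated selectors are supported by the
# extraction code later in this diff); target_elements only narrows what feeds Markdown
# generation and structured extraction, leaving the rest of the page available for links, media, etc.
narrow_html = CrawlerRunConfig(css_selector="article.post, aside.related")
narrow_markdown = CrawlerRunConfig(
    target_elements=["article.post", "div.comments"],
    excluded_tags=["nav", "footer"],
)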
||||
|
||||
@@ -643,9 +872,10 @@ class CrawlerRunConfig():
|
||||
word_count_threshold: int = MIN_WORD_THRESHOLD,
|
||||
extraction_strategy: ExtractionStrategy = None,
|
||||
chunking_strategy: ChunkingStrategy = RegexChunking(),
|
||||
markdown_generator: MarkdownGenerationStrategy = None,
|
||||
markdown_generator: MarkdownGenerationStrategy = DefaultMarkdownGenerator(),
|
||||
only_text: bool = False,
|
||||
css_selector: str = None,
|
||||
target_elements: List[str] = None,
|
||||
excluded_tags: list = None,
|
||||
excluded_selector: str = None,
|
||||
keep_data_attributes: bool = False,
|
||||
@@ -654,8 +884,12 @@ class CrawlerRunConfig():
|
||||
prettiify: bool = False,
|
||||
parser_type: str = "lxml",
|
||||
scraping_strategy: ContentScrapingStrategy = None,
|
||||
proxy_config: dict = None,
|
||||
proxy_config: Union[ProxyConfig, dict, None] = None,
|
||||
proxy_rotation_strategy: Optional[ProxyRotationStrategy] = None,
|
||||
# Browser Location and Identity Parameters
|
||||
locale: Optional[str] = None,
|
||||
timezone_id: Optional[str] = None,
|
||||
geolocation: Optional[GeolocationConfig] = None,
|
||||
# SSL Parameters
|
||||
fetch_ssl_certificate: bool = False,
|
||||
# Caching Parameters
|
||||
@@ -692,9 +926,12 @@ class CrawlerRunConfig():
|
||||
screenshot_wait_for: float = None,
|
||||
screenshot_height_threshold: int = SCREENSHOT_HEIGHT_TRESHOLD,
|
||||
pdf: bool = False,
|
||||
capture_mhtml: bool = False,
|
||||
image_description_min_word_threshold: int = IMAGE_DESCRIPTION_MIN_WORD_THRESHOLD,
|
||||
image_score_threshold: int = IMAGE_SCORE_THRESHOLD,
|
||||
table_score_threshold: int = 7,
|
||||
exclude_external_images: bool = False,
|
||||
exclude_all_images: bool = False,
|
||||
# Link and Domain Handling Parameters
|
||||
exclude_social_media_domains: list = None,
|
||||
exclude_external_links: bool = False,
|
||||
@@ -704,6 +941,9 @@ class CrawlerRunConfig():
|
||||
# Debugging and Logging Parameters
|
||||
verbose: bool = True,
|
||||
log_console: bool = False,
|
||||
# Network and Console Capturing Parameters
|
||||
capture_network_requests: bool = False,
|
||||
capture_console_messages: bool = False,
|
||||
# Connection Parameters
|
||||
method: str = "GET",
|
||||
stream: bool = False,
|
||||
@@ -714,6 +954,8 @@ class CrawlerRunConfig():
|
||||
user_agent_generator_config: dict = {},
|
||||
# Deep Crawl Parameters
|
||||
deep_crawl_strategy: Optional[DeepCrawlStrategy] = None,
|
||||
# Experimental Parameters
|
||||
experimental: Dict[str, Any] = None,
|
||||
):
|
||||
# TODO: Planning to set properties dynamically based on the __init__ signature
|
||||
self.url = url
|
||||
@@ -725,6 +967,7 @@ class CrawlerRunConfig():
|
||||
self.markdown_generator = markdown_generator
|
||||
self.only_text = only_text
|
||||
self.css_selector = css_selector
|
||||
self.target_elements = target_elements or []
|
||||
self.excluded_tags = excluded_tags or []
|
||||
self.excluded_selector = excluded_selector or ""
|
||||
self.keep_data_attributes = keep_data_attributes
|
||||
@@ -735,6 +978,11 @@ class CrawlerRunConfig():
|
||||
self.scraping_strategy = scraping_strategy or WebScrapingStrategy()
|
||||
self.proxy_config = proxy_config
|
||||
self.proxy_rotation_strategy = proxy_rotation_strategy
|
||||
|
||||
# Browser Location and Identity Parameters
|
||||
self.locale = locale
|
||||
self.timezone_id = timezone_id
|
||||
self.geolocation = geolocation
|
||||
|
||||
# SSL Parameters
|
||||
self.fetch_ssl_certificate = fetch_ssl_certificate
|
||||
@@ -776,9 +1024,12 @@ class CrawlerRunConfig():
|
||||
self.screenshot_wait_for = screenshot_wait_for
|
||||
self.screenshot_height_threshold = screenshot_height_threshold
|
||||
self.pdf = pdf
|
||||
self.capture_mhtml = capture_mhtml
|
||||
self.image_description_min_word_threshold = image_description_min_word_threshold
|
||||
self.image_score_threshold = image_score_threshold
|
||||
self.exclude_external_images = exclude_external_images
|
||||
self.exclude_all_images = exclude_all_images
|
||||
self.table_score_threshold = table_score_threshold
|
||||
|
||||
# Link and Domain Handling Parameters
|
||||
self.exclude_social_media_domains = (
|
||||
@@ -792,6 +1043,10 @@ class CrawlerRunConfig():
|
||||
# Debugging and Logging Parameters
|
||||
self.verbose = verbose
|
||||
self.log_console = log_console
|
||||
|
||||
# Network and Console Capturing Parameters
|
||||
self.capture_network_requests = capture_network_requests
|
||||
self.capture_console_messages = capture_console_messages
|
||||
|
||||
# Connection Parameters
|
||||
self.stream = stream
|
||||
@@ -825,6 +1080,9 @@ class CrawlerRunConfig():
|
||||
|
||||
# Deep Crawl Parameters
|
||||
self.deep_crawl_strategy = deep_crawl_strategy
|
||||
|
||||
# Experimental Parameters
|
||||
self.experimental = experimental or {}
|
||||
|
||||
|
||||
def __getattr__(self, name):
|
||||
@@ -854,6 +1112,7 @@ class CrawlerRunConfig():
|
||||
markdown_generator=kwargs.get("markdown_generator"),
|
||||
only_text=kwargs.get("only_text", False),
|
||||
css_selector=kwargs.get("css_selector"),
|
||||
target_elements=kwargs.get("target_elements", []),
|
||||
excluded_tags=kwargs.get("excluded_tags", []),
|
||||
excluded_selector=kwargs.get("excluded_selector", ""),
|
||||
keep_data_attributes=kwargs.get("keep_data_attributes", False),
|
||||
@@ -864,6 +1123,10 @@ class CrawlerRunConfig():
|
||||
scraping_strategy=kwargs.get("scraping_strategy"),
|
||||
proxy_config=kwargs.get("proxy_config"),
|
||||
proxy_rotation_strategy=kwargs.get("proxy_rotation_strategy"),
|
||||
# Browser Location and Identity Parameters
|
||||
locale=kwargs.get("locale", None),
|
||||
timezone_id=kwargs.get("timezone_id", None),
|
||||
geolocation=kwargs.get("geolocation", None),
|
||||
# SSL Parameters
|
||||
fetch_ssl_certificate=kwargs.get("fetch_ssl_certificate", False),
|
||||
# Caching Parameters
|
||||
@@ -902,6 +1165,7 @@ class CrawlerRunConfig():
|
||||
"screenshot_height_threshold", SCREENSHOT_HEIGHT_TRESHOLD
|
||||
),
|
||||
pdf=kwargs.get("pdf", False),
|
||||
capture_mhtml=kwargs.get("capture_mhtml", False),
|
||||
image_description_min_word_threshold=kwargs.get(
|
||||
"image_description_min_word_threshold",
|
||||
IMAGE_DESCRIPTION_MIN_WORD_THRESHOLD,
|
||||
@@ -909,6 +1173,8 @@ class CrawlerRunConfig():
|
||||
image_score_threshold=kwargs.get(
|
||||
"image_score_threshold", IMAGE_SCORE_THRESHOLD
|
||||
),
|
||||
table_score_threshold=kwargs.get("table_score_threshold", 7),
|
||||
exclude_all_images=kwargs.get("exclude_all_images", False),
|
||||
exclude_external_images=kwargs.get("exclude_external_images", False),
|
||||
# Link and Domain Handling Parameters
|
||||
exclude_social_media_domains=kwargs.get(
|
||||
@@ -921,6 +1187,9 @@ class CrawlerRunConfig():
|
||||
# Debugging and Logging Parameters
|
||||
verbose=kwargs.get("verbose", True),
|
||||
log_console=kwargs.get("log_console", False),
|
||||
# Network and Console Capturing Parameters
|
||||
capture_network_requests=kwargs.get("capture_network_requests", False),
|
||||
capture_console_messages=kwargs.get("capture_console_messages", False),
|
||||
# Connection Parameters
|
||||
method=kwargs.get("method", "GET"),
|
||||
stream=kwargs.get("stream", False),
|
||||
@@ -931,6 +1200,8 @@ class CrawlerRunConfig():
|
||||
# Deep Crawl Parameters
|
||||
deep_crawl_strategy=kwargs.get("deep_crawl_strategy"),
|
||||
url=kwargs.get("url"),
|
||||
# Experimental Parameters
|
||||
experimental=kwargs.get("experimental"),
|
||||
)
|
||||
|
||||
# Create a function that returns a dict of the object
|
||||
@@ -954,6 +1225,7 @@ class CrawlerRunConfig():
|
||||
"markdown_generator": self.markdown_generator,
|
||||
"only_text": self.only_text,
|
||||
"css_selector": self.css_selector,
|
||||
"target_elements": self.target_elements,
|
||||
"excluded_tags": self.excluded_tags,
|
||||
"excluded_selector": self.excluded_selector,
|
||||
"keep_data_attributes": self.keep_data_attributes,
|
||||
@@ -964,6 +1236,9 @@ class CrawlerRunConfig():
|
||||
"scraping_strategy": self.scraping_strategy,
|
||||
"proxy_config": self.proxy_config,
|
||||
"proxy_rotation_strategy": self.proxy_rotation_strategy,
|
||||
"locale": self.locale,
|
||||
"timezone_id": self.timezone_id,
|
||||
"geolocation": self.geolocation,
|
||||
"fetch_ssl_certificate": self.fetch_ssl_certificate,
|
||||
"cache_mode": self.cache_mode,
|
||||
"session_id": self.session_id,
|
||||
@@ -995,8 +1270,11 @@ class CrawlerRunConfig():
|
||||
"screenshot_wait_for": self.screenshot_wait_for,
|
||||
"screenshot_height_threshold": self.screenshot_height_threshold,
|
||||
"pdf": self.pdf,
|
||||
"capture_mhtml": self.capture_mhtml,
|
||||
"image_description_min_word_threshold": self.image_description_min_word_threshold,
|
||||
"image_score_threshold": self.image_score_threshold,
|
||||
"table_score_threshold": self.table_score_threshold,
|
||||
"exclude_all_images": self.exclude_all_images,
|
||||
"exclude_external_images": self.exclude_external_images,
|
||||
"exclude_social_media_domains": self.exclude_social_media_domains,
|
||||
"exclude_external_links": self.exclude_external_links,
|
||||
@@ -1005,6 +1283,8 @@ class CrawlerRunConfig():
|
||||
"exclude_internal_links": self.exclude_internal_links,
|
||||
"verbose": self.verbose,
|
||||
"log_console": self.log_console,
|
||||
"capture_network_requests": self.capture_network_requests,
|
||||
"capture_console_messages": self.capture_console_messages,
|
||||
"method": self.method,
|
||||
"stream": self.stream,
|
||||
"check_robots_txt": self.check_robots_txt,
|
||||
@@ -1013,6 +1293,7 @@ class CrawlerRunConfig():
|
||||
"user_agent_generator_config": self.user_agent_generator_config,
|
||||
"deep_crawl_strategy": self.deep_crawl_strategy,
|
||||
"url": self.url,
|
||||
"experimental": self.experimental,
|
||||
}
|
||||
|
||||
def clone(self, **kwargs):
|
||||
@@ -1042,12 +1323,19 @@ class CrawlerRunConfig():
|
||||
return CrawlerRunConfig.from_kwargs(config_dict)
|
||||
|
||||
|
||||
class LlmConfig:
|
||||
class LLMConfig:
|
||||
def __init__(
|
||||
self,
|
||||
provider: str = DEFAULT_PROVIDER,
|
||||
api_token: Optional[str] = None,
|
||||
base_url: Optional[str] = None,
|
||||
temprature: Optional[float] = None,
|
||||
max_tokens: Optional[int] = None,
|
||||
top_p: Optional[float] = None,
|
||||
frequency_penalty: Optional[float] = None,
|
||||
presence_penalty: Optional[float] = None,
|
||||
stop: Optional[List[str]] = None,
|
||||
n: Optional[int] = None,
|
||||
):
|
||||
"""Configuaration class for LLM provider and API token."""
|
||||
self.provider = provider
|
||||
@@ -1056,25 +1344,54 @@ class LlmConfig:
|
||||
elif api_token and api_token.startswith("env:"):
|
||||
self.api_token = os.getenv(api_token[4:])
|
||||
else:
|
||||
self.api_token = PROVIDER_MODELS.get(provider, "no-token") or os.getenv(
|
||||
"OPENAI_API_KEY"
|
||||
)
|
||||
# Check if given provider starts with any of key in PROVIDER_MODELS_PREFIXES
|
||||
# If not, check if it is in PROVIDER_MODELS
|
||||
prefixes = PROVIDER_MODELS_PREFIXES.keys()
|
||||
if any(provider.startswith(prefix) for prefix in prefixes):
|
||||
selected_prefix = next(
|
||||
(prefix for prefix in prefixes if provider.startswith(prefix)),
|
||||
None,
|
||||
)
|
||||
self.api_token = PROVIDER_MODELS_PREFIXES.get(selected_prefix)
|
||||
else:
|
||||
self.provider = DEFAULT_PROVIDER
|
||||
self.api_token = os.getenv(DEFAULT_PROVIDER_API_KEY)
|
||||
self.base_url = base_url
|
||||
|
||||
self.temprature = temprature
|
||||
self.max_tokens = max_tokens
|
||||
self.top_p = top_p
|
||||
self.frequency_penalty = frequency_penalty
|
||||
self.presence_penalty = presence_penalty
|
||||
self.stop = stop
|
||||
self.n = n
|
||||
|
||||
@staticmethod
|
||||
def from_kwargs(kwargs: dict) -> "LlmConfig":
|
||||
return LlmConfig(
|
||||
def from_kwargs(kwargs: dict) -> "LLMConfig":
|
||||
return LLMConfig(
|
||||
provider=kwargs.get("provider", DEFAULT_PROVIDER),
|
||||
api_token=kwargs.get("api_token"),
|
||||
base_url=kwargs.get("base_url"),
|
||||
temprature=kwargs.get("temprature"),
|
||||
max_tokens=kwargs.get("max_tokens"),
|
||||
top_p=kwargs.get("top_p"),
|
||||
frequency_penalty=kwargs.get("frequency_penalty"),
|
||||
presence_penalty=kwargs.get("presence_penalty"),
|
||||
stop=kwargs.get("stop"),
|
||||
n=kwargs.get("n")
|
||||
)
|
||||
|
||||
def to_dict(self):
|
||||
return {
|
||||
"provider": self.provider,
|
||||
"api_token": self.api_token,
|
||||
"base_url": self.base_url
|
||||
"base_url": self.base_url,
|
||||
"temprature": self.temprature,
|
||||
"max_tokens": self.max_tokens,
|
||||
"top_p": self.top_p,
|
||||
"frequency_penalty": self.frequency_penalty,
|
||||
"presence_penalty": self.presence_penalty,
|
||||
"stop": self.stop,
|
||||
"n": self.n
|
||||
}
|
||||
|
||||
def clone(self, **kwargs):
|
||||
@@ -1084,8 +1401,10 @@ class LlmConfig:
|
||||
**kwargs: Key-value pairs of configuration options to update
|
||||
|
||||
Returns:
|
||||
LLMConfig: A new instance with the specified updates
|
||||
llm_config: A new instance with the specified updates
|
||||
"""
|
||||
config_dict = self.to_dict()
|
||||
config_dict.update(kwargs)
|
||||
return LlmConfig.from_kwargs(config_dict)
|
||||
return LLMConfig.from_kwargs(config_dict)
|
||||
|
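A brief sketch of the renamed LLMConfig; the provider string is a placeholder, and the "env:" prefix resolves the token from an environment variable as in the constructor above:
from crawl4ai import LLMConfig

llm = LLMConfig(provider="openai/gpt-4o-mini",       # placeholder provider id
                api_token="env:OPENAI_API_KEY")      # "env:" prefix -> os.getenv("OPENAI_API_KEY")
tuned = llm.clone(temprature=0.2, max_tokens=800)    # note: the field really is spelled "temprature" in this codebase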
||||
|
||||
|
||||
@@ -24,7 +24,7 @@ from .browser_manager import BrowserManager
|
||||
|
||||
import aiofiles
|
||||
import aiohttp
|
||||
import cchardet
|
||||
import chardet
|
||||
from aiohttp.client import ClientTimeout
|
||||
from urllib.parse import urlparse
|
||||
from types import MappingProxyType
|
||||
@@ -130,6 +130,8 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
Close the browser and clean up resources.
|
||||
"""
|
||||
await self.browser_manager.close()
|
||||
# Explicitly reset the static Playwright instance
|
||||
BrowserManager._playwright_instance = None
|
||||
|
||||
async def kill_session(self, session_id: str):
|
||||
"""
|
||||
@@ -409,7 +411,11 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
|
||||
user_agent = kwargs.get("user_agent", self.user_agent)
|
||||
# Use browser_manager to get a fresh page & context assigned to this session_id
|
||||
page, context = await self.browser_manager.get_page(session_id, user_agent)
|
||||
page, context = await self.browser_manager.get_page(CrawlerRunConfig(
|
||||
session_id=session_id,
|
||||
user_agent=user_agent,
|
||||
**kwargs,
|
||||
))
|
||||
return session_id
|
||||
|
||||
async def crawl(
|
||||
@@ -435,7 +441,7 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
status_code = 200 # Default for local/raw HTML
|
||||
screenshot_data = None
|
||||
|
||||
if url.startswith(("http://", "https://")):
|
||||
if url.startswith(("http://", "https://", "view-source:")):
|
||||
return await self._crawl_web(url, config)
|
||||
|
||||
elif url.startswith("file://"):
|
||||
@@ -447,12 +453,17 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
html = f.read()
|
||||
if config.screenshot:
|
||||
screenshot_data = await self._generate_screenshot_from_html(html)
|
||||
if config.capture_console_messages:
|
||||
page, context = await self.browser_manager.get_page(crawlerRunConfig=config)
|
||||
captured_console = await self._capture_console_messages(page, url)
|
||||
|
||||
return AsyncCrawlResponse(
|
||||
html=html,
|
||||
response_headers=response_headers,
|
||||
status_code=status_code,
|
||||
screenshot=screenshot_data,
|
||||
get_delayed_content=None,
|
||||
console_messages=captured_console,
|
||||
)
|
||||
|
||||
elif url.startswith("raw:") or url.startswith("raw://"):
|
||||
@@ -478,6 +489,7 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
) -> AsyncCrawlResponse:
|
||||
"""
|
||||
Internal method to crawl web URLs with the specified configuration.
|
||||
Includes optional network and console capturing.
|
||||
|
||||
Args:
|
||||
url (str): The web URL to crawl
|
||||
@@ -494,6 +506,10 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
|
||||
# Reset downloaded files list for new crawl
|
||||
self._downloaded_files = []
|
||||
|
||||
# Initialize capture lists
|
||||
captured_requests = []
|
||||
captured_console = []
|
||||
|
||||
# Handle user agent with magic mode
|
||||
user_agent_to_override = config.user_agent
|
||||
@@ -507,10 +523,12 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
# Get page for session
|
||||
page, context = await self.browser_manager.get_page(crawlerRunConfig=config)
|
||||
|
||||
# await page.goto(URL)
|
||||
|
||||
# Add default cookie
|
||||
await context.add_cookies(
|
||||
[{"name": "cookiesEnabled", "value": "true", "url": url}]
|
||||
)
|
||||
# await context.add_cookies(
|
||||
# [{"name": "cookiesEnabled", "value": "true", "url": url}]
|
||||
# )
|
||||
|
||||
# Handle navigator overrides
|
||||
if config.override_navigator or config.simulate_user or config.magic:
|
||||
@@ -519,23 +537,169 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
# Call hook after page creation
|
||||
await self.execute_hook("on_page_context_created", page, context=context, config=config)
|
||||
|
||||
# Network Request Capturing
|
||||
if config.capture_network_requests:
|
||||
async def handle_request_capture(request):
|
||||
try:
|
||||
post_data_str = None
|
||||
try:
|
||||
# Be cautious with large post data
|
||||
post_data = request.post_data_buffer
|
||||
if post_data:
|
||||
# Attempt to decode, fallback to base64 or size indication
|
||||
try:
|
||||
post_data_str = post_data.decode('utf-8', errors='replace')
|
||||
except UnicodeDecodeError:
|
||||
post_data_str = f"[Binary data: {len(post_data)} bytes]"
|
||||
except Exception:
|
||||
post_data_str = "[Error retrieving post data]"
|
||||
|
||||
captured_requests.append({
|
||||
"event_type": "request",
|
||||
"url": request.url,
|
||||
"method": request.method,
|
||||
"headers": dict(request.headers), # Convert Header dict
|
||||
"post_data": post_data_str,
|
||||
"resource_type": request.resource_type,
|
||||
"is_navigation_request": request.is_navigation_request(),
|
||||
"timestamp": time.time()
|
||||
})
|
||||
except Exception as e:
|
||||
if self.logger:
|
||||
self.logger.warning(f"Error capturing request details for {request.url}: {e}", tag="CAPTURE")
|
||||
captured_requests.append({"event_type": "request_capture_error", "url": request.url, "error": str(e), "timestamp": time.time()})
|
||||
|
||||
async def handle_response_capture(response):
|
||||
try:
|
||||
try:
|
||||
# body = await response.body()
|
||||
# json_body = await response.json()
|
||||
text_body = await response.text()
|
||||
except Exception as e:
|
||||
body = None
|
||||
# json_body = None
|
||||
# text_body = None
|
||||
captured_requests.append({
|
||||
"event_type": "response",
|
||||
"url": response.url,
|
||||
"status": response.status,
|
||||
"status_text": response.status_text,
|
||||
"headers": dict(response.headers), # Convert Header dict
|
||||
"from_service_worker": response.from_service_worker,
|
||||
"request_timing": response.request.timing, # Detailed timing info
|
||||
"timestamp": time.time(),
|
||||
"body" : {
|
||||
# "raw": body,
|
||||
# "json": json_body,
|
||||
"text": text_body
|
||||
}
|
||||
})
|
||||
except Exception as e:
|
||||
if self.logger:
|
||||
self.logger.warning(f"Error capturing response details for {response.url}: {e}", tag="CAPTURE")
|
||||
captured_requests.append({"event_type": "response_capture_error", "url": response.url, "error": str(e), "timestamp": time.time()})
|
||||
|
||||
async def handle_request_failed_capture(request):
|
||||
try:
|
||||
captured_requests.append({
|
||||
"event_type": "request_failed",
|
||||
"url": request.url,
|
||||
"method": request.method,
|
||||
"resource_type": request.resource_type,
|
||||
"failure_text": str(request.failure) if request.failure else "Unknown failure",
|
||||
"timestamp": time.time()
|
||||
})
|
||||
except Exception as e:
|
||||
if self.logger:
|
||||
self.logger.warning(f"Error capturing request failed details for {request.url}: {e}", tag="CAPTURE")
|
||||
captured_requests.append({"event_type": "request_failed_capture_error", "url": request.url, "error": str(e), "timestamp": time.time()})
|
||||
|
||||
page.on("request", handle_request_capture)
|
||||
page.on("response", handle_response_capture)
|
||||
page.on("requestfailed", handle_request_failed_capture)
|
||||
|
||||
# Console Message Capturing
|
||||
if config.capture_console_messages:
|
||||
def handle_console_capture(msg):
|
||||
try:
|
||||
message_type = "unknown"
|
||||
try:
|
||||
message_type = msg.type
|
||||
except:
|
||||
pass
|
||||
|
||||
message_text = "unknown"
|
||||
try:
|
||||
message_text = msg.text
|
||||
except:
|
||||
pass
|
||||
|
||||
# Basic console message with minimal content
|
||||
entry = {
|
||||
"type": message_type,
|
||||
"text": message_text,
|
||||
"timestamp": time.time()
|
||||
}
|
||||
|
||||
captured_console.append(entry)
|
||||
|
||||
except Exception as e:
|
||||
if self.logger:
|
||||
self.logger.warning(f"Error capturing console message: {e}", tag="CAPTURE")
|
||||
# Still add something to the list even on error
|
||||
captured_console.append({
|
||||
"type": "console_capture_error",
|
||||
"error": str(e),
|
||||
"timestamp": time.time()
|
||||
})
|
||||
|
||||
def handle_pageerror_capture(err):
|
||||
try:
|
||||
error_message = "Unknown error"
|
||||
try:
|
||||
error_message = err.message
|
||||
except:
|
||||
pass
|
||||
|
||||
error_stack = ""
|
||||
try:
|
||||
error_stack = err.stack
|
||||
except:
|
||||
pass
|
||||
|
||||
captured_console.append({
|
||||
"type": "error",
|
||||
"text": error_message,
|
||||
"stack": error_stack,
|
||||
"timestamp": time.time()
|
||||
})
|
||||
except Exception as e:
|
||||
if self.logger:
|
||||
self.logger.warning(f"Error capturing page error: {e}", tag="CAPTURE")
|
||||
captured_console.append({
|
||||
"type": "pageerror_capture_error",
|
||||
"error": str(e),
|
||||
"timestamp": time.time()
|
||||
})
|
||||
|
||||
# Add event listeners directly
|
||||
page.on("console", handle_console_capture)
|
||||
page.on("pageerror", handle_pageerror_capture)
|
||||
|
||||
# Set up console logging if requested
|
||||
if config.log_console:
|
||||
|
||||
def log_consol(
|
||||
msg, console_log_type="debug"
|
||||
): # Corrected the parameter syntax
|
||||
if console_log_type == "error":
|
||||
self.logger.error(
|
||||
message=f"Console error: {msg}", # Use f-string for variable interpolation
|
||||
tag="CONSOLE",
|
||||
params={"msg": msg.text},
|
||||
tag="CONSOLE"
|
||||
)
|
||||
elif console_log_type == "debug":
|
||||
self.logger.debug(
|
||||
message=f"Console: {msg}", # Use f-string for variable interpolation
|
||||
tag="CONSOLE",
|
||||
params={"msg": msg.text},
|
||||
tag="CONSOLE"
|
||||
)
|
||||
|
||||
page.on("console", log_consol)
|
||||
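A usage sketch for the two capture flags wired up above; the URL is a placeholder, and the result field names follow the network_requests / console_messages fields added to the crawl response later in this diff:
import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig

async def main():
    cfg = CrawlerRunConfig(capture_network_requests=True, capture_console_messages=True)
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(url="https://example.com", config=cfg)
        # Both lists stay None unless the corresponding flag is enabled.
        print(len(result.network_requests or []), "network events")
        print(len(result.console_messages or []), "console messages")

asyncio.run(main())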
@@ -562,14 +726,15 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
|
||||
try:
|
||||
# Generate a unique nonce for this request
|
||||
nonce = hashlib.sha256(os.urandom(32)).hexdigest()
|
||||
if config.experimental.get("use_csp_nonce", False):
|
||||
nonce = hashlib.sha256(os.urandom(32)).hexdigest()
|
||||
|
||||
# Add CSP headers to the request
|
||||
await page.set_extra_http_headers(
|
||||
{
|
||||
"Content-Security-Policy": f"default-src 'self'; script-src 'self' 'nonce-{nonce}' 'strict-dynamic'"
|
||||
}
|
||||
)
|
||||
# Add CSP headers to the request
|
||||
await page.set_extra_http_headers(
|
||||
{
|
||||
"Content-Security-Policy": f"default-src 'self'; script-src 'self' 'nonce-{nonce}' 'strict-dynamic'"
|
||||
}
|
||||
)
|
||||
|
||||
response = await page.goto(
|
||||
url, wait_until=config.wait_until, timeout=config.page_timeout
|
||||
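The nonce generation above is now gated behind an experimental flag; a minimal sketch of opting in, with the key name taken from the check above:
from crawl4ai import CrawlerRunConfig

cfg = CrawlerRunConfig(experimental={"use_csp_nonce": True})   # enables the CSP nonce headers shown above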
@@ -619,7 +784,7 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
except Error:
|
||||
visibility_info = await self.check_visibility(page)
|
||||
|
||||
if self.config.verbose:
|
||||
if self.browser_config.config.verbose:
|
||||
self.logger.debug(
|
||||
message="Body visibility info: {info}",
|
||||
tag="DEBUG",
|
||||
@@ -767,6 +932,7 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
# Handle wait_for condition
|
||||
# Todo: Decide how to handle this
|
||||
if not config.wait_for and config.css_selector and False:
|
||||
# if not config.wait_for and config.css_selector:
|
||||
config.wait_for = f"css:{config.css_selector}"
|
||||
|
||||
if config.wait_for:
|
||||
@@ -806,20 +972,48 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
if config.remove_overlay_elements:
|
||||
await self.remove_overlay_elements(page)
|
||||
|
||||
# Get final HTML content
|
||||
html = await page.content()
|
||||
if config.css_selector:
|
||||
try:
|
||||
# Handle comma-separated selectors by splitting them
|
||||
selectors = [s.strip() for s in config.css_selector.split(',')]
|
||||
html_parts = []
|
||||
|
||||
for selector in selectors:
|
||||
try:
|
||||
content = await page.evaluate(
|
||||
f"""Array.from(document.querySelectorAll("{selector}"))
|
||||
.map(el => el.outerHTML)
|
||||
.join('')"""
|
||||
)
|
||||
html_parts.append(content)
|
||||
except Error as e:
|
||||
print(f"Warning: Could not get content for selector '{selector}': {str(e)}")
|
||||
|
||||
# Wrap in a div to create a valid HTML structure
|
||||
html = f"<div class='crawl4ai-result'>\n" + "\n".join(html_parts) + "\n</div>"
|
||||
except Error as e:
|
||||
raise RuntimeError(f"Failed to extract HTML content: {str(e)}")
|
||||
else:
|
||||
html = await page.content()
|
||||
|
||||
# # Get final HTML content
|
||||
# html = await page.content()
|
||||
await self.execute_hook(
|
||||
"before_return_html", page=page, html=html, context=context, config=config
|
||||
)
|
||||
|
||||
# Handle PDF and screenshot generation
|
||||
# Handle PDF, MHTML and screenshot generation
|
||||
start_export_time = time.perf_counter()
|
||||
pdf_data = None
|
||||
screenshot_data = None
|
||||
mhtml_data = None
|
||||
|
||||
if config.pdf:
|
||||
pdf_data = await self.export_pdf(page)
|
||||
|
||||
if config.capture_mhtml:
|
||||
mhtml_data = await self.capture_mhtml(page)
|
||||
|
||||
if config.screenshot:
|
||||
if config.screenshot_wait_for:
|
||||
await asyncio.sleep(config.screenshot_wait_for)
|
||||
@@ -827,9 +1021,9 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
page, screenshot_height_threshold=config.screenshot_height_threshold
|
||||
)
|
||||
|
||||
if screenshot_data or pdf_data:
|
||||
if screenshot_data or pdf_data or mhtml_data:
|
||||
self.logger.info(
|
||||
message="Exporting PDF and taking screenshot took {duration:.2f}s",
|
||||
message="Exporting media (PDF/MHTML/screenshot) took {duration:.2f}s",
|
||||
tag="EXPORT",
|
||||
params={"duration": time.perf_counter() - start_export_time},
|
||||
)
|
||||
@@ -852,12 +1046,16 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
status_code=status_code,
|
||||
screenshot=screenshot_data,
|
||||
pdf_data=pdf_data,
|
||||
mhtml_data=mhtml_data,
|
||||
get_delayed_content=get_delayed_content,
|
||||
ssl_certificate=ssl_cert,
|
||||
downloaded_files=(
|
||||
self._downloaded_files if self._downloaded_files else None
|
||||
),
|
||||
redirected_url=redirected_url,
|
||||
# Include captured data if enabled
|
||||
network_requests=captured_requests if config.capture_network_requests else None,
|
||||
console_messages=captured_console if config.capture_console_messages else None,
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
@@ -866,6 +1064,15 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
finally:
|
||||
# If no session_id is given we should close the page
|
||||
if not config.session_id:
|
||||
# Detach listeners before closing to prevent potential errors during close
|
||||
if config.capture_network_requests:
|
||||
page.remove_listener("request", handle_request_capture)
|
||||
page.remove_listener("response", handle_response_capture)
|
||||
page.remove_listener("requestfailed", handle_request_failed_capture)
|
||||
if config.capture_console_messages:
|
||||
page.remove_listener("console", handle_console_capture)
|
||||
page.remove_listener("pageerror", handle_pageerror_capture)
|
||||
|
||||
await page.close()
|
||||
|
||||
async def _handle_full_page_scan(self, page: Page, scroll_delay: float = 0.1):
|
||||
@@ -1028,7 +1235,107 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
"""
|
||||
pdf_data = await page.pdf(print_background=True)
|
||||
return pdf_data
|
||||
|
||||
async def capture_mhtml(self, page: Page) -> Optional[str]:
|
||||
"""
|
||||
Captures the current page as MHTML using CDP.
|
||||
|
||||
MHTML (MIME HTML) is a web page archive format that combines the HTML content
|
||||
with its resources (images, CSS, etc.) into a single MIME-encoded file.
|
||||
|
||||
Args:
|
||||
page (Page): The Playwright page object
|
||||
|
||||
Returns:
|
||||
Optional[str]: The MHTML content as a string, or None if there was an error
|
||||
"""
|
||||
try:
|
||||
# Ensure the page is fully loaded before capturing
|
||||
try:
|
||||
# Wait for DOM content and network to be idle
|
||||
await page.wait_for_load_state("domcontentloaded", timeout=5000)
|
||||
await page.wait_for_load_state("networkidle", timeout=5000)
|
||||
|
||||
# Give a little extra time for JavaScript execution
|
||||
await page.wait_for_timeout(1000)
|
||||
|
||||
# Wait for any animations to complete
|
||||
await page.evaluate("""
|
||||
() => new Promise(resolve => {
|
||||
// First requestAnimationFrame gets scheduled after the next repaint
|
||||
requestAnimationFrame(() => {
|
||||
// Second requestAnimationFrame gets called after all animations complete
|
||||
requestAnimationFrame(resolve);
|
||||
});
|
||||
})
|
||||
""")
|
||||
except Error as e:
|
||||
if self.logger:
|
||||
self.logger.warning(
|
||||
message="Wait for load state timed out: {error}",
|
||||
tag="MHTML",
|
||||
params={"error": str(e)},
|
||||
)
|
||||
|
||||
# Create a new CDP session
|
||||
cdp_session = await page.context.new_cdp_session(page)
|
||||
|
||||
# Call Page.captureSnapshot with format "mhtml"
|
||||
result = await cdp_session.send("Page.captureSnapshot", {"format": "mhtml"})
|
||||
|
||||
# The result contains a 'data' field with the MHTML content
|
||||
mhtml_content = result.get("data")
|
||||
|
||||
# Detach the CDP session to clean up resources
|
||||
await cdp_session.detach()
|
||||
|
||||
return mhtml_content
|
||||
except Exception as e:
|
||||
# Log the error but don't raise it - we'll just return None for the MHTML
|
||||
if self.logger:
|
||||
self.logger.error(
|
||||
message="Failed to capture MHTML: {error}",
|
||||
tag="MHTML",
|
||||
params={"error": str(e)},
|
||||
)
|
||||
return None
|
||||
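A hedged usage sketch for the MHTML capture added above; the crawl response carries the archive as mhtml_data per this diff, and the field name on the final result object is an assumption:
import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig

async def main():
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(url="https://example.com",
                                    config=CrawlerRunConfig(capture_mhtml=True))
        if result.mhtml:                                    # assumed result field; None when capture fails
            with open("page.mhtml", "w", encoding="utf-8") as f:
                f.write(result.mhtml)

asyncio.run(main())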
|
||||
async def _capture_console_messages(
|
||||
self, page: Page, file_path: str
|
||||
) -> List[Dict[str, Union[str, float]]]:
|
||||
"""
|
||||
Captures console messages from the page.
|
||||
Args:
|
||||
|
||||
page (Page): The Playwright page object
|
||||
Returns:
|
||||
List[Dict[str, Union[str, float]]]: A list of captured console messages
|
||||
"""
|
||||
captured_console = []
|
||||
|
||||
def handle_console_message(msg):
|
||||
try:
|
||||
message_type = msg.type
|
||||
message_text = msg.text
|
||||
|
||||
entry = {
|
||||
"type": message_type,
|
||||
"text": message_text,
|
||||
"timestamp": time.time(),
|
||||
}
|
||||
captured_console.append(entry)
|
||||
except Exception as e:
|
||||
if self.logger:
|
||||
self.logger.warning(
|
||||
f"Error capturing console message: {e}", tag="CAPTURE"
|
||||
)
|
||||
|
||||
page.on("console", handle_console_message)
|
||||
|
||||
await page.goto(file_path)
|
||||
|
||||
return captured_console
|
||||
|
||||
async def take_screenshot(self, page, **kwargs) -> str:
|
||||
"""
|
||||
Take a screenshot of the current page.
|
||||
@@ -1160,8 +1467,8 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
buffered = BytesIO()
|
||||
img.save(buffered, format="JPEG")
|
||||
return base64.b64encode(buffered.getvalue()).decode("utf-8")
|
||||
finally:
|
||||
await page.close()
|
||||
# finally:
|
||||
# await page.close()
|
||||
|
||||
async def take_screenshot_naive(self, page: Page) -> str:
|
||||
"""
|
||||
@@ -1194,8 +1501,8 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
buffered = BytesIO()
|
||||
img.save(buffered, format="JPEG")
|
||||
return base64.b64encode(buffered.getvalue()).decode("utf-8")
|
||||
finally:
|
||||
await page.close()
|
||||
# finally:
|
||||
# await page.close()
|
||||
|
||||
async def export_storage_state(self, path: str = None) -> dict:
|
||||
"""
|
||||
@@ -1685,7 +1992,7 @@ class AsyncHTTPCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
await self.start()
|
||||
yield self._session
|
||||
finally:
|
||||
await self.close()
|
||||
pass
|
||||
|
||||
def set_hook(self, hook_type: str, hook_func: Callable) -> None:
|
||||
if hook_type in self.hooks:
|
||||
@@ -1801,7 +2108,7 @@ class AsyncHTTPCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
|
||||
encoding = response.charset
|
||||
if not encoding:
|
||||
encoding = cchardet.detect(content.tobytes())['encoding'] or 'utf-8'
|
||||
encoding = chardet.detect(content.tobytes())['encoding'] or 'utf-8'
|
||||
|
||||
result = AsyncCrawlResponse(
|
||||
html=content.tobytes().decode(encoding, errors='replace'),
|
||||
|
||||
@@ -4,19 +4,14 @@ import aiosqlite
|
||||
import asyncio
|
||||
from typing import Optional, Dict
|
||||
from contextlib import asynccontextmanager
|
||||
import logging
|
||||
import json # Added for serialization/deserialization
|
||||
from .utils import ensure_content_dirs, generate_content_hash
|
||||
import json
|
||||
from .models import CrawlResult, MarkdownGenerationResult, StringCompatibleMarkdown
|
||||
import aiofiles
|
||||
from .utils import VersionManager
|
||||
from .async_logger import AsyncLogger
|
||||
from .utils import get_error_context, create_box_message
|
||||
|
||||
# Set up logging
|
||||
# logging.basicConfig(level=logging.INFO)
|
||||
# logger = logging.getLogger(__name__)
|
||||
# logger.setLevel(logging.INFO)
|
||||
from .utils import ensure_content_dirs, generate_content_hash
|
||||
from .utils import VersionManager
|
||||
from .utils import get_error_context, create_box_message
|
||||
|
||||
base_directory = DB_PATH = os.path.join(
|
||||
os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".crawl4ai"
|
||||
@@ -176,7 +171,10 @@ class AsyncDatabaseManager:
|
||||
f"Code context:\n{error_context['code_context']}"
|
||||
)
|
||||
self.logger.error(
|
||||
message=create_box_message(error_message, type="error"),
|
||||
message="{error}",
|
||||
tag="ERROR",
|
||||
params={"error": str(error_message)},
|
||||
boxes=["error"],
|
||||
)
|
||||
|
||||
raise
|
||||
@@ -194,7 +192,10 @@ class AsyncDatabaseManager:
|
||||
f"Code context:\n{error_context['code_context']}"
|
||||
)
|
||||
self.logger.error(
|
||||
message=create_box_message(error_message, type="error"),
|
||||
message="{error}",
|
||||
tag="ERROR",
|
||||
params={"error": str(error_message)},
|
||||
boxes=["error"],
|
||||
)
|
||||
raise
|
||||
finally:
|
||||
|
||||
@@ -4,17 +4,15 @@ from .models import (
|
||||
CrawlResult,
|
||||
CrawlerTaskResult,
|
||||
CrawlStatus,
|
||||
DisplayMode,
|
||||
CrawlStats,
|
||||
DomainState,
|
||||
)
|
||||
|
||||
from rich.live import Live
|
||||
from rich.table import Table
|
||||
from rich.console import Console
|
||||
from rich import box
|
||||
from datetime import timedelta
|
||||
from .components.crawler_monitor import CrawlerMonitor
|
||||
|
||||
from .types import AsyncWebCrawler
|
||||
|
||||
from collections.abc import AsyncGenerator
|
||||
|
||||
import time
|
||||
import psutil
|
||||
import asyncio
|
||||
@@ -24,8 +22,6 @@ from urllib.parse import urlparse
|
||||
import random
|
||||
from abc import ABC, abstractmethod
|
||||
|
||||
from math import inf as infinity
|
||||
|
||||
|
||||
class RateLimiter:
|
||||
def __init__(
|
||||
@@ -87,201 +83,6 @@ class RateLimiter:
|
||||
return True
|
||||
|
||||
|
||||
class CrawlerMonitor:
|
||||
def __init__(
|
||||
self,
|
||||
max_visible_rows: int = 15,
|
||||
display_mode: DisplayMode = DisplayMode.DETAILED,
|
||||
):
|
||||
self.console = Console()
|
||||
self.max_visible_rows = max_visible_rows
|
||||
self.display_mode = display_mode
|
||||
self.stats: Dict[str, CrawlStats] = {}
|
||||
self.process = psutil.Process()
|
||||
self.start_time = time.time()
|
||||
self.live = Live(self._create_table(), refresh_per_second=2)
|
||||
|
||||
def start(self):
|
||||
self.live.start()
|
||||
|
||||
def stop(self):
|
||||
self.live.stop()
|
||||
|
||||
def add_task(self, task_id: str, url: str):
|
||||
self.stats[task_id] = CrawlStats(
|
||||
task_id=task_id, url=url, status=CrawlStatus.QUEUED
|
||||
)
|
||||
self.live.update(self._create_table())
|
||||
|
||||
def update_task(self, task_id: str, **kwargs):
|
||||
if task_id in self.stats:
|
||||
for key, value in kwargs.items():
|
||||
setattr(self.stats[task_id], key, value)
|
||||
self.live.update(self._create_table())
|
||||
|
||||
def _create_aggregated_table(self) -> Table:
|
||||
"""Creates a compact table showing only aggregated statistics"""
|
||||
table = Table(
|
||||
box=box.ROUNDED,
|
||||
title="Crawler Status Overview",
|
||||
title_style="bold magenta",
|
||||
header_style="bold blue",
|
||||
show_lines=True,
|
||||
)
|
||||
|
||||
# Calculate statistics
|
||||
total_tasks = len(self.stats)
|
||||
queued = sum(
|
||||
1 for stat in self.stats.values() if stat.status == CrawlStatus.QUEUED
|
||||
)
|
||||
in_progress = sum(
|
||||
1 for stat in self.stats.values() if stat.status == CrawlStatus.IN_PROGRESS
|
||||
)
|
||||
completed = sum(
|
||||
1 for stat in self.stats.values() if stat.status == CrawlStatus.COMPLETED
|
||||
)
|
||||
failed = sum(
|
||||
1 for stat in self.stats.values() if stat.status == CrawlStatus.FAILED
|
||||
)
|
||||
|
||||
# Memory statistics
|
||||
current_memory = self.process.memory_info().rss / (1024 * 1024)
|
||||
total_task_memory = sum(stat.memory_usage for stat in self.stats.values())
|
||||
peak_memory = max(
|
||||
(stat.peak_memory for stat in self.stats.values()), default=0.0
|
||||
)
|
||||
|
||||
# Duration
|
||||
duration = time.time() - self.start_time
|
||||
|
||||
# Create status row
|
||||
table.add_column("Status", style="bold cyan")
|
||||
table.add_column("Count", justify="right")
|
||||
table.add_column("Percentage", justify="right")
|
||||
|
||||
table.add_row("Total Tasks", str(total_tasks), "100%")
|
||||
table.add_row(
|
||||
"[yellow]In Queue[/yellow]",
|
||||
str(queued),
|
||||
f"{(queued / total_tasks * 100):.1f}%" if total_tasks > 0 else "0%",
|
||||
)
|
||||
table.add_row(
|
||||
"[blue]In Progress[/blue]",
|
||||
str(in_progress),
|
||||
f"{(in_progress / total_tasks * 100):.1f}%" if total_tasks > 0 else "0%",
|
||||
)
|
||||
table.add_row(
|
||||
"[green]Completed[/green]",
|
||||
str(completed),
|
||||
f"{(completed / total_tasks * 100):.1f}%" if total_tasks > 0 else "0%",
|
||||
)
|
||||
table.add_row(
|
||||
"[red]Failed[/red]",
|
||||
str(failed),
|
||||
f"{(failed / total_tasks * 100):.1f}%" if total_tasks > 0 else "0%",
|
||||
)
|
||||
|
||||
# Add memory information
|
||||
table.add_section()
|
||||
table.add_row(
|
||||
"[magenta]Current Memory[/magenta]", f"{current_memory:.1f} MB", ""
|
||||
)
|
||||
table.add_row(
|
||||
"[magenta]Total Task Memory[/magenta]", f"{total_task_memory:.1f} MB", ""
|
||||
)
|
||||
table.add_row(
|
||||
"[magenta]Peak Task Memory[/magenta]", f"{peak_memory:.1f} MB", ""
|
||||
)
|
||||
table.add_row(
|
||||
"[yellow]Runtime[/yellow]",
|
||||
str(timedelta(seconds=int(duration))),
|
||||
"",
|
||||
)
|
||||
|
||||
return table
|
||||
|
||||
def _create_detailed_table(self) -> Table:
|
||||
table = Table(
|
||||
box=box.ROUNDED,
|
||||
title="Crawler Performance Monitor",
|
||||
title_style="bold magenta",
|
||||
header_style="bold blue",
|
||||
)
|
||||
|
||||
# Add columns
|
||||
table.add_column("Task ID", style="cyan", no_wrap=True)
|
||||
table.add_column("URL", style="cyan", no_wrap=True)
|
||||
table.add_column("Status", style="bold")
|
||||
table.add_column("Memory (MB)", justify="right")
|
||||
table.add_column("Peak (MB)", justify="right")
|
||||
table.add_column("Duration", justify="right")
|
||||
table.add_column("Info", style="italic")
|
||||
|
||||
# Add summary row
|
||||
total_memory = sum(stat.memory_usage for stat in self.stats.values())
|
||||
active_count = sum(
|
||||
1 for stat in self.stats.values() if stat.status == CrawlStatus.IN_PROGRESS
|
||||
)
|
||||
completed_count = sum(
|
||||
1 for stat in self.stats.values() if stat.status == CrawlStatus.COMPLETED
|
||||
)
|
||||
failed_count = sum(
|
||||
1 for stat in self.stats.values() if stat.status == CrawlStatus.FAILED
|
||||
)
|
||||
|
||||
table.add_row(
|
||||
"[bold yellow]SUMMARY",
|
||||
f"Total: {len(self.stats)}",
|
||||
f"Active: {active_count}",
|
||||
f"{total_memory:.1f}",
|
||||
f"{self.process.memory_info().rss / (1024 * 1024):.1f}",
|
||||
str(
|
||||
timedelta(
|
||||
seconds=int(time.time() - self.start_time)
|
||||
)
|
||||
),
|
||||
f"✓{completed_count} ✗{failed_count}",
|
||||
style="bold",
|
||||
)
|
||||
|
||||
table.add_section()
|
||||
|
||||
# Add rows for each task
|
||||
visible_stats = sorted(
|
||||
self.stats.values(),
|
||||
key=lambda x: (
|
||||
x.status != CrawlStatus.IN_PROGRESS,
|
||||
x.status != CrawlStatus.QUEUED,
|
||||
x.end_time or float("inf"),
|
||||
),
|
||||
)[: self.max_visible_rows]
|
||||
|
||||
for stat in visible_stats:
|
||||
status_style = {
|
||||
CrawlStatus.QUEUED: "white",
|
||||
CrawlStatus.IN_PROGRESS: "yellow",
|
||||
CrawlStatus.COMPLETED: "green",
|
||||
CrawlStatus.FAILED: "red",
|
||||
}[stat.status]
|
||||
|
||||
table.add_row(
|
||||
stat.task_id[:8], # Show first 8 chars of task ID
|
||||
stat.url[:40] + "..." if len(stat.url) > 40 else stat.url,
|
||||
f"[{status_style}]{stat.status.value}[/{status_style}]",
|
||||
f"{stat.memory_usage:.1f}",
|
||||
f"{stat.peak_memory:.1f}",
|
||||
stat.duration,
|
||||
stat.error_message[:40] if stat.error_message else "",
|
||||
)
|
||||
|
||||
return table
|
||||
|
||||
def _create_table(self) -> Table:
|
||||
"""Creates the appropriate table based on display mode"""
|
||||
if self.display_mode == DisplayMode.AGGREGATED:
|
||||
return self._create_aggregated_table()
|
||||
return self._create_detailed_table()
|
||||
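A hedged usage sketch of the two display modes that `_create_table` above switches between; the constructor parameters are assumptions inferred from the attributes the monitor reads (`display_mode`, `max_visible_rows`):

# Illustrative sketch, not part of this diff; parameter names are assumed
# from the attributes used in _create_table / _create_detailed_table.
from crawl4ai import CrawlerMonitor, DisplayMode  # import path assumed

aggregated_monitor = CrawlerMonitor(display_mode=DisplayMode.AGGREGATED)  # compact summary table
detailed_monitor = CrawlerMonitor(
    display_mode=DisplayMode.DETAILED,  # one row per task
    max_visible_rows=15,                # cap on rows rendered by the detailed view
)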
|
||||
|
||||
class BaseDispatcher(ABC):
|
||||
def __init__(
|
||||
@@ -309,7 +110,7 @@ class BaseDispatcher(ABC):
|
||||
async def run_urls(
|
||||
self,
|
||||
urls: List[str],
|
||||
crawler: "AsyncWebCrawler", # noqa: F821
|
||||
crawler: AsyncWebCrawler, # noqa: F821
|
||||
config: CrawlerRunConfig,
|
||||
monitor: Optional[CrawlerMonitor] = None,
|
||||
) -> List[CrawlerTaskResult]:
|
||||
@@ -320,71 +121,144 @@ class MemoryAdaptiveDispatcher(BaseDispatcher):
|
||||
def __init__(
|
||||
self,
|
||||
memory_threshold_percent: float = 90.0,
|
||||
critical_threshold_percent: float = 95.0, # New critical threshold
|
||||
recovery_threshold_percent: float = 85.0, # New recovery threshold
|
||||
check_interval: float = 1.0,
|
||||
max_session_permit: int = 20,
|
||||
memory_wait_timeout: float = 300.0, # 5 minutes default timeout
|
||||
fairness_timeout: float = 600.0, # 10 minutes before prioritizing long-waiting URLs
|
||||
rate_limiter: Optional[RateLimiter] = None,
|
||||
monitor: Optional[CrawlerMonitor] = None,
|
||||
):
|
||||
super().__init__(rate_limiter, monitor)
|
||||
self.memory_threshold_percent = memory_threshold_percent
|
||||
self.critical_threshold_percent = critical_threshold_percent
|
||||
self.recovery_threshold_percent = recovery_threshold_percent
|
||||
self.check_interval = check_interval
|
||||
self.max_session_permit = max_session_permit
|
||||
self.memory_wait_timeout = memory_wait_timeout
|
||||
self.result_queue = asyncio.Queue() # Queue for storing results
|
||||
|
||||
self.fairness_timeout = fairness_timeout
|
||||
self.result_queue = asyncio.Queue()
|
||||
self.task_queue = asyncio.PriorityQueue() # Priority queue for better management
|
||||
self.memory_pressure_mode = False # Flag to indicate when we're in memory pressure mode
|
||||
self.current_memory_percent = 0.0 # Track current memory usage
|
||||
|
||||
async def _memory_monitor_task(self):
|
||||
"""Background task to continuously monitor memory usage and update state"""
|
||||
while True:
|
||||
self.current_memory_percent = psutil.virtual_memory().percent
|
||||
|
||||
# Enter memory pressure mode if we cross the threshold
|
||||
if not self.memory_pressure_mode and self.current_memory_percent >= self.memory_threshold_percent:
|
||||
self.memory_pressure_mode = True
|
||||
if self.monitor:
|
||||
self.monitor.update_memory_status("PRESSURE")
|
||||
|
||||
# Exit memory pressure mode if we go below recovery threshold
|
||||
elif self.memory_pressure_mode and self.current_memory_percent <= self.recovery_threshold_percent:
|
||||
self.memory_pressure_mode = False
|
||||
if self.monitor:
|
||||
self.monitor.update_memory_status("NORMAL")
|
||||
|
||||
# In critical mode, we might need to take more drastic action
|
||||
if self.current_memory_percent >= self.critical_threshold_percent:
|
||||
if self.monitor:
|
||||
self.monitor.update_memory_status("CRITICAL")
|
||||
# We could implement additional memory-saving measures here
|
||||
|
||||
await asyncio.sleep(self.check_interval)
|
||||
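The loop above gives the pressure flag a hysteresis band between the recovery and pressure thresholds; a minimal standalone sketch of the same state transitions, assuming the default percentages from `__init__`:

# Illustrative sketch of the hysteresis, not part of this diff.
def next_pressure_state(pressure: bool, memory_percent: float,
                        enter_at: float = 90.0, leave_at: float = 85.0) -> bool:
    if not pressure and memory_percent >= enter_at:
        return True      # start throttling new tasks
    if pressure and memory_percent <= leave_at:
        return False     # resume normal dispatch
    return pressure      # between the thresholds: keep the current state

assert next_pressure_state(False, 92.0) is True
assert next_pressure_state(True, 87.0) is True   # still throttled (hysteresis band)
assert next_pressure_state(True, 84.0) is False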
|
||||
def _get_priority_score(self, wait_time: float, retry_count: int) -> float:
|
||||
"""Calculate priority score (lower is higher priority)
|
||||
- URLs waiting longer than fairness_timeout get higher priority
|
||||
- More retry attempts decrease priority
|
||||
"""
|
||||
if wait_time > self.fairness_timeout:
|
||||
# High priority for long-waiting URLs
|
||||
return -wait_time
|
||||
# Standard priority based on retries
|
||||
return retry_count
|
||||
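A tiny worked example of the scoring rule, using the default `fairness_timeout` of 600 seconds; lower scores are dequeued first by the priority queue:

# Illustrative sketch, not part of this diff.
def priority_score(wait_time: float, retry_count: int, fairness_timeout: float = 600.0) -> float:
    if wait_time > fairness_timeout:
        return -wait_time    # long-waiting URLs jump ahead (lower = higher priority)
    return retry_count       # otherwise, more retries means lower priority

assert priority_score(700.0, 0) == -700.0   # waited 700 s: served before everything else
assert priority_score(30.0, 2) == 2         # fresh URL on its 2nd retry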
|
||||
async def crawl_url(
|
||||
self,
|
||||
url: str,
|
||||
config: CrawlerRunConfig,
|
||||
task_id: str,
|
||||
retry_count: int = 0,
|
||||
) -> CrawlerTaskResult:
|
||||
start_time = time.time()
|
||||
error_message = ""
|
||||
memory_usage = peak_memory = 0.0
|
||||
|
||||
|
||||
# Get starting memory for accurate measurement
|
||||
process = psutil.Process()
|
||||
start_memory = process.memory_info().rss / (1024 * 1024)
|
||||
|
||||
try:
|
||||
if self.monitor:
|
||||
self.monitor.update_task(
|
||||
task_id, status=CrawlStatus.IN_PROGRESS, start_time=start_time
|
||||
task_id,
|
||||
status=CrawlStatus.IN_PROGRESS,
|
||||
start_time=start_time,
|
||||
retry_count=retry_count
|
||||
)
|
||||
|
||||
self.concurrent_sessions += 1
|
||||
|
||||
|
||||
if self.rate_limiter:
|
||||
await self.rate_limiter.wait_if_needed(url)
|
||||
|
||||
process = psutil.Process()
|
||||
start_memory = process.memory_info().rss / (1024 * 1024)
|
||||
|
||||
# Check if we're in critical memory state
|
||||
if self.current_memory_percent >= self.critical_threshold_percent:
|
||||
# Requeue this task with increased priority and retry count
|
||||
enqueue_time = time.time()
|
||||
priority = self._get_priority_score(enqueue_time - start_time, retry_count + 1)
|
||||
await self.task_queue.put((priority, (url, task_id, retry_count + 1, enqueue_time)))
|
||||
|
||||
# Update monitoring
|
||||
if self.monitor:
|
||||
self.monitor.update_task(
|
||||
task_id,
|
||||
status=CrawlStatus.QUEUED,
|
||||
error_message="Requeued due to critical memory pressure"
|
||||
)
|
||||
|
||||
# Return placeholder result with requeued status
|
||||
return CrawlerTaskResult(
|
||||
task_id=task_id,
|
||||
url=url,
|
||||
result=CrawlResult(
|
||||
url=url, html="", metadata={"status": "requeued"},
|
||||
success=False, error_message="Requeued due to critical memory pressure"
|
||||
),
|
||||
memory_usage=0,
|
||||
peak_memory=0,
|
||||
start_time=start_time,
|
||||
end_time=time.time(),
|
||||
error_message="Requeued due to critical memory pressure",
|
||||
retry_count=retry_count + 1
|
||||
)
|
||||
|
||||
# Execute the crawl
|
||||
result = await self.crawler.arun(url, config=config, session_id=task_id)
|
||||
|
||||
# Measure memory usage
|
||||
end_memory = process.memory_info().rss / (1024 * 1024)
|
||||
|
||||
memory_usage = peak_memory = end_memory - start_memory
|
||||
|
||||
|
||||
# Handle rate limiting
|
||||
if self.rate_limiter and result.status_code:
|
||||
if not self.rate_limiter.update_delay(url, result.status_code):
|
||||
error_message = f"Rate limit retry count exceeded for domain {urlparse(url).netloc}"
|
||||
if self.monitor:
|
||||
self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
|
||||
result = CrawlerTaskResult(
|
||||
task_id=task_id,
|
||||
url=url,
|
||||
result=result,
|
||||
memory_usage=memory_usage,
|
||||
peak_memory=peak_memory,
|
||||
start_time=start_time,
|
||||
end_time=time.time(),
|
||||
error_message=error_message,
|
||||
)
|
||||
await self.result_queue.put(result)
|
||||
return result
|
||||
|
||||
|
||||
# Update status based on result
|
||||
if not result.success:
|
||||
error_message = result.error_message
|
||||
if self.monitor:
|
||||
self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
|
||||
elif self.monitor:
|
||||
self.monitor.update_task(task_id, status=CrawlStatus.COMPLETED)
|
||||
|
||||
|
||||
except Exception as e:
|
||||
error_message = str(e)
|
||||
if self.monitor:
|
||||
@@ -392,7 +266,7 @@ class MemoryAdaptiveDispatcher(BaseDispatcher):
|
||||
result = CrawlResult(
|
||||
url=url, html="", metadata={}, success=False, error_message=str(e)
|
||||
)
|
||||
|
||||
|
||||
finally:
|
||||
end_time = time.time()
|
||||
if self.monitor:
|
||||
@@ -402,9 +276,10 @@ class MemoryAdaptiveDispatcher(BaseDispatcher):
|
||||
memory_usage=memory_usage,
|
||||
peak_memory=peak_memory,
|
||||
error_message=error_message,
|
||||
retry_count=retry_count
|
||||
)
|
||||
self.concurrent_sessions -= 1
|
||||
|
||||
|
||||
return CrawlerTaskResult(
|
||||
task_id=task_id,
|
||||
url=url,
|
||||
@@ -414,116 +289,240 @@ class MemoryAdaptiveDispatcher(BaseDispatcher):
|
||||
start_time=start_time,
|
||||
end_time=end_time,
|
||||
error_message=error_message,
|
||||
retry_count=retry_count
|
||||
)
|
||||
|
||||
|
||||
async def run_urls(
|
||||
self,
|
||||
urls: List[str],
|
||||
crawler: "AsyncWebCrawler", # noqa: F821
|
||||
crawler: AsyncWebCrawler,
|
||||
config: CrawlerRunConfig,
|
||||
) -> List[CrawlerTaskResult]:
|
||||
self.crawler = crawler
|
||||
|
||||
|
||||
# Start the memory monitor task
|
||||
memory_monitor = asyncio.create_task(self._memory_monitor_task())
|
||||
|
||||
if self.monitor:
|
||||
self.monitor.start()
|
||||
|
||||
|
||||
results = []
|
||||
|
||||
try:
|
||||
pending_tasks = []
|
||||
active_tasks = []
|
||||
task_queue = []
|
||||
|
||||
for url in urls:
|
||||
task_id = str(uuid.uuid4())
|
||||
if self.monitor:
|
||||
self.monitor.add_task(task_id, url)
|
||||
task_queue.append((url, task_id))
|
||||
|
||||
while task_queue or active_tasks:
|
||||
wait_start_time = time.time()
|
||||
while len(active_tasks) < self.max_session_permit and task_queue:
|
||||
if psutil.virtual_memory().percent >= self.memory_threshold_percent:
|
||||
# Check if we've exceeded the timeout
|
||||
if time.time() - wait_start_time > self.memory_wait_timeout:
|
||||
raise MemoryError(
|
||||
f"Memory usage above threshold ({self.memory_threshold_percent}%) for more than {self.memory_wait_timeout} seconds"
|
||||
)
|
||||
await asyncio.sleep(self.check_interval)
|
||||
continue
|
||||
|
||||
url, task_id = task_queue.pop(0)
|
||||
task = asyncio.create_task(self.crawl_url(url, config, task_id))
|
||||
active_tasks.append(task)
|
||||
|
||||
if not active_tasks:
|
||||
await asyncio.sleep(self.check_interval)
|
||||
continue
|
||||
|
||||
done, pending = await asyncio.wait(
|
||||
active_tasks, return_when=asyncio.FIRST_COMPLETED
|
||||
)
|
||||
|
||||
pending_tasks.extend(done)
|
||||
active_tasks = list(pending)
|
||||
|
||||
return await asyncio.gather(*pending_tasks)
|
||||
finally:
|
||||
if self.monitor:
|
||||
self.monitor.stop()
|
||||
|
||||
async def run_urls_stream(
|
||||
self,
|
||||
urls: List[str],
|
||||
crawler: "AsyncWebCrawler", # noqa: F821
|
||||
config: CrawlerRunConfig,
|
||||
) -> AsyncGenerator[CrawlerTaskResult, None]:
|
||||
self.crawler = crawler
|
||||
if self.monitor:
|
||||
self.monitor.start()
|
||||
|
||||
try:
|
||||
active_tasks = []
|
||||
task_queue = []
|
||||
completed_count = 0
|
||||
total_urls = len(urls)
|
||||
|
||||
# Initialize task queue
|
||||
for url in urls:
|
||||
task_id = str(uuid.uuid4())
|
||||
if self.monitor:
|
||||
self.monitor.add_task(task_id, url)
|
||||
task_queue.append((url, task_id))
|
||||
|
||||
while completed_count < total_urls:
|
||||
# Start new tasks if memory permits
|
||||
while len(active_tasks) < self.max_session_permit and task_queue:
|
||||
if psutil.virtual_memory().percent >= self.memory_threshold_percent:
|
||||
await asyncio.sleep(self.check_interval)
|
||||
continue
|
||||
|
||||
url, task_id = task_queue.pop(0)
|
||||
task = asyncio.create_task(self.crawl_url(url, config, task_id))
|
||||
active_tasks.append(task)
|
||||
|
||||
if not active_tasks and not task_queue:
|
||||
break
|
||||
|
||||
# Wait for any task to complete and yield results
|
||||
# Add to queue with initial priority 0, retry count 0, and current time
|
||||
await self.task_queue.put((0, (url, task_id, 0, time.time())))
|
||||
|
||||
active_tasks = []
|
||||
|
||||
# Process until both queues are empty
|
||||
while not self.task_queue.empty() or active_tasks:
|
||||
# If memory pressure is low, start new tasks
|
||||
if not self.memory_pressure_mode and len(active_tasks) < self.max_session_permit:
|
||||
try:
|
||||
# Try to get a task with timeout to avoid blocking indefinitely
|
||||
priority, (url, task_id, retry_count, enqueue_time) = await asyncio.wait_for(
|
||||
self.task_queue.get(), timeout=0.1
|
||||
)
|
||||
|
||||
# Create and start the task
|
||||
task = asyncio.create_task(
|
||||
self.crawl_url(url, config, task_id, retry_count)
|
||||
)
|
||||
active_tasks.append(task)
|
||||
|
||||
# Update waiting time in monitor
|
||||
if self.monitor:
|
||||
wait_time = time.time() - enqueue_time
|
||||
self.monitor.update_task(
|
||||
task_id,
|
||||
wait_time=wait_time,
|
||||
status=CrawlStatus.IN_PROGRESS
|
||||
)
|
||||
|
||||
except asyncio.TimeoutError:
|
||||
# No tasks in queue, that's fine
|
||||
pass
|
||||
|
||||
# Wait for completion even if queue is starved
|
||||
if active_tasks:
|
||||
done, pending = await asyncio.wait(
|
||||
active_tasks, timeout=0.1, return_when=asyncio.FIRST_COMPLETED
|
||||
)
|
||||
|
||||
# Process completed tasks
|
||||
for completed_task in done:
|
||||
result = await completed_task
|
||||
completed_count += 1
|
||||
yield result
|
||||
results.append(result)
|
||||
|
||||
# Update active tasks list
|
||||
active_tasks = list(pending)
|
||||
else:
|
||||
await asyncio.sleep(self.check_interval)
|
||||
# If no active tasks but still waiting, sleep briefly
|
||||
await asyncio.sleep(self.check_interval / 2)
|
||||
|
||||
# Update priorities for waiting tasks if needed
|
||||
await self._update_queue_priorities()
|
||||
|
||||
return results
|
||||
|
||||
except Exception as e:
|
||||
if self.monitor:
|
||||
self.monitor.update_memory_status(f"QUEUE_ERROR: {str(e)}")
|
||||
|
||||
finally:
|
||||
# Clean up
|
||||
memory_monitor.cancel()
|
||||
if self.monitor:
|
||||
self.monitor.stop()
|
||||
|
||||
|
||||
async def _update_queue_priorities(self):
|
||||
"""Periodically update priorities of items in the queue to prevent starvation"""
|
||||
# Skip if queue is empty
|
||||
if self.task_queue.empty():
|
||||
return
|
||||
|
||||
# Use a drain-and-refill approach to update all priorities
|
||||
temp_items = []
|
||||
|
||||
# Drain the queue (with a safety timeout to prevent blocking)
|
||||
try:
|
||||
drain_start = time.time()
|
||||
while not self.task_queue.empty() and time.time() - drain_start < 5.0: # 5 second safety timeout
|
||||
try:
|
||||
# Get item from queue with timeout
|
||||
priority, (url, task_id, retry_count, enqueue_time) = await asyncio.wait_for(
|
||||
self.task_queue.get(), timeout=0.1
|
||||
)
|
||||
|
||||
# Calculate new priority based on current wait time
|
||||
current_time = time.time()
|
||||
wait_time = current_time - enqueue_time
|
||||
new_priority = self._get_priority_score(wait_time, retry_count)
|
||||
|
||||
# Store with updated priority
|
||||
temp_items.append((new_priority, (url, task_id, retry_count, enqueue_time)))
|
||||
|
||||
# Update monitoring stats for this task
|
||||
if self.monitor and task_id in self.monitor.stats:
|
||||
self.monitor.update_task(task_id, wait_time=wait_time)
|
||||
|
||||
except asyncio.TimeoutError:
|
||||
# Queue might be empty or very slow
|
||||
break
|
||||
except Exception as e:
|
||||
# If anything goes wrong, stop draining; the items collected so far are refilled below
if self.monitor:
    self.monitor.update_memory_status(f"QUEUE_ERROR: {str(e)}")
|
||||
|
||||
# Calculate queue statistics
|
||||
if temp_items and self.monitor:
|
||||
total_queued = len(temp_items)
|
||||
wait_times = [item[1][3] for item in temp_items]
|
||||
highest_wait_time = time.time() - min(wait_times) if wait_times else 0
|
||||
avg_wait_time = sum(time.time() - t for t in wait_times) / len(wait_times) if wait_times else 0
|
||||
|
||||
# Update queue statistics in monitor
|
||||
self.monitor.update_queue_statistics(
|
||||
total_queued=total_queued,
|
||||
highest_wait_time=highest_wait_time,
|
||||
avg_wait_time=avg_wait_time
|
||||
)
|
||||
|
||||
# Sort by priority (lowest number = highest priority)
|
||||
temp_items.sort(key=lambda x: x[0])
|
||||
|
||||
# Refill the queue with updated priorities
|
||||
for item in temp_items:
|
||||
await self.task_queue.put(item)
|
||||
|
||||
async def run_urls_stream(
|
||||
self,
|
||||
urls: List[str],
|
||||
crawler: AsyncWebCrawler,
|
||||
config: CrawlerRunConfig,
|
||||
) -> AsyncGenerator[CrawlerTaskResult, None]:
|
||||
self.crawler = crawler
|
||||
|
||||
# Start the memory monitor task
|
||||
memory_monitor = asyncio.create_task(self._memory_monitor_task())
|
||||
|
||||
if self.monitor:
|
||||
self.monitor.start()
|
||||
|
||||
try:
|
||||
# Initialize task queue
|
||||
for url in urls:
|
||||
task_id = str(uuid.uuid4())
|
||||
if self.monitor:
|
||||
self.monitor.add_task(task_id, url)
|
||||
# Add to queue with initial priority 0, retry count 0, and current time
|
||||
await self.task_queue.put((0, (url, task_id, 0, time.time())))
|
||||
|
||||
active_tasks = []
|
||||
completed_count = 0
|
||||
total_urls = len(urls)
|
||||
|
||||
while completed_count < total_urls:
|
||||
# If memory pressure is low, start new tasks
|
||||
if not self.memory_pressure_mode and len(active_tasks) < self.max_session_permit:
|
||||
try:
|
||||
# Try to get a task with timeout
|
||||
priority, (url, task_id, retry_count, enqueue_time) = await asyncio.wait_for(
|
||||
self.task_queue.get(), timeout=0.1
|
||||
)
|
||||
|
||||
# Create and start the task
|
||||
task = asyncio.create_task(
|
||||
self.crawl_url(url, config, task_id, retry_count)
|
||||
)
|
||||
active_tasks.append(task)
|
||||
|
||||
# Update waiting time in monitor
|
||||
if self.monitor:
|
||||
wait_time = time.time() - enqueue_time
|
||||
self.monitor.update_task(
|
||||
task_id,
|
||||
wait_time=wait_time,
|
||||
status=CrawlStatus.IN_PROGRESS
|
||||
)
|
||||
|
||||
except asyncio.TimeoutError:
|
||||
# No tasks in queue, that's fine
|
||||
pass
|
||||
|
||||
# Process completed tasks and yield results
|
||||
if active_tasks:
|
||||
done, pending = await asyncio.wait(
|
||||
active_tasks, timeout=0.1, return_when=asyncio.FIRST_COMPLETED
|
||||
)
|
||||
|
||||
for completed_task in done:
|
||||
result = await completed_task
|
||||
|
||||
# Only count as completed if it wasn't requeued
|
||||
if "requeued" not in result.error_message:
|
||||
completed_count += 1
|
||||
yield result
|
||||
|
||||
# Update active tasks list
|
||||
active_tasks = list(pending)
|
||||
else:
|
||||
# If no active tasks but still waiting, sleep briefly
|
||||
await asyncio.sleep(self.check_interval / 2)
|
||||
|
||||
# Update priorities for waiting tasks if needed
|
||||
await self._update_queue_priorities()
|
||||
|
||||
finally:
|
||||
# Clean up
|
||||
memory_monitor.cancel()
|
||||
if self.monitor:
|
||||
self.monitor.stop()
|
||||
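A hedged usage sketch of consuming the streaming dispatcher directly; most callers go through `AsyncWebCrawler.arun_many` instead, and the import paths are assumptions based on the imports shown later in this diff:

# Illustrative sketch, not part of this diff.
import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig          # import path assumed
from crawl4ai.async_dispatcher import MemoryAdaptiveDispatcher  # module path assumed

async def main():
    dispatcher = MemoryAdaptiveDispatcher(memory_threshold_percent=85.0, max_session_permit=10)
    async with AsyncWebCrawler() as crawler:
        async for task_result in dispatcher.run_urls_stream(
            urls=["https://example.com", "https://example.org"],
            crawler=crawler,
            config=CrawlerRunConfig(),
        ):
            print(task_result.url, task_result.memory_usage, task_result.error_message)

asyncio.run(main())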
|
||||
|
||||
class SemaphoreDispatcher(BaseDispatcher):
|
||||
def __init__(
|
||||
@@ -620,7 +619,7 @@ class SemaphoreDispatcher(BaseDispatcher):
|
||||
|
||||
async def run_urls(
|
||||
self,
|
||||
crawler: "AsyncWebCrawler", # noqa: F821
|
||||
crawler: AsyncWebCrawler, # noqa: F821
|
||||
urls: List[str],
|
||||
config: CrawlerRunConfig,
|
||||
) -> List[CrawlerTaskResult]:
|
||||
@@ -644,4 +643,4 @@ class SemaphoreDispatcher(BaseDispatcher):
|
||||
return await asyncio.gather(*tasks, return_exceptions=True)
|
||||
finally:
|
||||
if self.monitor:
|
||||
self.monitor.stop()
|
||||
self.monitor.stop()
|
||||
@@ -1,18 +1,48 @@
|
||||
from abc import ABC, abstractmethod
|
||||
from enum import Enum
|
||||
from typing import Optional, Dict, Any
|
||||
from colorama import Fore, Style, init
|
||||
from typing import Optional, Dict, Any, List
|
||||
import os
|
||||
from datetime import datetime
|
||||
from urllib.parse import unquote
|
||||
from rich.console import Console
|
||||
from rich.text import Text
|
||||
from .utils import create_box_message
|
||||
|
||||
|
||||
class LogLevel(Enum):
|
||||
DEFAULT = 0
|
||||
DEBUG = 1
|
||||
INFO = 2
|
||||
SUCCESS = 3
|
||||
WARNING = 4
|
||||
ERROR = 5
|
||||
CRITICAL = 6
|
||||
ALERT = 7
|
||||
NOTICE = 8
|
||||
EXCEPTION = 9
|
||||
FATAL = 10
|
||||
|
||||
|
||||
def __str__(self):
|
||||
return self.name.lower()
|
||||
|
||||
class LogColor(str, Enum):
|
||||
"""Enum for log colors."""
|
||||
|
||||
DEBUG = "lightblack"
|
||||
INFO = "cyan"
|
||||
SUCCESS = "green"
|
||||
WARNING = "yellow"
|
||||
ERROR = "red"
|
||||
CYAN = "cyan"
|
||||
GREEN = "green"
|
||||
YELLOW = "yellow"
|
||||
MAGENTA = "magenta"
|
||||
DIM_MAGENTA = "dim magenta"
|
||||
|
||||
def __str__(self):
|
||||
"""Automatically convert rich color to string."""
|
||||
return self.value
|
||||
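Because `LogColor` mixes in `str` and overrides `__str__`, members interpolate directly into rich markup, which is how `_log` uses them further down; a one-line illustration (module path assumed):

# Illustrative sketch, not part of this diff.
from crawl4ai.async_logger import LogColor  # module path assumed

tag = f"[{LogColor.ERROR}]fetch failed[/{LogColor.ERROR}]"
print(tag)  # [red]fetch failed[/red]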
|
||||
|
||||
class AsyncLoggerBase(ABC):
|
||||
@@ -37,13 +67,14 @@ class AsyncLoggerBase(ABC):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def url_status(self, url: str, success: bool, timing: float, tag: str = "FETCH", url_length: int = 50):
|
||||
def url_status(self, url: str, success: bool, timing: float, tag: str = "FETCH", url_length: int = 100):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def error_status(self, url: str, error: str, tag: str = "ERROR", url_length: int = 50):
|
||||
def error_status(self, url: str, error: str, tag: str = "ERROR", url_length: int = 100):
|
||||
pass
|
||||
|
||||
|
||||
class AsyncLogger(AsyncLoggerBase):
|
||||
"""
|
||||
Asynchronous logger with support for colored console output and file logging.
|
||||
@@ -61,14 +92,21 @@ class AsyncLogger(AsyncLoggerBase):
|
||||
"DEBUG": "⋯",
|
||||
"INFO": "ℹ",
|
||||
"WARNING": "⚠",
|
||||
"SUCCESS": "✔",
|
||||
"CRITICAL": "‼",
|
||||
"ALERT": "⚡",
|
||||
"NOTICE": "ℹ",
|
||||
"EXCEPTION": "❗",
|
||||
"FATAL": "☠",
|
||||
"DEFAULT": "•",
|
||||
}
|
||||
|
||||
DEFAULT_COLORS = {
|
||||
LogLevel.DEBUG: Fore.LIGHTBLACK_EX,
|
||||
LogLevel.INFO: Fore.CYAN,
|
||||
LogLevel.SUCCESS: Fore.GREEN,
|
||||
LogLevel.WARNING: Fore.YELLOW,
|
||||
LogLevel.ERROR: Fore.RED,
|
||||
LogLevel.DEBUG: LogColor.DEBUG,
|
||||
LogLevel.INFO: LogColor.INFO,
|
||||
LogLevel.SUCCESS: LogColor.SUCCESS,
|
||||
LogLevel.WARNING: LogColor.WARNING,
|
||||
LogLevel.ERROR: LogColor.ERROR,
|
||||
}
|
||||
|
||||
def __init__(
|
||||
@@ -77,7 +115,7 @@ class AsyncLogger(AsyncLoggerBase):
|
||||
log_level: LogLevel = LogLevel.DEBUG,
|
||||
tag_width: int = 10,
|
||||
icons: Optional[Dict[str, str]] = None,
|
||||
colors: Optional[Dict[LogLevel, str]] = None,
|
||||
colors: Optional[Dict[LogLevel, LogColor]] = None,
|
||||
verbose: bool = True,
|
||||
):
|
||||
"""
|
||||
@@ -91,13 +129,13 @@ class AsyncLogger(AsyncLoggerBase):
|
||||
colors: Custom colors for different log levels
|
||||
verbose: Whether to output to console
|
||||
"""
|
||||
init() # Initialize colorama
|
||||
self.log_file = log_file
|
||||
self.log_level = log_level
|
||||
self.tag_width = tag_width
|
||||
self.icons = icons or self.DEFAULT_ICONS
|
||||
self.colors = colors or self.DEFAULT_COLORS
|
||||
self.verbose = verbose
|
||||
self.console = Console()
|
||||
|
||||
# Create log file directory if needed
|
||||
if log_file:
|
||||
@@ -110,20 +148,23 @@ class AsyncLogger(AsyncLoggerBase):
|
||||
def _get_icon(self, tag: str) -> str:
|
||||
"""Get the icon for a tag, defaulting to info icon if not found."""
|
||||
return self.icons.get(tag, self.icons["INFO"])
|
||||
|
||||
def _shorten(self, text, length, placeholder="..."):
|
||||
"""Truncate text in the middle if longer than length, or pad if shorter."""
|
||||
if len(text) <= length:
|
||||
return text.ljust(length) # Pad with spaces to reach desired length
|
||||
half = (length - len(placeholder)) // 2
|
||||
shortened = text[:half] + placeholder + text[-half:]
|
||||
return shortened.ljust(length) # Also pad shortened text to consistent length
|
||||
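A minimal standalone sketch mirroring the middle-truncation behaviour documented above, with one worked example:

# Illustrative sketch, not part of this diff; mirrors AsyncLogger._shorten.
def shorten(text: str, length: int, placeholder: str = "...") -> str:
    if len(text) <= length:
        return text.ljust(length)
    half = (length - len(placeholder)) // 2
    return (text[:half] + placeholder + text[-half:]).ljust(length)

print(repr(shorten("https://example.com/very/long/path", 20)))
# 'https://...ong/path ' — first and last characters kept, middle elided, padded to 20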
|
||||
def _write_to_file(self, message: str):
|
||||
"""Write a message to the log file if configured."""
|
||||
if self.log_file:
|
||||
text = Text.from_markup(message)
|
||||
plain_text = text.plain
|
||||
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
|
||||
with open(self.log_file, "a", encoding="utf-8") as f:
|
||||
# Strip ANSI color codes for file output
|
||||
clean_message = message.replace(Fore.RESET, "").replace(
|
||||
Style.RESET_ALL, ""
|
||||
)
|
||||
for color in vars(Fore).values():
|
||||
if isinstance(color, str):
|
||||
clean_message = clean_message.replace(color, "")
|
||||
f.write(f"[{timestamp}] {clean_message}\n")
|
||||
f.write(f"[{timestamp}] {plain_text}\n")
|
||||
|
||||
def _log(
|
||||
self,
|
||||
@@ -131,8 +172,9 @@ class AsyncLogger(AsyncLoggerBase):
|
||||
message: str,
|
||||
tag: str,
|
||||
params: Optional[Dict[str, Any]] = None,
|
||||
colors: Optional[Dict[str, str]] = None,
|
||||
base_color: Optional[str] = None,
|
||||
colors: Optional[Dict[str, LogColor]] = None,
|
||||
boxes: Optional[List[str]] = None,
|
||||
base_color: Optional[LogColor] = None,
|
||||
**kwargs,
|
||||
):
|
||||
"""
|
||||
@@ -144,42 +186,44 @@ class AsyncLogger(AsyncLoggerBase):
|
||||
tag: Tag for the message
|
||||
params: Parameters to format into the message
|
||||
colors: Color overrides for specific parameters
|
||||
boxes: Box overrides for specific parameters
|
||||
base_color: Base color for the entire message
|
||||
"""
|
||||
if level.value < self.log_level.value:
|
||||
return
|
||||
|
||||
# Format the message with parameters if provided
|
||||
# avoid conflict with rich formatting
|
||||
parsed_message = message.replace("[", "[[").replace("]", "]]")
|
||||
if params:
|
||||
try:
|
||||
# First format the message with raw parameters
|
||||
formatted_message = message.format(**params)
|
||||
# FIXME: If a parameter uses a float format spec such as {value:.2f},
# the formatted message contains the rounded value (e.g. 0.23) while the
# replacement below searches for the raw value (e.g. 0.23333), so colors
# and boxes may not be applied to that parameter.
|
||||
formatted_message = parsed_message.format(**params)
|
||||
for key, value in params.items():
|
||||
# escape `[` and `]` in the value so rich does not treat them as markup
|
||||
value_str = str(value).replace("[", "[[").replace("]", "]]")
|
||||
# apply a color override for this parameter if one was requested
|
||||
if colors and key in colors:
|
||||
color_str = f"[{colors[key]}]{value_str}[/{colors[key]}]"
|
||||
formatted_message = formatted_message.replace(value_str, color_str)
|
||||
value_str = color_str
|
||||
|
||||
# Then apply colors if specified
|
||||
if colors:
|
||||
for key, color in colors.items():
|
||||
# Find the formatted value in the message and wrap it with color
|
||||
if key in params:
|
||||
value_str = str(params[key])
|
||||
formatted_message = formatted_message.replace(
|
||||
value_str, f"{color}{value_str}{Style.RESET_ALL}"
|
||||
)
|
||||
# apply a box override for this parameter if one was requested
|
||||
if boxes and key in boxes:
|
||||
formatted_message = formatted_message.replace(value_str,
|
||||
create_box_message(value_str, type=str(level)))
|
||||
|
||||
except KeyError as e:
|
||||
formatted_message = (
|
||||
f"LOGGING ERROR: Missing parameter {e} in message template"
|
||||
)
|
||||
level = LogLevel.ERROR
|
||||
else:
|
||||
formatted_message = message
|
||||
formatted_message = parsed_message
|
||||
|
||||
# Construct the full log line
|
||||
color = base_color or self.colors[level]
|
||||
log_line = f"{color}{self._format_tag(tag)} {self._get_icon(tag)} {formatted_message}{Style.RESET_ALL}"
|
||||
color: LogColor = base_color or self.colors[level]
|
||||
log_line = f"[{color}]{self._format_tag(tag)} {self._get_icon(tag)} {formatted_message} [/{color}]"
|
||||
|
||||
# Output to console if verbose
|
||||
if self.verbose or kwargs.get("force_verbose", False):
|
||||
print(log_line)
|
||||
self.console.print(log_line)
|
||||
|
||||
# Write to file if configured
|
||||
self._write_to_file(log_line)
|
||||
@@ -199,6 +243,22 @@ class AsyncLogger(AsyncLoggerBase):
|
||||
def warning(self, message: str, tag: str = "WARNING", **kwargs):
|
||||
"""Log a warning message."""
|
||||
self._log(LogLevel.WARNING, message, tag, **kwargs)
|
||||
|
||||
def critical(self, message: str, tag: str = "CRITICAL", **kwargs):
|
||||
"""Log a critical message."""
|
||||
self._log(LogLevel.ERROR, message, tag, **kwargs)
|
||||
def exception(self, message: str, tag: str = "EXCEPTION", **kwargs):
|
||||
"""Log an exception message."""
|
||||
self._log(LogLevel.ERROR, message, tag, **kwargs)
|
||||
def fatal(self, message: str, tag: str = "FATAL", **kwargs):
|
||||
"""Log a fatal message."""
|
||||
self._log(LogLevel.ERROR, message, tag, **kwargs)
|
||||
def alert(self, message: str, tag: str = "ALERT", **kwargs):
|
||||
"""Log an alert message."""
|
||||
self._log(LogLevel.ERROR, message, tag, **kwargs)
|
||||
def notice(self, message: str, tag: str = "NOTICE", **kwargs):
|
||||
"""Log a notice message."""
|
||||
self._log(LogLevel.INFO, message, tag, **kwargs)
|
||||
|
||||
def error(self, message: str, tag: str = "ERROR", **kwargs):
|
||||
"""Log an error message."""
|
||||
@@ -210,7 +270,7 @@ class AsyncLogger(AsyncLoggerBase):
|
||||
success: bool,
|
||||
timing: float,
|
||||
tag: str = "FETCH",
|
||||
url_length: int = 50,
|
||||
url_length: int = 100,
|
||||
):
|
||||
"""
|
||||
Convenience method for logging URL fetch status.
|
||||
@@ -222,19 +282,20 @@ class AsyncLogger(AsyncLoggerBase):
|
||||
tag: Tag for the message
|
||||
url_length: Maximum length for URL in log
|
||||
"""
|
||||
decoded_url = unquote(url)
|
||||
readable_url = self._shorten(decoded_url, url_length)
|
||||
self._log(
|
||||
level=LogLevel.SUCCESS if success else LogLevel.ERROR,
|
||||
message="{url:.{url_length}}... | Status: {status} | Time: {timing:.2f}s",
|
||||
message="{url} | {status} | ⏱: {timing:.2f}s",
|
||||
tag=tag,
|
||||
params={
|
||||
"url": url,
|
||||
"url_length": url_length,
|
||||
"status": success,
|
||||
"url": readable_url,
|
||||
"status": "✓" if success else "✗",
|
||||
"timing": timing,
|
||||
},
|
||||
colors={
|
||||
"status": Fore.GREEN if success else Fore.RED,
|
||||
"timing": Fore.YELLOW,
|
||||
"status": LogColor.SUCCESS if success else LogColor.ERROR,
|
||||
"timing": LogColor.WARNING,
|
||||
},
|
||||
)
|
||||
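A hedged usage sketch of the convenience method above; the constructor defaults are assumptions, and the exact rendered line depends on the tag width and icon table:

# Illustrative sketch, not part of this diff.
from crawl4ai.async_logger import AsyncLogger  # module path assumed

logger = AsyncLogger(verbose=True)
logger.url_status(
    url="https://example.com/docs/getting-started",
    success=True,
    timing=1.37,   # seconds
    tag="FETCH",
)
# prints roughly: [FETCH]... ℹ https://example.com/docs/getting-started | ✓ | ⏱: 1.37s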
|
||||
@@ -250,11 +311,13 @@ class AsyncLogger(AsyncLoggerBase):
|
||||
tag: Tag for the message
|
||||
url_length: Maximum length for URL in log
|
||||
"""
|
||||
decoded_url = unquote(url)
|
||||
readable_url = self._shorten(decoded_url, url_length)
|
||||
self._log(
|
||||
level=LogLevel.ERROR,
|
||||
message="{url:.{url_length}}... | Error: {error}",
|
||||
message="{url} | Error: {error}",
|
||||
tag=tag,
|
||||
params={"url": url, "url_length": url_length, "error": error},
|
||||
params={"url": readable_url, "error": error},
|
||||
)
|
||||
|
||||
class AsyncFileLogger(AsyncLoggerBase):
|
||||
@@ -298,13 +361,13 @@ class AsyncFileLogger(AsyncLoggerBase):
|
||||
"""Log an error message to file."""
|
||||
self._write_to_file("ERROR", message, tag)
|
||||
|
||||
def url_status(self, url: str, success: bool, timing: float, tag: str = "FETCH", url_length: int = 50):
|
||||
def url_status(self, url: str, success: bool, timing: float, tag: str = "FETCH", url_length: int = 100):
|
||||
"""Log URL fetch status to file."""
|
||||
status = "SUCCESS" if success else "FAILED"
|
||||
message = f"{url[:url_length]}... | Status: {status} | Time: {timing:.2f}s"
|
||||
self._write_to_file("URL_STATUS", message, tag)
|
||||
|
||||
def error_status(self, url: str, error: str, tag: str = "ERROR", url_length: int = 50):
|
||||
def error_status(self, url: str, error: str, tag: str = "ERROR", url_length: int = 100):
|
||||
"""Log error status to file."""
|
||||
message = f"{url[:url_length]}... | Error: {error}"
|
||||
self._write_to_file("ERROR", message, tag)
|
||||
|
||||
@@ -2,7 +2,6 @@ from .__version__ import __version__ as crawl4ai_version
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
from colorama import Fore
|
||||
from pathlib import Path
|
||||
from typing import Optional, List
|
||||
import json
|
||||
@@ -10,50 +9,45 @@ import asyncio
|
||||
|
||||
# from contextlib import nullcontext, asynccontextmanager
|
||||
from contextlib import asynccontextmanager
|
||||
from .models import CrawlResult, MarkdownGenerationResult, DispatchResult, ScrapingResult
|
||||
from .models import (
|
||||
CrawlResult,
|
||||
MarkdownGenerationResult,
|
||||
DispatchResult,
|
||||
ScrapingResult,
|
||||
CrawlResultContainer,
|
||||
RunManyReturn
|
||||
)
|
||||
from .async_database import async_db_manager
|
||||
from .chunking_strategy import * # noqa: F403
|
||||
from .chunking_strategy import RegexChunking, ChunkingStrategy, IdentityChunking
|
||||
from .chunking_strategy import IdentityChunking
|
||||
from .content_filter_strategy import * # noqa: F403
|
||||
from .content_filter_strategy import RelevantContentFilter
|
||||
from .extraction_strategy import * # noqa: F403
|
||||
from .extraction_strategy import NoExtractionStrategy, ExtractionStrategy
|
||||
from .extraction_strategy import * # noqa: F403
|
||||
from .extraction_strategy import NoExtractionStrategy
|
||||
from .async_crawler_strategy import (
|
||||
AsyncCrawlerStrategy,
|
||||
AsyncPlaywrightCrawlerStrategy,
|
||||
AsyncCrawlResponse,
|
||||
)
|
||||
from .cache_context import CacheMode, CacheContext, _legacy_to_cache_mode
|
||||
from .cache_context import CacheMode, CacheContext
|
||||
from .markdown_generation_strategy import (
|
||||
DefaultMarkdownGenerator,
|
||||
MarkdownGenerationStrategy,
|
||||
)
|
||||
from .deep_crawling import DeepCrawlDecorator
|
||||
from .async_logger import AsyncLogger, AsyncLoggerBase
|
||||
from .async_configs import BrowserConfig, CrawlerRunConfig
|
||||
from .async_dispatcher import * # noqa: F403
|
||||
from .async_configs import BrowserConfig, CrawlerRunConfig, ProxyConfig
|
||||
from .async_dispatcher import * # noqa: F403
|
||||
from .async_dispatcher import BaseDispatcher, MemoryAdaptiveDispatcher, RateLimiter
|
||||
|
||||
from .config import MIN_WORD_THRESHOLD
|
||||
from .utils import (
|
||||
sanitize_input_encode,
|
||||
InvalidCSSSelectorError,
|
||||
fast_format_html,
|
||||
create_box_message,
|
||||
get_error_context,
|
||||
RobotsParser,
|
||||
preprocess_html_for_schema,
|
||||
)
|
||||
|
||||
from typing import Union, AsyncGenerator, TypeVar
|
||||
|
||||
CrawlResultT = TypeVar('CrawlResultT', bound=CrawlResult)
|
||||
RunManyReturn = Union[CrawlResultT, List[CrawlResultT], AsyncGenerator[CrawlResultT, None]]
|
||||
|
||||
DeepCrawlSingleReturn = Union[List[CrawlResultT], AsyncGenerator[CrawlResultT, None]]
|
||||
DeepCrawlManyReturn = Union[
|
||||
List[List[CrawlResultT]],
|
||||
AsyncGenerator[CrawlResultT, None],
|
||||
]
|
||||
|
||||
class AsyncWebCrawler:
|
||||
"""
|
||||
@@ -116,7 +110,8 @@ class AsyncWebCrawler:
|
||||
self,
|
||||
crawler_strategy: AsyncCrawlerStrategy = None,
|
||||
config: BrowserConfig = None,
|
||||
base_directory: str = str(os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home())),
|
||||
base_directory: str = str(
|
||||
os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home())),
|
||||
thread_safe: bool = False,
|
||||
logger: AsyncLoggerBase = None,
|
||||
**kwargs,
|
||||
@@ -144,7 +139,8 @@ class AsyncWebCrawler:
|
||||
)
|
||||
|
||||
# Initialize crawler strategy
|
||||
params = {k: v for k, v in kwargs.items() if k in ["browser_config", "logger"]}
|
||||
params = {k: v for k, v in kwargs.items() if k in [
|
||||
"browser_config", "logger"]}
|
||||
self.crawler_strategy = crawler_strategy or AsyncPlaywrightCrawlerStrategy(
|
||||
browser_config=browser_config,
|
||||
logger=self.logger,
|
||||
@@ -166,23 +162,18 @@ class AsyncWebCrawler:
|
||||
|
||||
# Decorate arun method with deep crawling capabilities
|
||||
self._deep_handler = DeepCrawlDecorator(self)
|
||||
self.arun = self._deep_handler(self.arun)
|
||||
self.arun = self._deep_handler(self.arun)
|
||||
|
||||
async def start(self):
|
||||
"""
|
||||
Start the crawler explicitly without using context manager.
|
||||
This is equivalent to using 'async with' but gives more control over the lifecycle.
|
||||
|
||||
This method will:
|
||||
1. Initialize the browser and context
|
||||
2. Perform warmup sequence
|
||||
3. Return the crawler instance for method chaining
|
||||
|
||||
Returns:
|
||||
AsyncWebCrawler: The initialized crawler instance
|
||||
"""
|
||||
await self.crawler_strategy.__aenter__()
|
||||
await self.awarmup()
|
||||
self.logger.info(f"Crawl4AI {crawl4ai_version}", tag="INIT")
|
||||
self.ready = True
|
||||
return self
|
||||
|
||||
async def close(self):
|
||||
@@ -202,18 +193,6 @@ class AsyncWebCrawler:
|
||||
async def __aexit__(self, exc_type, exc_val, exc_tb):
|
||||
await self.close()
|
||||
|
||||
async def awarmup(self):
|
||||
"""
|
||||
Initialize the crawler with warm-up sequence.
|
||||
|
||||
This method:
|
||||
1. Logs initialization info
|
||||
2. Sets up browser configuration
|
||||
3. Marks the crawler as ready
|
||||
"""
|
||||
self.logger.info(f"Crawl4AI {crawl4ai_version}", tag="INIT")
|
||||
self.ready = True
|
||||
|
||||
@asynccontextmanager
|
||||
async def nullcontext(self):
|
||||
"""异步空上下文管理器"""
|
||||
@@ -223,23 +202,6 @@ class AsyncWebCrawler:
|
||||
self,
|
||||
url: str,
|
||||
config: CrawlerRunConfig = None,
|
||||
# Legacy parameters maintained for backwards compatibility
|
||||
# word_count_threshold=MIN_WORD_THRESHOLD,
|
||||
# extraction_strategy: ExtractionStrategy = None,
|
||||
# chunking_strategy: ChunkingStrategy = RegexChunking(),
|
||||
# content_filter: RelevantContentFilter = None,
|
||||
# cache_mode: Optional[CacheMode] = None,
|
||||
# Deprecated cache parameters
|
||||
# bypass_cache: bool = False,
|
||||
# disable_cache: bool = False,
|
||||
# no_cache_read: bool = False,
|
||||
# no_cache_write: bool = False,
|
||||
# Other legacy parameters
|
||||
# css_selector: str = None,
|
||||
# screenshot: bool = False,
|
||||
# pdf: bool = False,
|
||||
# user_agent: str = None,
|
||||
# verbose=True,
|
||||
**kwargs,
|
||||
) -> RunManyReturn:
|
||||
"""
|
||||
@@ -270,56 +232,25 @@ class AsyncWebCrawler:
|
||||
Returns:
|
||||
CrawlResult: The result of crawling and processing
|
||||
"""
|
||||
crawler_config = config or CrawlerRunConfig()
|
||||
# Auto-start if not ready
|
||||
if not self.ready:
|
||||
await self.start()
|
||||
|
||||
config = config or CrawlerRunConfig()
|
||||
if not isinstance(url, str) or not url:
|
||||
raise ValueError("Invalid URL, make sure the URL is a non-empty string")
|
||||
raise ValueError(
|
||||
"Invalid URL, make sure the URL is a non-empty string")
|
||||
|
||||
async with self._lock or self.nullcontext():
|
||||
try:
|
||||
self.logger.verbose = crawler_config.verbose
|
||||
# Handle configuration
|
||||
if crawler_config is not None:
|
||||
config = crawler_config
|
||||
else:
|
||||
# Merge all parameters into a single kwargs dict for config creation
|
||||
# config_kwargs = {
|
||||
# "word_count_threshold": word_count_threshold,
|
||||
# "extraction_strategy": extraction_strategy,
|
||||
# "chunking_strategy": chunking_strategy,
|
||||
# "content_filter": content_filter,
|
||||
# "cache_mode": cache_mode,
|
||||
# "bypass_cache": bypass_cache,
|
||||
# "disable_cache": disable_cache,
|
||||
# "no_cache_read": no_cache_read,
|
||||
# "no_cache_write": no_cache_write,
|
||||
# "css_selector": css_selector,
|
||||
# "screenshot": screenshot,
|
||||
# "pdf": pdf,
|
||||
# "verbose": verbose,
|
||||
# **kwargs,
|
||||
# }
|
||||
# config = CrawlerRunConfig.from_kwargs(config_kwargs)
|
||||
pass
|
||||
|
||||
# Handle deprecated cache parameters
|
||||
# if any([bypass_cache, disable_cache, no_cache_read, no_cache_write]):
|
||||
# # Convert legacy parameters if cache_mode not provided
|
||||
# if config.cache_mode is None:
|
||||
# config.cache_mode = _legacy_to_cache_mode(
|
||||
# disable_cache=disable_cache,
|
||||
# bypass_cache=bypass_cache,
|
||||
# no_cache_read=no_cache_read,
|
||||
# no_cache_write=no_cache_write,
|
||||
# )
|
||||
self.logger.verbose = config.verbose
|
||||
|
||||
# Default to ENABLED if no cache mode specified
|
||||
if config.cache_mode is None:
|
||||
config.cache_mode = CacheMode.ENABLED
|
||||
|
||||
# Create cache context
|
||||
cache_context = CacheContext(
|
||||
url, config.cache_mode, False
|
||||
)
|
||||
cache_context = CacheContext(url, config.cache_mode, False)
|
||||
|
||||
# Initialize processing variables
|
||||
async_response: AsyncCrawlResponse = None
|
||||
@@ -349,7 +280,7 @@ class AsyncWebCrawler:
|
||||
# if config.screenshot and not screenshot or config.pdf and not pdf:
|
||||
if config.screenshot and not screenshot_data:
|
||||
cached_result = None
|
||||
|
||||
|
||||
if config.pdf and not pdf_data:
|
||||
cached_result = None
|
||||
|
||||
@@ -362,12 +293,12 @@ class AsyncWebCrawler:
|
||||
|
||||
# Update proxy configuration from rotation strategy if available
|
||||
if config and config.proxy_rotation_strategy:
|
||||
next_proxy = await config.proxy_rotation_strategy.get_next_proxy()
|
||||
next_proxy: ProxyConfig = await config.proxy_rotation_strategy.get_next_proxy()
|
||||
if next_proxy:
|
||||
self.logger.info(
|
||||
message="Switch proxy: {proxy}",
|
||||
tag="PROXY",
|
||||
params={"proxy": next_proxy.server},
|
||||
params={"proxy": next_proxy.server}
|
||||
)
|
||||
config.proxy_config = next_proxy
|
||||
# config = config.clone(proxy_config=next_proxy)
|
||||
@@ -377,18 +308,23 @@ class AsyncWebCrawler:
|
||||
t1 = time.perf_counter()
|
||||
|
||||
if config.user_agent:
|
||||
self.crawler_strategy.update_user_agent(config.user_agent)
|
||||
self.crawler_strategy.update_user_agent(
|
||||
config.user_agent)
|
||||
|
||||
# Check robots.txt if enabled
|
||||
if config and config.check_robots_txt:
|
||||
if not await self.robots_parser.can_fetch(url, self.browser_config.user_agent):
|
||||
if not await self.robots_parser.can_fetch(
|
||||
url, self.browser_config.user_agent
|
||||
):
|
||||
return CrawlResult(
|
||||
url=url,
|
||||
html="",
|
||||
success=False,
|
||||
status_code=403,
|
||||
error_message="Access denied by robots.txt",
|
||||
response_headers={"X-Robots-Status": "Blocked by robots.txt"}
|
||||
response_headers={
|
||||
"X-Robots-Status": "Blocked by robots.txt"
|
||||
},
|
||||
)
|
||||
|
||||
##############################
|
||||
@@ -415,15 +351,16 @@ class AsyncWebCrawler:
|
||||
###############################################################
|
||||
# Process the HTML content, Call CrawlerStrategy.process_html #
|
||||
###############################################################
|
||||
crawl_result : CrawlResult = await self.aprocess_html(
|
||||
crawl_result: CrawlResult = await self.aprocess_html(
|
||||
url=url,
|
||||
html=html,
|
||||
extracted_content=extracted_content,
|
||||
config=config, # Pass the config object instead of individual parameters
|
||||
screenshot=screenshot_data,
|
||||
screenshot_data=screenshot_data,
|
||||
pdf_data=pdf_data,
|
||||
verbose=config.verbose,
|
||||
is_raw_html=True if url.startswith("raw:") else False,
|
||||
redirected_url=async_response.redirected_url,
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
@@ -432,49 +369,41 @@ class AsyncWebCrawler:
|
||||
crawl_result.response_headers = async_response.response_headers
|
||||
crawl_result.downloaded_files = async_response.downloaded_files
|
||||
crawl_result.js_execution_result = js_execution_result
|
||||
crawl_result.ssl_certificate = (
|
||||
async_response.ssl_certificate
|
||||
) # Add SSL certificate
|
||||
crawl_result.mhtml = async_response.mhtml_data
|
||||
crawl_result.ssl_certificate = async_response.ssl_certificate
|
||||
# Add captured network and console data if available
|
||||
crawl_result.network_requests = async_response.network_requests
|
||||
crawl_result.console_messages = async_response.console_messages
|
||||
|
||||
crawl_result.success = bool(html)
|
||||
crawl_result.session_id = getattr(config, "session_id", None)
|
||||
crawl_result.session_id = getattr(
|
||||
config, "session_id", None)
|
||||
|
||||
self.logger.success(
|
||||
message="{url:.50}... | Status: {status} | Total: {timing}",
|
||||
self.logger.url_status(
|
||||
url=cache_context.display_url,
|
||||
success=crawl_result.success,
|
||||
timing=time.perf_counter() - start_time,
|
||||
tag="COMPLETE",
|
||||
params={
|
||||
"url": cache_context.display_url,
|
||||
"status": crawl_result.success,
|
||||
"timing": f"{time.perf_counter() - start_time:.2f}s",
|
||||
},
|
||||
colors={
|
||||
"status": Fore.GREEN if crawl_result.success else Fore.RED,
|
||||
"timing": Fore.YELLOW,
|
||||
},
|
||||
)
|
||||
|
||||
# Update cache if appropriate
|
||||
if cache_context.should_write() and not bool(cached_result):
|
||||
await async_db_manager.acache_url(crawl_result)
|
||||
|
||||
return crawl_result
|
||||
return CrawlResultContainer(crawl_result)
|
||||
|
||||
else:
|
||||
self.logger.success(
|
||||
message="{url:.50}... | Status: {status} | Total: {timing}",
|
||||
tag="COMPLETE",
|
||||
params={
|
||||
"url": cache_context.display_url,
|
||||
"status": True,
|
||||
"timing": f"{time.perf_counter() - start_time:.2f}s",
|
||||
},
|
||||
colors={"status": Fore.GREEN, "timing": Fore.YELLOW},
|
||||
self.logger.url_status(
|
||||
url=cache_context.display_url,
|
||||
success=True,
|
||||
timing=time.perf_counter() - start_time,
|
||||
tag="COMPLETE"
|
||||
)
|
||||
|
||||
cached_result.success = bool(html)
|
||||
cached_result.session_id = getattr(config, "session_id", None)
|
||||
cached_result.session_id = getattr(
|
||||
config, "session_id", None)
|
||||
cached_result.redirected_url = cached_result.redirected_url or url
|
||||
return cached_result
|
||||
return CrawlResultContainer(cached_result)
|
||||
|
||||
except Exception as e:
|
||||
error_context = get_error_context(sys.exc_info())
|
||||
@@ -488,12 +417,14 @@ class AsyncWebCrawler:
|
||||
|
||||
self.logger.error_status(
|
||||
url=url,
|
||||
error=create_box_message(error_message, type="error"),
|
||||
error=error_message,
|
||||
tag="ERROR",
|
||||
)
|
||||
|
||||
return CrawlResult(
|
||||
url=url, html="", success=False, error_message=error_message
|
||||
return CrawlResultContainer(
|
||||
CrawlResult(
|
||||
url=url, html="", success=False, error_message=error_message
|
||||
)
|
||||
)
|
||||
|
||||
async def aprocess_html(
|
||||
@@ -502,7 +433,7 @@ class AsyncWebCrawler:
|
||||
html: str,
|
||||
extracted_content: str,
|
||||
config: CrawlerRunConfig,
|
||||
screenshot: str,
|
||||
screenshot_data: str,
|
||||
pdf_data: str,
|
||||
verbose: bool,
|
||||
**kwargs,
|
||||
@@ -515,7 +446,7 @@ class AsyncWebCrawler:
|
||||
html: Raw HTML content
|
||||
extracted_content: Previously extracted content (if any)
|
||||
config: Configuration object controlling processing behavior
|
||||
screenshot: Screenshot data (if any)
|
||||
screenshot_data: Screenshot data (if any)
|
||||
pdf_data: PDF data (if any)
|
||||
verbose: Whether to enable verbose logging
|
||||
**kwargs: Additional parameters for backwards compatibility
|
||||
@@ -534,15 +465,17 @@ class AsyncWebCrawler:
|
||||
scraping_strategy.logger = self.logger
|
||||
|
||||
# Process HTML content
|
||||
params = {k: v for k, v in config.to_dict().items() if k not in ["url"]}
|
||||
params = config.__dict__.copy()
|
||||
params.pop("url", None)
|
||||
# add keys from kwargs that are not already present in params
|
||||
params.update({k: v for k, v in kwargs.items() if k not in params.keys()})
|
||||
params.update({k: v for k, v in kwargs.items()
|
||||
if k not in params.keys()})
|
||||
|
||||
|
||||
################################
|
||||
# Scraping Strategy Execution #
|
||||
################################
|
||||
result : ScrapingResult = scraping_strategy.scrap(url, html, **params)
|
||||
result: ScrapingResult = scraping_strategy.scrap(
|
||||
url, html, **params)
|
||||
|
||||
if result is None:
|
||||
raise ValueError(
|
||||
@@ -558,15 +491,20 @@ class AsyncWebCrawler:
|
||||
|
||||
# Extract results - handle both dict and ScrapingResult
|
||||
if isinstance(result, dict):
|
||||
cleaned_html = sanitize_input_encode(result.get("cleaned_html", ""))
|
||||
cleaned_html = sanitize_input_encode(
|
||||
result.get("cleaned_html", ""))
|
||||
media = result.get("media", {})
|
||||
tables = media.pop("tables", []) if isinstance(media, dict) else []
|
||||
links = result.get("links", {})
|
||||
metadata = result.get("metadata", {})
|
||||
else:
|
||||
cleaned_html = sanitize_input_encode(result.cleaned_html)
|
||||
media = result.media.model_dump()
|
||||
tables = media.pop("tables", [])
|
||||
links = result.links.model_dump()
|
||||
metadata = result.metadata
|
||||
|
||||
fit_html = preprocess_html_for_schema(html_content=html, text_threshold=500, max_size=300_000)
|
||||
|
||||
################################
|
||||
# Generate Markdown #
|
||||
@@ -575,24 +513,65 @@ class AsyncWebCrawler:
|
||||
config.markdown_generator or DefaultMarkdownGenerator()
|
||||
)
|
||||
|
||||
# --- SELECT HTML SOURCE BASED ON CONTENT_SOURCE ---
|
||||
# Get the desired source from the generator config, default to 'cleaned_html'
|
||||
selected_html_source = getattr(markdown_generator, 'content_source', 'cleaned_html')
|
||||
|
||||
# Define the source selection logic using dict dispatch
|
||||
html_source_selector = {
|
||||
"raw_html": lambda: html, # The original raw HTML
|
||||
"cleaned_html": lambda: cleaned_html, # The HTML after scraping strategy
|
||||
"fit_html": lambda: fit_html, # The HTML after preprocessing for schema
|
||||
}
|
||||
|
||||
markdown_input_html = cleaned_html # Default to cleaned_html
|
||||
|
||||
try:
|
||||
# Get the appropriate lambda function, default to returning cleaned_html if key not found
|
||||
source_lambda = html_source_selector.get(selected_html_source, lambda: cleaned_html)
|
||||
# Execute the lambda to get the selected HTML
|
||||
markdown_input_html = source_lambda()
|
||||
|
||||
# Log which source is being used (optional, but helpful for debugging)
|
||||
# if self.logger and verbose:
|
||||
# actual_source_used = selected_html_source if selected_html_source in html_source_selector else 'cleaned_html (default)'
|
||||
# self.logger.debug(f"Using '{actual_source_used}' as source for Markdown generation for {url}", tag="MARKDOWN_SRC")
|
||||
|
||||
except Exception as e:
|
||||
# Handle potential errors, especially from preprocess_html_for_schema
|
||||
if self.logger:
|
||||
self.logger.warning(
|
||||
f"Error getting/processing '{selected_html_source}' for markdown source: {e}. Falling back to cleaned_html.",
|
||||
tag="MARKDOWN_SRC"
|
||||
)
|
||||
# Ensure markdown_input_html is still the default cleaned_html in case of error
|
||||
markdown_input_html = cleaned_html
|
||||
# --- END: HTML SOURCE SELECTION ---
|
||||
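A hedged sketch of selecting which HTML feeds the markdown generator; the `content_source` attribute is read via `getattr` above, and passing it through the `DefaultMarkdownGenerator` constructor is an assumption:

# Illustrative sketch, not part of this diff.
from crawl4ai import CrawlerRunConfig
from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator  # path assumed

config = CrawlerRunConfig(
    markdown_generator=DefaultMarkdownGenerator(content_source="fit_html"),  # or "raw_html" / "cleaned_html"
)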
|
||||
# Uncomment if by default we want to use PruningContentFilter
|
||||
# if not config.content_filter and not markdown_generator.content_filter:
|
||||
# markdown_generator.content_filter = PruningContentFilter()
|
||||
|
||||
markdown_result: MarkdownGenerationResult = (
|
||||
markdown_generator.generate_markdown(
|
||||
cleaned_html=cleaned_html,
|
||||
base_url=url,
|
||||
input_html=markdown_input_html,
|
||||
base_url=params.get("redirected_url", url)
|
||||
# html2text_options=kwargs.get('html2text', {})
|
||||
)
|
||||
)
|
||||
|
||||
# Log processing completion
|
||||
self.logger.info(
|
||||
message="{url:.50}... | Time: {timing}s",
|
||||
tag="SCRAPE",
|
||||
params={"url": _url, "timing": int((time.perf_counter() - t1) * 1000) / 1000},
|
||||
self.logger.url_status(
|
||||
url=_url,
|
||||
success=True,
|
||||
timing=int((time.perf_counter() - t1) * 1000) / 1000,
|
||||
tag="SCRAPE"
|
||||
)
|
||||
# self.logger.info(
|
||||
# message="{url:.50}... | Time: {timing}s",
|
||||
# tag="SCRAPE",
|
||||
# params={"url": _url, "timing": int((time.perf_counter() - t1) * 1000) / 1000},
|
||||
# )
|
||||
|
||||
################################
|
||||
# Structured Content Extraction #
|
||||
@@ -616,6 +595,7 @@ class AsyncWebCrawler:
|
||||
content = {
|
||||
"markdown": markdown_result.raw_markdown,
|
||||
"html": html,
|
||||
"fit_html": fit_html,
|
||||
"cleaned_html": cleaned_html,
|
||||
"fit_markdown": markdown_result.fit_markdown,
|
||||
}.get(content_format, markdown_result.raw_markdown)
|
||||
@@ -623,7 +603,7 @@ class AsyncWebCrawler:
|
||||
# Use IdentityChunking for HTML input, otherwise use provided chunking strategy
|
||||
chunking = (
|
||||
IdentityChunking()
|
||||
if content_format in ["html", "cleaned_html"]
|
||||
if content_format in ["html", "cleaned_html", "fit_html"]
|
||||
else config.chunking_strategy
|
||||
)
|
||||
sections = chunking.chunk(content)
|
||||
@@ -639,10 +619,6 @@ class AsyncWebCrawler:
|
||||
params={"url": _url, "timing": time.perf_counter() - t1},
|
||||
)
|
||||
|
||||
# Handle screenshot and PDF data
|
||||
screenshot_data = None if not screenshot else screenshot
|
||||
pdf_data = None if not pdf_data else pdf_data
|
||||
|
||||
# Apply HTML formatting if requested
|
||||
if config.prettiify:
|
||||
cleaned_html = fast_format_html(cleaned_html)
|
||||
@@ -651,9 +627,11 @@ class AsyncWebCrawler:
|
||||
return CrawlResult(
|
||||
url=url,
|
||||
html=html,
|
||||
fit_html=fit_html,
|
||||
cleaned_html=cleaned_html,
|
||||
markdown=markdown_result,
|
||||
media=media,
|
||||
tables=tables, # NEW
|
||||
links=links,
|
||||
metadata=metadata,
|
||||
screenshot=screenshot_data,
|
||||
@@ -666,22 +644,22 @@ class AsyncWebCrawler:
|
||||
async def arun_many(
|
||||
self,
|
||||
urls: List[str],
|
||||
config: Optional[CrawlerRunConfig] = None,
|
||||
config: Optional[CrawlerRunConfig] = None,
|
||||
dispatcher: Optional[BaseDispatcher] = None,
|
||||
# Legacy parameters maintained for backwards compatibility
|
||||
word_count_threshold=MIN_WORD_THRESHOLD,
|
||||
extraction_strategy: ExtractionStrategy = None,
|
||||
chunking_strategy: ChunkingStrategy = RegexChunking(),
|
||||
content_filter: RelevantContentFilter = None,
|
||||
cache_mode: Optional[CacheMode] = None,
|
||||
bypass_cache: bool = False,
|
||||
css_selector: str = None,
|
||||
screenshot: bool = False,
|
||||
pdf: bool = False,
|
||||
user_agent: str = None,
|
||||
verbose=True,
|
||||
**kwargs
|
||||
) -> RunManyReturn:
|
||||
# word_count_threshold=MIN_WORD_THRESHOLD,
|
||||
# extraction_strategy: ExtractionStrategy = None,
|
||||
# chunking_strategy: ChunkingStrategy = RegexChunking(),
|
||||
# content_filter: RelevantContentFilter = None,
|
||||
# cache_mode: Optional[CacheMode] = None,
|
||||
# bypass_cache: bool = False,
|
||||
# css_selector: str = None,
|
||||
# screenshot: bool = False,
|
||||
# pdf: bool = False,
|
||||
# user_agent: str = None,
|
||||
# verbose=True,
|
||||
**kwargs,
|
||||
) -> RunManyReturn:
|
||||
"""
|
||||
Runs the crawler for multiple URLs concurrently using a configurable dispatcher strategy.
|
||||
|
||||
@@ -712,20 +690,21 @@ class AsyncWebCrawler:
|
||||
):
|
||||
print(f"Processed {result.url}: {len(result.markdown)} chars")
|
||||
"""
|
||||
if config is None:
|
||||
config = CrawlerRunConfig(
|
||||
word_count_threshold=word_count_threshold,
|
||||
extraction_strategy=extraction_strategy,
|
||||
chunking_strategy=chunking_strategy,
|
||||
content_filter=content_filter,
|
||||
cache_mode=cache_mode,
|
||||
bypass_cache=bypass_cache,
|
||||
css_selector=css_selector,
|
||||
screenshot=screenshot,
|
||||
pdf=pdf,
|
||||
verbose=verbose,
|
||||
**kwargs,
|
||||
)
|
||||
config = config or CrawlerRunConfig()
|
||||
# if config is None:
|
||||
# config = CrawlerRunConfig(
|
||||
# word_count_threshold=word_count_threshold,
|
||||
# extraction_strategy=extraction_strategy,
|
||||
# chunking_strategy=chunking_strategy,
|
||||
# content_filter=content_filter,
|
||||
# cache_mode=cache_mode,
|
||||
# bypass_cache=bypass_cache,
|
||||
# css_selector=css_selector,
|
||||
# screenshot=screenshot,
|
||||
# pdf=pdf,
|
||||
# verbose=verbose,
|
||||
# **kwargs,
|
||||
# )
|
||||
|
||||
if dispatcher is None:
|
||||
dispatcher = MemoryAdaptiveDispatcher(
|
||||
@@ -736,37 +715,32 @@ class AsyncWebCrawler:
|
||||
|
||||
def transform_result(task_result):
|
||||
return (
|
||||
setattr(task_result.result, 'dispatch_result',
|
||||
DispatchResult(
|
||||
task_id=task_result.task_id,
|
||||
memory_usage=task_result.memory_usage,
|
||||
peak_memory=task_result.peak_memory,
|
||||
start_time=task_result.start_time,
|
||||
end_time=task_result.end_time,
|
||||
error_message=task_result.error_message,
|
||||
)
|
||||
) or task_result.result
|
||||
setattr(
|
||||
task_result.result,
|
||||
"dispatch_result",
|
||||
DispatchResult(
|
||||
task_id=task_result.task_id,
|
||||
memory_usage=task_result.memory_usage,
|
||||
peak_memory=task_result.peak_memory,
|
||||
start_time=task_result.start_time,
|
||||
end_time=task_result.end_time,
|
||||
error_message=task_result.error_message,
|
||||
),
|
||||
)
|
||||
or task_result.result
|
||||
)
|
||||
|
||||
stream = config.stream
|
||||
|
||||
|
||||
if stream:
|
||||
|
||||
async def result_transformer():
|
||||
async for task_result in dispatcher.run_urls_stream(crawler=self, urls=urls, config=config):
|
||||
async for task_result in dispatcher.run_urls_stream(
|
||||
crawler=self, urls=urls, config=config
|
||||
):
|
||||
yield transform_result(task_result)
|
||||
|
||||
return result_transformer()
|
||||
else:
|
||||
_results = await dispatcher.run_urls(crawler=self, urls=urls, config=config)
|
||||
return [transform_result(res) for res in _results]
|
||||
|
||||
async def aclear_cache(self):
|
||||
"""Clear the cache database."""
|
||||
await async_db_manager.cleanup()
|
||||
|
||||
async def aflush_cache(self):
|
||||
"""Flush the cache database."""
|
||||
await async_db_manager.aflush_db()
|
||||
|
||||
async def aget_cache_size(self):
|
||||
"""Get the total number of cached items."""
|
||||
return await async_db_manager.aget_total_count()
|
||||
return [transform_result(res) for res in _results]
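The reworked arun_many above retires the legacy keyword arguments in favour of a single CrawlerRunConfig and attaches a DispatchResult to every returned item via transform_result. A hedged usage sketch covering both batch and streaming modes; stream is assumed to be a CrawlerRunConfig field, as the diff reads config.stream:

```python
# Hedged usage sketch for the refactored arun_many shown above.
import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig

async def main():
    urls = ["https://example.com", "https://example.org"]
    async with AsyncWebCrawler() as crawler:
        # Batch mode: a list of CrawlResult objects, each carrying the
        # dispatch_result attribute set by transform_result().
        results = await crawler.arun_many(urls, config=CrawlerRunConfig(stream=False))
        for r in results:
            print(r.url, getattr(r, "dispatch_result", None))

        # Streaming mode: results arrive as an async generator.
        async for r in await crawler.arun_many(urls, config=CrawlerRunConfig(stream=True)):
            print("streamed:", r.url)

asyncio.run(main())
```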
@@ -5,7 +5,10 @@ import os
|
||||
import sys
|
||||
import shutil
|
||||
import tempfile
|
||||
import psutil
|
||||
import signal
|
||||
import subprocess
|
||||
import shlex
|
||||
from playwright.async_api import BrowserContext
|
||||
import hashlib
|
||||
from .js_snippet import load_js_script
|
||||
@@ -76,6 +79,51 @@ class ManagedBrowser:
|
||||
_cleanup(): Terminates the browser process and removes the temporary directory.
|
||||
create_profile(): Static method to create a user profile by launching a browser for user interaction.
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def build_browser_flags(config: BrowserConfig) -> List[str]:
|
||||
"""Common CLI flags for launching Chromium"""
|
||||
flags = [
|
||||
"--disable-gpu",
|
||||
"--disable-gpu-compositing",
|
||||
"--disable-software-rasterizer",
|
||||
"--no-sandbox",
|
||||
"--disable-dev-shm-usage",
|
||||
"--no-first-run",
|
||||
"--no-default-browser-check",
|
||||
"--disable-infobars",
|
||||
"--window-position=0,0",
|
||||
"--ignore-certificate-errors",
|
||||
"--ignore-certificate-errors-spki-list",
|
||||
"--disable-blink-features=AutomationControlled",
|
||||
"--window-position=400,0",
|
||||
"--disable-renderer-backgrounding",
|
||||
"--disable-ipc-flooding-protection",
|
||||
"--force-color-profile=srgb",
|
||||
"--mute-audio",
|
||||
"--disable-background-timer-throttling",
|
||||
]
|
||||
if config.light_mode:
|
||||
flags.extend(BROWSER_DISABLE_OPTIONS)
|
||||
if config.text_mode:
|
||||
flags.extend([
|
||||
"--blink-settings=imagesEnabled=false",
|
||||
"--disable-remote-fonts",
|
||||
"--disable-images",
|
||||
"--disable-javascript",
|
||||
"--disable-software-rasterizer",
|
||||
"--disable-dev-shm-usage",
|
||||
])
|
||||
# proxy support
|
||||
if config.proxy:
|
||||
flags.append(f"--proxy-server={config.proxy}")
|
||||
elif config.proxy_config:
|
||||
creds = ""
|
||||
if config.proxy_config.username and config.proxy_config.password:
|
||||
creds = f"{config.proxy_config.username}:{config.proxy_config.password}@"
|
||||
flags.append(f"--proxy-server={creds}{config.proxy_config.server}")
|
||||
# dedupe
|
||||
return list(dict.fromkeys(flags))
|
||||
|
||||
browser_type: str
|
||||
user_data_dir: str
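build_browser_flags above centralises the common Chromium launch flags and deduplicates them with dict.fromkeys. A hedged sketch of combining those shared flags with instance-specific arguments; the browser binary and profile paths below are hypothetical placeholders, and the BrowserConfig field names follow the diff:

```python
# Hedged sketch: reuse the shared flag builder introduced above.
from crawl4ai.async_configs import BrowserConfig
from crawl4ai.browser_manager import ManagedBrowser

cfg = BrowserConfig(headless=True, light_mode=True, text_mode=False)
common_flags = ManagedBrowser.build_browser_flags(cfg)

launch_args = [
    "/usr/bin/chromium",                      # hypothetical browser binary
    "--remote-debugging-port=9222",
    "--user-data-dir=/tmp/crawl4ai-profile",  # hypothetical profile directory
] + common_flags                              # already deduplicated via dict.fromkeys()
print(launch_args)
```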
@@ -94,6 +142,7 @@ class ManagedBrowser:
|
||||
host: str = "localhost",
|
||||
debugging_port: int = 9222,
|
||||
cdp_url: Optional[str] = None,
|
||||
browser_config: Optional[BrowserConfig] = None,
|
||||
):
|
||||
"""
|
||||
Initialize the ManagedBrowser instance.
|
||||
@@ -109,17 +158,19 @@ class ManagedBrowser:
|
||||
host (str): Host for debugging the browser. Default: "localhost".
|
||||
debugging_port (int): Port for debugging the browser. Default: 9222.
|
||||
cdp_url (str or None): CDP URL to connect to the browser. Default: None.
|
||||
browser_config (BrowserConfig): Configuration object containing all browser settings. Default: None.
|
||||
"""
|
||||
self.browser_type = browser_type
|
||||
self.user_data_dir = user_data_dir
|
||||
self.headless = headless
|
||||
self.browser_type = browser_config.browser_type
|
||||
self.user_data_dir = browser_config.user_data_dir
|
||||
self.headless = browser_config.headless
|
||||
self.browser_process = None
|
||||
self.temp_dir = None
|
||||
self.debugging_port = debugging_port
|
||||
self.host = host
|
||||
self.debugging_port = browser_config.debugging_port
|
||||
self.host = browser_config.host
|
||||
self.logger = logger
|
||||
self.shutting_down = False
|
||||
self.cdp_url = cdp_url
|
||||
self.cdp_url = browser_config.cdp_url
|
||||
self.browser_config = browser_config
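With this change ManagedBrowser reads browser_type, user_data_dir, headless, host, debugging_port and cdp_url from the BrowserConfig it is given rather than from separate keyword arguments. A hedged construction sketch; defaults for the remaining parameters are assumed:

```python
# Hedged sketch of the new construction path: everything travels in BrowserConfig.
from crawl4ai.async_configs import BrowserConfig
from crawl4ai.browser_manager import ManagedBrowser

cfg = BrowserConfig(
    browser_type="chromium",
    headless=False,
    user_data_dir="/tmp/crawl4ai-profile",  # hypothetical profile directory
    debugging_port=9222,
)
browser = ManagedBrowser(browser_config=cfg, logger=None)  # an AsyncLogger is normally passed
# cdp_url = await browser.start()  # returns e.g. "http://localhost:9222"
```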
async def start(self) -> str:
|
||||
"""
|
||||
@@ -142,20 +193,105 @@ class ManagedBrowser:
|
||||
# Get browser path and args based on OS and browser type
|
||||
# browser_path = self._get_browser_path()
|
||||
args = await self._get_browser_args()
|
||||
|
||||
if self.browser_config.extra_args:
|
||||
args.extend(self.browser_config.extra_args)
|
||||
|
||||
|
||||
# ── make sure no old Chromium instance is owning the same port/profile ──
|
||||
try:
|
||||
if sys.platform == "win32":
|
||||
if psutil is None:
|
||||
raise RuntimeError("psutil not available, cannot clean old browser")
|
||||
for p in psutil.process_iter(["pid", "name", "cmdline"]):
|
||||
cl = " ".join(p.info.get("cmdline") or [])
|
||||
if (
|
||||
f"--remote-debugging-port={self.debugging_port}" in cl
|
||||
and f"--user-data-dir={self.user_data_dir}" in cl
|
||||
):
|
||||
p.kill()
|
||||
p.wait(timeout=5)
|
||||
else: # macOS / Linux
|
||||
# kill any process listening on the same debugging port
|
||||
pids = (
|
||||
subprocess.check_output(shlex.split(f"lsof -t -i:{self.debugging_port}"))
|
||||
.decode()
|
||||
.strip()
|
||||
.splitlines()
|
||||
)
|
||||
for pid in pids:
|
||||
try:
|
||||
os.kill(int(pid), signal.SIGTERM)
|
||||
except ProcessLookupError:
|
||||
pass
|
||||
|
||||
# remove Chromium singleton locks, or new launch exits with
|
||||
# “Opening in existing browser session.”
|
||||
for f in ("SingletonLock", "SingletonSocket", "SingletonCookie"):
|
||||
fp = os.path.join(self.user_data_dir, f)
|
||||
if os.path.exists(fp):
|
||||
os.remove(fp)
|
||||
except Exception as _e:
|
||||
# non-fatal — we'll try to start anyway, but log what happened
|
||||
self.logger.warning(f"pre-launch cleanup failed: {_e}", tag="BROWSER")
|
||||
|
||||
# Start browser process
|
||||
try:
|
||||
self.browser_process = subprocess.Popen(
|
||||
args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
|
||||
)
|
||||
# Monitor browser process output for errors
|
||||
asyncio.create_task(self._monitor_browser_process())
|
||||
# Use DETACHED_PROCESS flag on Windows to fully detach the process
|
||||
# On Unix, we'll use preexec_fn=os.setpgrp to start the process in a new process group
|
||||
if sys.platform == "win32":
|
||||
self.browser_process = subprocess.Popen(
|
||||
args,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
creationflags=subprocess.DETACHED_PROCESS | subprocess.CREATE_NEW_PROCESS_GROUP
|
||||
)
|
||||
else:
|
||||
self.browser_process = subprocess.Popen(
|
||||
args,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
preexec_fn=os.setpgrp # Start in a new process group
|
||||
)
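The launch above detaches the browser from the Python process: DETACHED_PROCESS plus a new process group on Windows, os.setpgrp on POSIX, so a builtin browser can keep running after the script exits. A minimal sketch of that pattern in isolation:

```python
# Hedged sketch of the detach pattern used above.
import os
import subprocess
import sys
from typing import List

def launch_detached(args: List[str]) -> subprocess.Popen:
    if sys.platform == "win32":
        # Fully detach on Windows so the child survives the parent.
        return subprocess.Popen(
            args,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            creationflags=subprocess.DETACHED_PROCESS | subprocess.CREATE_NEW_PROCESS_GROUP,
        )
    # New process group on macOS/Linux.
    return subprocess.Popen(
        args,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        preexec_fn=os.setpgrp,
    )
```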
|
||||
# We'll monitor for a short time to make sure it starts properly, but won't keep monitoring
|
||||
await asyncio.sleep(0.5) # Give browser time to start
|
||||
await self._initial_startup_check()
|
||||
await asyncio.sleep(2) # Give browser time to start
|
||||
return f"http://{self.host}:{self.debugging_port}"
|
||||
except Exception as e:
|
||||
await self.cleanup()
|
||||
raise Exception(f"Failed to start browser: {e}")
|
||||
|
||||
async def _initial_startup_check(self):
|
||||
"""
|
||||
Perform a quick check to make sure the browser started successfully.
|
||||
This only runs once at startup rather than continuously monitoring.
|
||||
"""
|
||||
if not self.browser_process:
|
||||
return
|
||||
|
||||
# Check that process started without immediate termination
|
||||
await asyncio.sleep(0.5)
|
||||
if self.browser_process.poll() is not None:
|
||||
# Process already terminated
|
||||
stdout, stderr = b"", b""
|
||||
try:
|
||||
stdout, stderr = self.browser_process.communicate(timeout=0.5)
|
||||
except subprocess.TimeoutExpired:
|
||||
pass
|
||||
|
||||
self.logger.error(
|
||||
message="Browser process terminated during startup | Code: {code} | STDOUT: {stdout} | STDERR: {stderr}",
|
||||
tag="ERROR",
|
||||
params={
|
||||
"code": self.browser_process.returncode,
|
||||
"stdout": stdout.decode() if stdout else "",
|
||||
"stderr": stderr.decode() if stderr else "",
|
||||
},
|
||||
)
|
||||
|
||||
async def _monitor_browser_process(self):
|
||||
"""
|
||||
Monitor the browser process for unexpected termination.
|
||||
@@ -167,6 +303,7 @@ class ManagedBrowser:
|
||||
4. If any other error occurs, log the error message.
|
||||
|
||||
Note: This method should be called in a separate task to avoid blocking the main event loop.
|
||||
This is DEPRECATED and should not be used for builtin browsers that need to outlive the Python process.
|
||||
"""
|
||||
if self.browser_process:
|
||||
try:
|
||||
@@ -230,29 +367,29 @@ class ManagedBrowser:
|
||||
return browser_path
|
||||
|
||||
async def _get_browser_args(self) -> List[str]:
|
||||
"""Returns browser-specific command line arguments"""
|
||||
base_args = [await self._get_browser_path()]
|
||||
|
||||
"""Returns full CLI args for launching the browser"""
|
||||
base = [await self._get_browser_path()]
|
||||
if self.browser_type == "chromium":
|
||||
args = [
|
||||
flags = [
|
||||
f"--remote-debugging-port={self.debugging_port}",
|
||||
f"--user-data-dir={self.user_data_dir}",
|
||||
]
|
||||
if self.headless:
|
||||
args.append("--headless=new")
|
||||
flags.append("--headless=new")
|
||||
# merge common launch flags
|
||||
flags.extend(self.build_browser_flags(self.browser_config))
|
||||
elif self.browser_type == "firefox":
|
||||
args = [
|
||||
flags = [
|
||||
"--remote-debugging-port",
|
||||
str(self.debugging_port),
|
||||
"--profile",
|
||||
self.user_data_dir,
|
||||
]
|
||||
if self.headless:
|
||||
args.append("--headless")
|
||||
flags.append("--headless")
|
||||
else:
|
||||
raise NotImplementedError(f"Browser type {self.browser_type} not supported")
|
||||
|
||||
return base_args + args
|
||||
return base + flags
|
||||
|
||||
async def cleanup(self):
|
||||
"""Cleanup browser process and temporary directory"""
|
||||
@@ -261,22 +398,33 @@ class ManagedBrowser:
|
||||
|
||||
if self.browser_process:
|
||||
try:
|
||||
self.browser_process.terminate()
|
||||
# Wait for process to end gracefully
|
||||
for _ in range(10): # 10 attempts, 100ms each
|
||||
if self.browser_process.poll() is not None:
|
||||
break
|
||||
await asyncio.sleep(0.1)
|
||||
# For builtin browsers that should persist, we should check if it's a detached process
|
||||
# Only terminate if we have proper control over the process
|
||||
if not self.browser_process.poll():
|
||||
# Process is still running
|
||||
self.browser_process.terminate()
|
||||
# Wait for process to end gracefully
|
||||
for _ in range(10): # 10 attempts, 100ms each
|
||||
if self.browser_process.poll() is not None:
|
||||
break
|
||||
await asyncio.sleep(0.1)
|
||||
|
||||
# Force kill if still running
|
||||
if self.browser_process.poll() is None:
|
||||
self.browser_process.kill()
|
||||
await asyncio.sleep(0.1) # Brief wait for kill to take effect
|
||||
# Force kill if still running
|
||||
if self.browser_process.poll() is None:
|
||||
if sys.platform == "win32":
|
||||
# On Windows we might need taskkill for detached processes
|
||||
try:
|
||||
subprocess.run(["taskkill", "/F", "/PID", str(self.browser_process.pid)])
|
||||
except Exception:
|
||||
self.browser_process.kill()
|
||||
else:
|
||||
self.browser_process.kill()
|
||||
await asyncio.sleep(0.1) # Brief wait for kill to take effect
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(
|
||||
message="Error terminating browser: {error}",
|
||||
tag="ERROR",
|
||||
tag="ERROR",
|
||||
params={"error": str(e)},
|
||||
)
|
||||
|
||||
@@ -379,7 +527,14 @@ class BrowserManager:
|
||||
sessions (dict): Dictionary to store session information
|
||||
session_ttl (int): Session timeout in seconds
|
||||
"""
|
||||
|
||||
_playwright_instance = None
|
||||
|
||||
@classmethod
|
||||
async def get_playwright(cls):
|
||||
from playwright.async_api import async_playwright
|
||||
cls._playwright_instance = await async_playwright().start()
|
||||
return cls._playwright_instance
|
||||
|
||||
def __init__(self, browser_config: BrowserConfig, logger=None):
|
||||
"""
|
||||
@@ -415,6 +570,7 @@ class BrowserManager:
|
||||
logger=self.logger,
|
||||
debugging_port=self.config.debugging_port,
|
||||
cdp_url=self.config.cdp_url,
|
||||
browser_config=self.config,
|
||||
)
|
||||
|
||||
async def start(self):
|
||||
@@ -429,32 +585,22 @@ class BrowserManager:
|
||||
|
||||
Note: This method should be called in a separate task to avoid blocking the main event loop.
|
||||
"""
|
||||
if self.playwright is None:
|
||||
from playwright.async_api import async_playwright
|
||||
if self.playwright is not None:
|
||||
await self.close()
|
||||
|
||||
from playwright.async_api import async_playwright
|
||||
|
||||
self.playwright = await async_playwright().start()
|
||||
self.playwright = await async_playwright().start()
|
||||
|
||||
if self.config.use_managed_browser:
|
||||
cdp_url = await self.managed_browser.start()
|
||||
if self.config.cdp_url or self.config.use_managed_browser:
|
||||
self.config.use_managed_browser = True
|
||||
cdp_url = await self.managed_browser.start() if not self.config.cdp_url else self.config.cdp_url
|
||||
self.browser = await self.playwright.chromium.connect_over_cdp(cdp_url)
|
||||
contexts = self.browser.contexts
|
||||
if contexts:
|
||||
self.default_context = contexts[0]
|
||||
else:
|
||||
self.default_context = await self.create_browser_context()
|
||||
# self.default_context = await self.browser.new_context(
|
||||
# viewport={
|
||||
# "width": self.config.viewport_width,
|
||||
# "height": self.config.viewport_height,
|
||||
# },
|
||||
# storage_state=self.config.storage_state,
|
||||
# user_agent=self.config.headers.get(
|
||||
# "User-Agent", self.config.user_agent
|
||||
# ),
|
||||
# accept_downloads=self.config.accept_downloads,
|
||||
# ignore_https_errors=self.config.ignore_https_errors,
|
||||
# java_script_enabled=self.config.java_script_enabled,
|
||||
# )
|
||||
await self.setup_context(self.default_context)
|
||||
else:
|
||||
browser_args = self._build_browser_args()
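When cdp_url is set, start() above attaches Playwright to an already running browser instead of launching a new one. A hedged standalone sketch of that CDP path using Playwright's connect_over_cdp:

```python
# Hedged sketch of the CDP attach path introduced above.
import asyncio
from playwright.async_api import async_playwright

async def attach_over_cdp(cdp_url: str = "http://localhost:9222"):
    pw = await async_playwright().start()
    browser = await pw.chromium.connect_over_cdp(cdp_url)
    # Reuse the browser's default context if one already exists, as start() does.
    context = browser.contexts[0] if browser.contexts else await browser.new_context()
    page = await context.new_page()
    await page.goto("https://example.com")
    await pw.stop()

asyncio.run(attach_over_cdp())
```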
@@ -469,6 +615,7 @@ class BrowserManager:
|
||||
|
||||
self.default_context = self.browser
|
||||
|
||||
|
||||
def _build_browser_args(self) -> dict:
|
||||
"""Build browser launch arguments from config."""
|
||||
args = [
|
||||
@@ -512,6 +659,9 @@ class BrowserManager:
|
||||
if self.config.extra_args:
|
||||
args.extend(self.config.extra_args)
|
||||
|
||||
# Deduplicate args
|
||||
args = list(dict.fromkeys(args))
|
||||
|
||||
browser_args = {"headless": self.config.headless, "args": args}
|
||||
|
||||
if self.config.chrome_channel:
|
||||
@@ -530,9 +680,9 @@ class BrowserManager:
|
||||
ProxySettings(server=self.config.proxy)
|
||||
if self.config.proxy
|
||||
else ProxySettings(
|
||||
server=self.config.proxy_config.get("server"),
|
||||
username=self.config.proxy_config.get("username"),
|
||||
password=self.config.proxy_config.get("password"),
|
||||
server=self.config.proxy_config.server,
|
||||
username=self.config.proxy_config.username,
|
||||
password=self.config.proxy_config.password,
|
||||
)
|
||||
)
|
||||
browser_args["proxy"] = proxy_settings
|
||||
@@ -607,7 +757,7 @@ class BrowserManager:
|
||||
"name": "cookiesEnabled",
|
||||
"value": "true",
|
||||
"url": crawlerRunConfig.url
|
||||
if crawlerRunConfig
|
||||
if crawlerRunConfig and crawlerRunConfig.url
|
||||
else "https://crawl4ai.com/",
|
||||
}
|
||||
]
|
||||
@@ -726,6 +876,23 @@ class BrowserManager:
|
||||
# Update context settings with text mode settings
|
||||
context_settings.update(text_mode_settings)
|
||||
|
||||
# inject locale / tz / geo if user provided them
|
||||
if crawlerRunConfig:
|
||||
if crawlerRunConfig.locale:
|
||||
context_settings["locale"] = crawlerRunConfig.locale
|
||||
if crawlerRunConfig.timezone_id:
|
||||
context_settings["timezone_id"] = crawlerRunConfig.timezone_id
|
||||
if crawlerRunConfig.geolocation:
|
||||
context_settings["geolocation"] = {
|
||||
"latitude": crawlerRunConfig.geolocation.latitude,
|
||||
"longitude": crawlerRunConfig.geolocation.longitude,
|
||||
"accuracy": crawlerRunConfig.geolocation.accuracy,
|
||||
}
|
||||
# ensure geolocation permission
|
||||
perms = context_settings.get("permissions", [])
|
||||
perms.append("geolocation")
|
||||
context_settings["permissions"] = perms
|
||||
|
||||
# Create and return the context with all settings
|
||||
context = await self.browser.new_context(**context_settings)
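The block above forwards locale, timezone_id and geolocation from the run config into the Playwright context and grants the geolocation permission. A hedged usage sketch; only the attribute names the diff reads (latitude, longitude, accuracy) are assumed, so a plain namespace object stands in for whatever geolocation config class the library provides:

```python
# Hedged usage sketch for the new locale / timezone / geolocation plumbing.
import asyncio
from types import SimpleNamespace
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig

run_cfg = CrawlerRunConfig(
    locale="fr-FR",
    timezone_id="Europe/Paris",
    geolocation=SimpleNamespace(latitude=48.8566, longitude=2.3522, accuracy=10.0),
)

async def demo():
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun("https://example.com", config=run_cfg)
        print(result.success)

asyncio.run(demo())
```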
@@ -758,6 +925,10 @@ class BrowserManager:
|
||||
"semaphore_count",
|
||||
"url"
|
||||
]
|
||||
|
||||
# Do NOT exclude locale, timezone_id, or geolocation as these DO affect browser context
|
||||
# and should cause a new context to be created if they change
|
||||
|
||||
for key in ephemeral_keys:
|
||||
if key in config_dict:
|
||||
del config_dict[key]
|
||||
@@ -790,7 +961,10 @@ class BrowserManager:
|
||||
# If using a managed browser, just grab the shared default_context
|
||||
if self.config.use_managed_browser:
|
||||
context = self.default_context
|
||||
page = await context.new_page()
|
||||
pages = context.pages
|
||||
page = next((p for p in pages if p.url == crawlerRunConfig.url), None)
|
||||
if not page:
|
||||
page = context.pages[0] # await context.new_page()
|
||||
else:
|
||||
# Otherwise, check if we have an existing context for this config
|
||||
config_signature = self._make_config_signature(crawlerRunConfig)
|
||||
@@ -840,6 +1014,9 @@ class BrowserManager:
|
||||
|
||||
async def close(self):
|
||||
"""Close all browser resources and clean up."""
|
||||
if self.config.cdp_url:
|
||||
return
|
||||
|
||||
if self.config.sleep_on_close:
|
||||
await asyncio.sleep(0.5)
|
||||
|
||||
|
||||
@@ -12,12 +12,15 @@ import sys
|
||||
import datetime
|
||||
import uuid
|
||||
import shutil
|
||||
import json
|
||||
import subprocess
|
||||
import time
|
||||
from typing import List, Dict, Optional, Any
|
||||
from colorama import Fore, Style, init
|
||||
from rich.console import Console
|
||||
|
||||
from .async_configs import BrowserConfig
|
||||
from .browser_manager import ManagedBrowser
|
||||
from .async_logger import AsyncLogger, AsyncLoggerBase
|
||||
from .async_logger import AsyncLogger, AsyncLoggerBase, LogColor
|
||||
from .utils import get_home_folder
|
||||
|
||||
|
||||
@@ -42,8 +45,8 @@ class BrowserProfiler:
|
||||
logger (AsyncLoggerBase, optional): Logger for outputting messages.
|
||||
If None, a default AsyncLogger will be created.
|
||||
"""
|
||||
# Initialize colorama for colorful terminal output
|
||||
init()
|
||||
# Initialize rich console for colorful input prompts
|
||||
self.console = Console()
|
||||
|
||||
# Create a logger if not provided
|
||||
if logger is None:
|
||||
@@ -56,6 +59,11 @@ class BrowserProfiler:
|
||||
# Ensure profiles directory exists
|
||||
self.profiles_dir = os.path.join(get_home_folder(), "profiles")
|
||||
os.makedirs(self.profiles_dir, exist_ok=True)
|
||||
|
||||
# Builtin browser config file
|
||||
self.builtin_browser_dir = os.path.join(get_home_folder(), "builtin-browser")
|
||||
self.builtin_config_file = os.path.join(self.builtin_browser_dir, "browser_config.json")
|
||||
os.makedirs(self.builtin_browser_dir, exist_ok=True)
|
||||
|
||||
async def create_profile(self,
|
||||
profile_name: Optional[str] = None,
|
||||
@@ -119,26 +127,30 @@ class BrowserProfiler:
|
||||
profile_path = os.path.join(self.profiles_dir, profile_name)
|
||||
os.makedirs(profile_path, exist_ok=True)
|
||||
|
||||
# Print instructions for the user with colorama formatting
|
||||
border = f"{Fore.CYAN}{'='*80}{Style.RESET_ALL}"
|
||||
self.logger.info(f"\n{border}", tag="PROFILE")
|
||||
self.logger.info(f"Creating browser profile: {Fore.GREEN}{profile_name}{Style.RESET_ALL}", tag="PROFILE")
|
||||
self.logger.info(f"Profile directory: {Fore.YELLOW}{profile_path}{Style.RESET_ALL}", tag="PROFILE")
|
||||
# Print instructions for the user with rich formatting
|
||||
border = f"{'='*80}"
|
||||
self.logger.info("{border}", tag="PROFILE", params={"border": f"\n{border}"}, colors={"border": LogColor.CYAN})
|
||||
self.logger.info("Creating browser profile: {profile_name}", tag="PROFILE", params={"profile_name": profile_name}, colors={"profile_name": LogColor.GREEN})
|
||||
self.logger.info("Profile directory: {profile_path}", tag="PROFILE", params={"profile_path": profile_path}, colors={"profile_path": LogColor.YELLOW})
|
||||
|
||||
self.logger.info("\nInstructions:", tag="PROFILE")
|
||||
self.logger.info("1. A browser window will open for you to set up your profile.", tag="PROFILE")
|
||||
self.logger.info(f"2. {Fore.CYAN}Log in to websites{Style.RESET_ALL}, configure settings, etc. as needed.", tag="PROFILE")
|
||||
self.logger.info(f"3. When you're done, {Fore.YELLOW}press 'q' in this terminal{Style.RESET_ALL} to close the browser.", tag="PROFILE")
|
||||
self.logger.info("{segment}, configure settings, etc. as needed.", tag="PROFILE", params={"segment": "2. Log in to websites"}, colors={"segment": LogColor.CYAN})
|
||||
self.logger.info("3. When you're done, {segment} to close the browser.", tag="PROFILE", params={"segment": "press 'q' in this terminal"}, colors={"segment": LogColor.YELLOW})
|
||||
self.logger.info("4. The profile will be saved and ready to use with Crawl4AI.", tag="PROFILE")
|
||||
self.logger.info(f"{border}\n", tag="PROFILE")
|
||||
self.logger.info("{border}", tag="PROFILE", params={"border": f"{border}\n"}, colors={"border": LogColor.CYAN})
|
||||
browser_config.headless = False
|
||||
browser_config.user_data_dir = profile_path
|
||||
|
||||
|
||||
# Create managed browser instance
|
||||
managed_browser = ManagedBrowser(
|
||||
browser_type=browser_config.browser_type,
|
||||
user_data_dir=profile_path,
|
||||
headless=False, # Must be visible
|
||||
browser_config=browser_config,
|
||||
# user_data_dir=profile_path,
|
||||
# headless=False, # Must be visible
|
||||
logger=self.logger,
|
||||
debugging_port=browser_config.debugging_port
|
||||
# debugging_port=browser_config.debugging_port
|
||||
)
|
||||
|
||||
# Set up signal handlers to ensure cleanup on interrupt
|
||||
@@ -173,7 +185,7 @@ class BrowserProfiler:
|
||||
import select
|
||||
|
||||
# First output the prompt
|
||||
self.logger.info(f"{Fore.CYAN}Press '{Fore.WHITE}q{Fore.CYAN}' when you've finished using the browser...{Style.RESET_ALL}", tag="PROFILE")
|
||||
self.logger.info("Press 'q' when you've finished using the browser...", tag="PROFILE")
|
||||
|
||||
# Save original terminal settings
|
||||
fd = sys.stdin.fileno()
|
||||
@@ -189,7 +201,7 @@ class BrowserProfiler:
|
||||
if readable:
|
||||
key = sys.stdin.read(1)
|
||||
if key.lower() == 'q':
|
||||
self.logger.info(f"{Fore.GREEN}Closing browser and saving profile...{Style.RESET_ALL}", tag="PROFILE")
|
||||
self.logger.info("Closing browser and saving profile...", tag="PROFILE", base_color=LogColor.GREEN)
|
||||
user_done_event.set()
|
||||
return
|
||||
|
||||
@@ -215,7 +227,7 @@ class BrowserProfiler:
|
||||
self.logger.error("Failed to start browser process.", tag="PROFILE")
|
||||
return None
|
||||
|
||||
self.logger.info(f"Browser launched. {Fore.CYAN}Waiting for you to finish...{Style.RESET_ALL}", tag="PROFILE")
|
||||
self.logger.info("Browser launched. Waiting for you to finish...", tag="PROFILE")
|
||||
|
||||
# Start listening for keyboard input
|
||||
listener_task = asyncio.create_task(listen_for_quit_command())
|
||||
@@ -237,10 +249,10 @@ class BrowserProfiler:
|
||||
self.logger.info("Terminating browser process...", tag="PROFILE")
|
||||
await managed_browser.cleanup()
|
||||
|
||||
self.logger.success(f"Browser closed. Profile saved at: {Fore.GREEN}{profile_path}{Style.RESET_ALL}", tag="PROFILE")
|
||||
self.logger.success(f"Browser closed. Profile saved at: {profile_path}", tag="PROFILE")
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f"Error creating profile: {str(e)}", tag="PROFILE")
|
||||
self.logger.error(f"Error creating profile: {e!s}", tag="PROFILE")
|
||||
await managed_browser.cleanup()
|
||||
return None
|
||||
finally:
|
||||
@@ -342,7 +354,11 @@ class BrowserProfiler:
|
||||
|
||||
# Check if path exists and is a valid profile
|
||||
if not os.path.isdir(profile_path):
|
||||
return None
|
||||
# Check if profile_name itself is a full path
|
||||
if os.path.isabs(profile_name):
|
||||
profile_path = profile_name
|
||||
else:
|
||||
return None
|
||||
|
||||
# Look for profile indicators
|
||||
is_profile = (
|
||||
@@ -428,25 +444,27 @@ class BrowserProfiler:
|
||||
```
|
||||
"""
|
||||
while True:
|
||||
self.logger.info(f"\n{Fore.CYAN}Profile Management Options:{Style.RESET_ALL}", tag="MENU")
|
||||
self.logger.info(f"1. {Fore.GREEN}Create a new profile{Style.RESET_ALL}", tag="MENU")
|
||||
self.logger.info(f"2. {Fore.YELLOW}List available profiles{Style.RESET_ALL}", tag="MENU")
|
||||
self.logger.info(f"3. {Fore.RED}Delete a profile{Style.RESET_ALL}", tag="MENU")
|
||||
self.logger.info("\nProfile Management Options:", tag="MENU")
|
||||
self.logger.info("1. Create a new profile", tag="MENU", base_color=LogColor.GREEN)
|
||||
self.logger.info("2. List available profiles", tag="MENU", base_color=LogColor.YELLOW)
|
||||
self.logger.info("3. Delete a profile", tag="MENU", base_color=LogColor.RED)
|
||||
|
||||
# Only show crawl option if callback provided
|
||||
if crawl_callback:
|
||||
self.logger.info(f"4. {Fore.CYAN}Use a profile to crawl a website{Style.RESET_ALL}", tag="MENU")
|
||||
self.logger.info(f"5. {Fore.MAGENTA}Exit{Style.RESET_ALL}", tag="MENU")
|
||||
self.logger.info("4. Use a profile to crawl a website", tag="MENU", base_color=LogColor.CYAN)
|
||||
self.logger.info("5. Exit", tag="MENU", base_color=LogColor.MAGENTA)
|
||||
exit_option = "5"
|
||||
else:
|
||||
self.logger.info(f"4. {Fore.MAGENTA}Exit{Style.RESET_ALL}", tag="MENU")
|
||||
self.logger.info("4. Exit", tag="MENU", base_color=LogColor.MAGENTA)
|
||||
exit_option = "4"
|
||||
|
||||
choice = input(f"\n{Fore.CYAN}Enter your choice (1-{exit_option}): {Style.RESET_ALL}")
|
||||
self.logger.print(f"\n[cyan]Enter your choice (1-{exit_option}): [/cyan]", end="")
|
||||
choice = input()
|
||||
|
||||
if choice == "1":
|
||||
# Create new profile
|
||||
name = input(f"{Fore.GREEN}Enter a name for the new profile (or press Enter for auto-generated name): {Style.RESET_ALL}")
|
||||
self.console.print("[green]Enter a name for the new profile (or press Enter for auto-generated name): [/green]", end="")
|
||||
name = input()
|
||||
await self.create_profile(name or None)
|
||||
|
||||
elif choice == "2":
|
||||
@@ -457,11 +475,11 @@ class BrowserProfiler:
|
||||
self.logger.warning(" No profiles found. Create one first with option 1.", tag="PROFILES")
|
||||
continue
|
||||
|
||||
# Print profile information with colorama formatting
|
||||
# Print profile information
|
||||
self.logger.info("\nAvailable profiles:", tag="PROFILES")
|
||||
for i, profile in enumerate(profiles):
|
||||
self.logger.info(f"[{i+1}] {Fore.CYAN}{profile['name']}{Style.RESET_ALL}", tag="PROFILES")
|
||||
self.logger.info(f" Path: {Fore.YELLOW}{profile['path']}{Style.RESET_ALL}", tag="PROFILES")
|
||||
self.logger.info(f"[{i+1}] {profile['name']}", tag="PROFILES")
|
||||
self.logger.info(f" Path: {profile['path']}", tag="PROFILES", base_color=LogColor.YELLOW)
|
||||
self.logger.info(f" Created: {profile['created'].strftime('%Y-%m-%d %H:%M:%S')}", tag="PROFILES")
|
||||
self.logger.info(f" Browser type: {profile['type']}", tag="PROFILES")
|
||||
self.logger.info("", tag="PROFILES") # Empty line for spacing
|
||||
@@ -474,12 +492,13 @@ class BrowserProfiler:
|
||||
continue
|
||||
|
||||
# Display numbered list
|
||||
self.logger.info(f"\n{Fore.YELLOW}Available profiles:{Style.RESET_ALL}", tag="PROFILES")
|
||||
self.logger.info("\nAvailable profiles:", tag="PROFILES", base_color=LogColor.YELLOW)
|
||||
for i, profile in enumerate(profiles):
|
||||
self.logger.info(f"[{i+1}] {profile['name']}", tag="PROFILES")
|
||||
|
||||
# Get profile to delete
|
||||
profile_idx = input(f"{Fore.RED}Enter the number of the profile to delete (or 'c' to cancel): {Style.RESET_ALL}")
|
||||
self.console.print("[red]Enter the number of the profile to delete (or 'c' to cancel): [/red]", end="")
|
||||
profile_idx = input()
|
||||
if profile_idx.lower() == 'c':
|
||||
continue
|
||||
|
||||
@@ -487,17 +506,18 @@ class BrowserProfiler:
|
||||
idx = int(profile_idx) - 1
|
||||
if 0 <= idx < len(profiles):
|
||||
profile_name = profiles[idx]["name"]
|
||||
self.logger.info(f"Deleting profile: {Fore.YELLOW}{profile_name}{Style.RESET_ALL}", tag="PROFILES")
|
||||
self.logger.info(f"Deleting profile: [yellow]{profile_name}[/yellow]", tag="PROFILES")
|
||||
|
||||
# Confirm deletion
|
||||
confirm = input(f"{Fore.RED}Are you sure you want to delete this profile? (y/n): {Style.RESET_ALL}")
|
||||
self.console.print("[red]Are you sure you want to delete this profile? (y/n): [/red]", end="")
|
||||
confirm = input()
|
||||
if confirm.lower() == 'y':
|
||||
success = self.delete_profile(profiles[idx]["path"])
|
||||
|
||||
if success:
|
||||
self.logger.success(f"Profile {Fore.GREEN}{profile_name}{Style.RESET_ALL} deleted successfully", tag="PROFILES")
|
||||
self.logger.success(f"Profile {profile_name} deleted successfully", tag="PROFILES")
|
||||
else:
|
||||
self.logger.error(f"Failed to delete profile {Fore.RED}{profile_name}{Style.RESET_ALL}", tag="PROFILES")
|
||||
self.logger.error(f"Failed to delete profile {profile_name}", tag="PROFILES")
|
||||
else:
|
||||
self.logger.error("Invalid profile number", tag="PROFILES")
|
||||
except ValueError:
|
||||
@@ -511,12 +531,13 @@ class BrowserProfiler:
|
||||
continue
|
||||
|
||||
# Display numbered list
|
||||
self.logger.info(f"\n{Fore.YELLOW}Available profiles:{Style.RESET_ALL}", tag="PROFILES")
|
||||
self.logger.info("\nAvailable profiles:", tag="PROFILES", base_color=LogColor.YELLOW)
|
||||
for i, profile in enumerate(profiles):
|
||||
self.logger.info(f"[{i+1}] {profile['name']}", tag="PROFILES")
|
||||
|
||||
# Get profile to use
|
||||
profile_idx = input(f"{Fore.CYAN}Enter the number of the profile to use (or 'c' to cancel): {Style.RESET_ALL}")
|
||||
self.console.print("[cyan]Enter the number of the profile to use (or 'c' to cancel): [/cyan]", end="")
|
||||
profile_idx = input()
|
||||
if profile_idx.lower() == 'c':
|
||||
continue
|
||||
|
||||
@@ -524,7 +545,8 @@ class BrowserProfiler:
|
||||
idx = int(profile_idx) - 1
|
||||
if 0 <= idx < len(profiles):
|
||||
profile_path = profiles[idx]["path"]
|
||||
url = input(f"{Fore.CYAN}Enter the URL to crawl: {Style.RESET_ALL}")
|
||||
self.console.print("[cyan]Enter the URL to crawl: [/cyan]", end="")
|
||||
url = input()
|
||||
if url:
|
||||
# Call the provided crawl callback
|
||||
await crawl_callback(profile_path, url)
|
||||
@@ -541,4 +563,449 @@ class BrowserProfiler:
|
||||
break
|
||||
|
||||
else:
|
||||
self.logger.error(f"Invalid choice. Please enter a number between 1 and {exit_option}.", tag="MENU")
|
||||
self.logger.error(f"Invalid choice. Please enter a number between 1 and {exit_option}.", tag="MENU")
|
||||
|
||||
async def launch_standalone_browser(self,
|
||||
browser_type: str = "chromium",
|
||||
user_data_dir: Optional[str] = None,
|
||||
debugging_port: int = 9222,
|
||||
headless: bool = False,
|
||||
save_as_builtin: bool = False) -> Optional[str]:
|
||||
"""
|
||||
Launch a standalone browser with CDP debugging enabled and keep it running
|
||||
until the user presses 'q'. Returns and displays the CDP URL.
|
||||
|
||||
Args:
|
||||
browser_type (str): Type of browser to launch ('chromium' or 'firefox')
|
||||
user_data_dir (str, optional): Path to user profile directory
|
||||
debugging_port (int): Port to use for CDP debugging
|
||||
headless (bool): Whether to run in headless mode
|
||||
|
||||
Returns:
|
||||
str: CDP URL for the browser, or None if launch failed
|
||||
|
||||
Example:
|
||||
```python
|
||||
profiler = BrowserProfiler()
|
||||
cdp_url = await profiler.launch_standalone_browser(
|
||||
user_data_dir="/path/to/profile",
|
||||
debugging_port=9222
|
||||
)
|
||||
# Use cdp_url to connect to the browser
|
||||
```
|
||||
"""
|
||||
# Use the provided directory if specified, otherwise create a temporary directory
|
||||
if user_data_dir:
|
||||
# Directory is provided directly, ensure it exists
|
||||
profile_path = user_data_dir
|
||||
os.makedirs(profile_path, exist_ok=True)
|
||||
else:
|
||||
# Create a temporary profile directory
|
||||
timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
|
||||
profile_name = f"temp_{timestamp}_{uuid.uuid4().hex[:6]}"
|
||||
profile_path = os.path.join(self.profiles_dir, profile_name)
|
||||
os.makedirs(profile_path, exist_ok=True)
|
||||
|
||||
# Print initial information
|
||||
border = f"{'='*80}"
|
||||
self.logger.info("{border}", tag="CDP", params={"border": border}, colors={"border": LogColor.CYAN})
|
||||
self.logger.info("Launching standalone browser with CDP debugging", tag="CDP")
|
||||
self.logger.info("Browser type: {browser_type}", tag="CDP", params={"browser_type": browser_type}, colors={"browser_type": LogColor.CYAN})
|
||||
self.logger.info("Profile path: {profile_path}", tag="CDP", params={"profile_path": profile_path}, colors={"profile_path": LogColor.YELLOW})
|
||||
self.logger.info(f"Debugging port: {debugging_port}", tag="CDP")
|
||||
self.logger.info(f"Headless mode: {headless}", tag="CDP")
|
||||
|
||||
# Create managed browser instance
|
||||
managed_browser = ManagedBrowser(
|
||||
browser_type=browser_type,
|
||||
user_data_dir=profile_path,
|
||||
headless=headless,
|
||||
logger=self.logger,
|
||||
debugging_port=debugging_port
|
||||
)
|
||||
|
||||
# Set up signal handlers to ensure cleanup on interrupt
|
||||
original_sigint = signal.getsignal(signal.SIGINT)
|
||||
original_sigterm = signal.getsignal(signal.SIGTERM)
|
||||
|
||||
# Define cleanup handler for signals
|
||||
async def cleanup_handler(sig, frame):
|
||||
self.logger.warning("\nCleaning up browser process...", tag="CDP")
|
||||
await managed_browser.cleanup()
|
||||
# Restore original signal handlers
|
||||
signal.signal(signal.SIGINT, original_sigint)
|
||||
signal.signal(signal.SIGTERM, original_sigterm)
|
||||
if sig == signal.SIGINT:
|
||||
self.logger.error("Browser terminated by user.", tag="CDP")
|
||||
sys.exit(1)
|
||||
|
||||
# Set signal handlers
|
||||
def sigint_handler(sig, frame):
|
||||
asyncio.create_task(cleanup_handler(sig, frame))
|
||||
|
||||
signal.signal(signal.SIGINT, sigint_handler)
|
||||
signal.signal(signal.SIGTERM, sigint_handler)
|
||||
|
||||
# Event to signal when user wants to exit
|
||||
user_done_event = asyncio.Event()
|
||||
|
||||
# Run keyboard input loop in a separate task
|
||||
async def listen_for_quit_command():
|
||||
import termios
|
||||
import tty
|
||||
import select
|
||||
|
||||
# First output the prompt
|
||||
self.logger.info("Press 'q' to stop the browser and exit...", tag="CDP")
|
||||
|
||||
# Save original terminal settings
|
||||
fd = sys.stdin.fileno()
|
||||
old_settings = termios.tcgetattr(fd)
|
||||
|
||||
try:
|
||||
# Switch to non-canonical mode (no line buffering)
|
||||
tty.setcbreak(fd)
|
||||
|
||||
while True:
|
||||
# Check if input is available (non-blocking)
|
||||
readable, _, _ = select.select([sys.stdin], [], [], 0.5)
|
||||
if readable:
|
||||
key = sys.stdin.read(1)
|
||||
if key.lower() == 'q':
|
||||
self.logger.info("Closing browser...", tag="CDP")
|
||||
user_done_event.set()
|
||||
return
|
||||
|
||||
# Check if the browser process has already exited
|
||||
if managed_browser.browser_process and managed_browser.browser_process.poll() is not None:
|
||||
self.logger.info("Browser already closed. Ending input listener.", tag="CDP")
|
||||
user_done_event.set()
|
||||
return
|
||||
|
||||
await asyncio.sleep(0.1)
|
||||
|
||||
finally:
|
||||
# Restore terminal settings
|
||||
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
|
||||
|
||||
# Function to retrieve and display CDP JSON config
|
||||
async def get_cdp_json(port):
|
||||
import aiohttp
|
||||
cdp_url = f"http://localhost:{port}"
|
||||
json_url = f"{cdp_url}/json/version"
|
||||
|
||||
try:
|
||||
async with aiohttp.ClientSession() as session:
|
||||
# Try multiple times in case the browser is still starting up
|
||||
for _ in range(10):
|
||||
try:
|
||||
async with session.get(json_url) as response:
|
||||
if response.status == 200:
|
||||
data = await response.json()
|
||||
return cdp_url, data
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
await asyncio.sleep(0.5)
|
||||
|
||||
return cdp_url, None
|
||||
except Exception as e:
|
||||
self.logger.error(f"Error fetching CDP JSON: {str(e)}", tag="CDP")
|
||||
return cdp_url, None
|
||||
|
||||
cdp_url = None
|
||||
config_json = None
|
||||
|
||||
try:
|
||||
# Start the browser
|
||||
await managed_browser.start()
|
||||
|
||||
# Check if browser started successfully
|
||||
browser_process = managed_browser.browser_process
|
||||
if not browser_process:
|
||||
self.logger.error("Failed to start browser process.", tag="CDP")
|
||||
return None
|
||||
|
||||
self.logger.info("Browser launched successfully. Retrieving CDP information...", tag="CDP")
|
||||
|
||||
# Get CDP URL and JSON config
|
||||
cdp_url, config_json = await get_cdp_json(debugging_port)
|
||||
|
||||
if cdp_url:
|
||||
self.logger.success(f"CDP URL: {cdp_url}", tag="CDP")
|
||||
|
||||
if config_json:
|
||||
# Display relevant CDP information
|
||||
self.logger.info(f"Browser: {config_json.get('Browser', 'Unknown')}", tag="CDP", colors={"Browser": LogColor.CYAN})
|
||||
self.logger.info(f"Protocol Version: {config_json.get('Protocol-Version', 'Unknown')}", tag="CDP", colors={"Protocol-Version": LogColor.CYAN})
|
||||
if 'webSocketDebuggerUrl' in config_json:
|
||||
self.logger.info("WebSocket URL: {webSocketDebuggerUrl}", tag="CDP", params={"webSocketDebuggerUrl": config_json['webSocketDebuggerUrl']}, colors={"webSocketDebuggerUrl": LogColor.GREEN})
|
||||
else:
|
||||
self.logger.warning("Could not retrieve CDP configuration JSON", tag="CDP")
|
||||
else:
|
||||
self.logger.error(f"Failed to get CDP URL on port {debugging_port}", tag="CDP")
|
||||
await managed_browser.cleanup()
|
||||
return None
|
||||
|
||||
# Start listening for keyboard input
|
||||
listener_task = asyncio.create_task(listen_for_quit_command())
|
||||
|
||||
# Wait for the user to press 'q' or for the browser process to exit naturally
|
||||
while not user_done_event.is_set() and browser_process.poll() is None:
|
||||
await asyncio.sleep(0.5)
|
||||
|
||||
# Cancel the listener task if it's still running
|
||||
if not listener_task.done():
|
||||
listener_task.cancel()
|
||||
try:
|
||||
await listener_task
|
||||
except asyncio.CancelledError:
|
||||
pass
|
||||
|
||||
# If the browser is still running and the user pressed 'q', terminate it
|
||||
if browser_process.poll() is None and user_done_event.is_set():
|
||||
self.logger.info("Terminating browser process...", tag="CDP")
|
||||
await managed_browser.cleanup()
|
||||
|
||||
self.logger.success("Browser closed.", tag="CDP")
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f"Error launching standalone browser: {str(e)}", tag="CDP")
|
||||
await managed_browser.cleanup()
|
||||
return None
|
||||
finally:
|
||||
# Restore original signal handlers
|
||||
signal.signal(signal.SIGINT, original_sigint)
|
||||
signal.signal(signal.SIGTERM, original_sigterm)
|
||||
|
||||
# Make sure browser is fully cleaned up
|
||||
await managed_browser.cleanup()
|
||||
|
||||
# Return the CDP URL
|
||||
return cdp_url
|
||||
|
||||
async def launch_builtin_browser(self,
|
||||
browser_type: str = "chromium",
|
||||
debugging_port: int = 9222,
|
||||
headless: bool = True) -> Optional[str]:
|
||||
"""
|
||||
Launch a browser in the background for use as the builtin browser.
|
||||
|
||||
Args:
|
||||
browser_type (str): Type of browser to launch ('chromium' or 'firefox')
|
||||
debugging_port (int): Port to use for CDP debugging
|
||||
headless (bool): Whether to run in headless mode
|
||||
|
||||
Returns:
|
||||
str: CDP URL for the browser, or None if launch failed
|
||||
"""
|
||||
# Check if there's an existing browser still running
|
||||
browser_info = self.get_builtin_browser_info()
|
||||
if browser_info and self._is_browser_running(browser_info.get('pid')):
|
||||
self.logger.info("Builtin browser is already running", tag="BUILTIN")
|
||||
return browser_info.get('cdp_url')
|
||||
|
||||
# Create a user data directory for the builtin browser
|
||||
user_data_dir = os.path.join(self.builtin_browser_dir, "user_data")
|
||||
os.makedirs(user_data_dir, exist_ok=True)
|
||||
|
||||
# Create managed browser instance
|
||||
managed_browser = ManagedBrowser(
|
||||
browser_type=browser_type,
|
||||
user_data_dir=user_data_dir,
|
||||
headless=headless,
|
||||
logger=self.logger,
|
||||
debugging_port=debugging_port
|
||||
)
|
||||
|
||||
try:
|
||||
# Start the browser
|
||||
await managed_browser.start()
|
||||
|
||||
# Check if browser started successfully
|
||||
browser_process = managed_browser.browser_process
|
||||
if not browser_process:
|
||||
self.logger.error("Failed to start browser process.", tag="BUILTIN")
|
||||
return None
|
||||
|
||||
# Get CDP URL
|
||||
cdp_url = f"http://localhost:{debugging_port}"
|
||||
|
||||
# Try to verify browser is responsive by fetching version info
|
||||
import aiohttp
|
||||
json_url = f"{cdp_url}/json/version"
|
||||
config_json = None
|
||||
|
||||
try:
|
||||
async with aiohttp.ClientSession() as session:
|
||||
for _ in range(10): # Try multiple times
|
||||
try:
|
||||
async with session.get(json_url) as response:
|
||||
if response.status == 200:
|
||||
config_json = await response.json()
|
||||
break
|
||||
except Exception:
|
||||
pass
|
||||
await asyncio.sleep(0.5)
|
||||
except Exception as e:
|
||||
self.logger.warning(f"Could not verify browser: {str(e)}", tag="BUILTIN")
|
||||
|
||||
# Save browser info
|
||||
browser_info = {
|
||||
'pid': browser_process.pid,
|
||||
'cdp_url': cdp_url,
|
||||
'user_data_dir': user_data_dir,
|
||||
'browser_type': browser_type,
|
||||
'debugging_port': debugging_port,
|
||||
'start_time': time.time(),
|
||||
'config': config_json
|
||||
}
|
||||
|
||||
with open(self.builtin_config_file, 'w') as f:
|
||||
json.dump(browser_info, f, indent=2)
|
||||
|
||||
# Detach from the browser process - don't keep any references
|
||||
# This is important to allow the Python script to exit while the browser continues running
|
||||
# We'll just record the PID and other info, and the browser will run independently
|
||||
managed_browser.browser_process = None
|
||||
|
||||
self.logger.success(f"Builtin browser launched at CDP URL: {cdp_url}", tag="BUILTIN")
|
||||
return cdp_url
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f"Error launching builtin browser: {str(e)}", tag="BUILTIN")
|
||||
if managed_browser:
|
||||
await managed_browser.cleanup()
|
||||
return None
|
||||
|
||||
def get_builtin_browser_info(self) -> Optional[Dict[str, Any]]:
|
||||
"""
|
||||
Get information about the builtin browser.
|
||||
|
||||
Returns:
|
||||
dict: Browser information or None if no builtin browser is configured
|
||||
"""
|
||||
if not os.path.exists(self.builtin_config_file):
|
||||
return None
|
||||
|
||||
try:
|
||||
with open(self.builtin_config_file, 'r') as f:
|
||||
browser_info = json.load(f)
|
||||
|
||||
# Check if the browser is still running
|
||||
if not self._is_browser_running(browser_info.get('pid')):
|
||||
self.logger.warning("Builtin browser is not running", tag="BUILTIN")
|
||||
return None
|
||||
|
||||
return browser_info
|
||||
except Exception as e:
|
||||
self.logger.error(f"Error reading builtin browser config: {str(e)}", tag="BUILTIN")
|
||||
return None
|
||||
|
||||
def _is_browser_running(self, pid: Optional[int]) -> bool:
|
||||
"""Check if a process with the given PID is running"""
|
||||
if not pid:
|
||||
return False
|
||||
|
||||
try:
|
||||
# Check if the process exists
|
||||
if sys.platform == "win32":
|
||||
process = subprocess.run(["tasklist", "/FI", f"PID eq {pid}"],
|
||||
capture_output=True, text=True)
|
||||
return str(pid) in process.stdout
|
||||
else:
|
||||
# Unix-like systems
|
||||
os.kill(pid, 0) # This doesn't actually kill the process, just checks if it exists
|
||||
return True
|
||||
except (ProcessLookupError, PermissionError, OSError):
|
||||
return False
|
||||
|
||||
async def kill_builtin_browser(self) -> bool:
|
||||
"""
|
||||
Kill the builtin browser if it's running.
|
||||
|
||||
Returns:
|
||||
bool: True if the browser was killed, False otherwise
|
||||
"""
|
||||
browser_info = self.get_builtin_browser_info()
|
||||
if not browser_info:
|
||||
self.logger.warning("No builtin browser found", tag="BUILTIN")
|
||||
return False
|
||||
|
||||
pid = browser_info.get('pid')
|
||||
if not pid:
|
||||
return False
|
||||
|
||||
try:
|
||||
if sys.platform == "win32":
|
||||
subprocess.run(["taskkill", "/F", "/PID", str(pid)], check=True)
|
||||
else:
|
||||
os.kill(pid, signal.SIGTERM)
|
||||
# Wait for termination
|
||||
for _ in range(5):
|
||||
if not self._is_browser_running(pid):
|
||||
break
|
||||
await asyncio.sleep(0.5)
|
||||
else:
|
||||
# Force kill if still running
|
||||
os.kill(pid, signal.SIGKILL)
|
||||
|
||||
# Remove config file
|
||||
if os.path.exists(self.builtin_config_file):
|
||||
os.unlink(self.builtin_config_file)
|
||||
|
||||
self.logger.success("Builtin browser terminated", tag="BUILTIN")
|
||||
return True
|
||||
except Exception as e:
|
||||
self.logger.error(f"Error killing builtin browser: {str(e)}", tag="BUILTIN")
|
||||
return False
|
||||
|
||||
async def get_builtin_browser_status(self) -> Dict[str, Any]:
|
||||
"""
|
||||
Get status information about the builtin browser.
|
||||
|
||||
Returns:
|
||||
dict: Status information with running, cdp_url, and info fields
|
||||
"""
|
||||
browser_info = self.get_builtin_browser_info()
|
||||
|
||||
if not browser_info:
|
||||
return {
|
||||
'running': False,
|
||||
'cdp_url': None,
|
||||
'info': None
|
||||
}
|
||||
|
||||
return {
|
||||
'running': True,
|
||||
'cdp_url': browser_info.get('cdp_url'),
|
||||
'info': browser_info
|
||||
}
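get_builtin_browser_status above reports whether the background browser is alive and where its CDP endpoint lives. A hedged sketch tying the builtin-browser helpers together: reuse the running browser or launch one, then hand the CDP URL to a BrowserConfig with browser_mode="builtin" as the CLI help describes:

```python
# Hedged sketch: ensure a builtin browser exists and build a config that reuses it.
import asyncio
from crawl4ai import BrowserConfig, BrowserProfiler

async def ensure_builtin_browser() -> BrowserConfig:
    profiler = BrowserProfiler()
    status = await profiler.get_builtin_browser_status()
    cdp_url = status["cdp_url"] if status["running"] else await profiler.launch_builtin_browser()
    # browser_mode="builtin" tells crawl4ai to attach to this background browser.
    return BrowserConfig(browser_mode="builtin", cdp_url=cdp_url, headless=True)

print(asyncio.run(ensure_builtin_browser()))
```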
|
||||
|
||||
if __name__ == "__main__":
|
||||
# Example usage
|
||||
profiler = BrowserProfiler()
|
||||
|
||||
# Create a new profile
|
||||
import os
|
||||
from pathlib import Path
|
||||
home_dir = Path.home()
|
||||
profile_path = asyncio.run(profiler.create_profile(str(home_dir / ".crawl4ai/profiles/test-profile")))
|
||||
|
||||
|
||||
|
||||
# Launch a standalone browser
|
||||
asyncio.run(profiler.launch_standalone_browser())
|
||||
|
||||
# List profiles
|
||||
profiles = profiler.list_profiles()
|
||||
for profile in profiles:
|
||||
print(f"Profile: {profile['name']}, Path: {profile['path']}")
|
||||
|
||||
# Delete a profile
|
||||
success = profiler.delete_profile("my-profile")
|
||||
if success:
|
||||
print("Profile deleted successfully")
|
||||
else:
|
||||
print("Failed to delete profile")
|
||||
708
crawl4ai/cli.py
@@ -1,9 +1,8 @@
|
||||
import click
|
||||
import os
|
||||
import time
|
||||
import datetime
|
||||
import sys
|
||||
import shutil
|
||||
import time
|
||||
|
||||
import humanize
|
||||
from typing import Dict, Any, Optional, List
|
||||
import json
|
||||
@@ -13,7 +12,6 @@ from rich.console import Console
|
||||
from rich.table import Table
|
||||
from rich.panel import Panel
|
||||
from rich.prompt import Prompt, Confirm
|
||||
from rich.style import Style
|
||||
|
||||
from crawl4ai import (
|
||||
CacheMode,
|
||||
@@ -22,16 +20,19 @@ from crawl4ai import (
|
||||
BrowserConfig,
|
||||
CrawlerRunConfig,
|
||||
LLMExtractionStrategy,
|
||||
LXMLWebScrapingStrategy,
|
||||
JsonCssExtractionStrategy,
|
||||
JsonXPathExtractionStrategy,
|
||||
BM25ContentFilter,
|
||||
PruningContentFilter,
|
||||
BrowserProfiler
|
||||
BrowserProfiler,
|
||||
DefaultMarkdownGenerator,
|
||||
LLMConfig
|
||||
)
|
||||
from crawl4ai.config import USER_SETTINGS
|
||||
from litellm import completion
|
||||
from pathlib import Path
|
||||
|
||||
from crawl4ai.async_configs import LlmConfig
|
||||
|
||||
# Initialize rich console
|
||||
console = Console()
|
||||
@@ -177,8 +178,12 @@ def show_examples():
|
||||
# CSS-based extraction
|
||||
crwl https://example.com -e extract_css.yml -s css_schema.json -o json
|
||||
|
||||
# LLM-based extraction
|
||||
# LLM-based extraction with config file
|
||||
crwl https://example.com -e extract_llm.yml -s llm_schema.json -o json
|
||||
|
||||
# Quick LLM-based JSON extraction (prompts for LLM provider first time)
|
||||
crwl https://example.com -j # Auto-extracts structured data
|
||||
crwl https://example.com -j "Extract product details including name, price, and features" # With specific instructions
|
||||
|
||||
3️⃣ Direct Parameters:
|
||||
# Browser settings
|
||||
@@ -201,7 +206,24 @@ def show_examples():
|
||||
# 2. Then use that profile to crawl the authenticated site:
|
||||
crwl https://site-requiring-login.com/dashboard -p my-profile-name
|
||||
|
||||
5️⃣ Sample Config Files:
|
||||
5️⃣ CDP Mode for Browser Automation:
|
||||
# Launch browser with CDP debugging on default port 9222
|
||||
crwl cdp
|
||||
|
||||
# Use a specific profile and custom port
|
||||
crwl cdp -p my-profile -P 9223
|
||||
|
||||
# Launch headless browser with CDP enabled
|
||||
crwl cdp --headless
|
||||
|
||||
# Launch in incognito mode (ignores profile)
|
||||
crwl cdp --incognito
|
||||
|
||||
# Use the CDP URL with other tools (Puppeteer, Playwright, etc.)
|
||||
# The URL will be displayed in the terminal when the browser starts
|
||||
|
||||
|
||||
6️⃣ Sample Config Files:
|
||||
|
||||
browser.yml:
|
||||
headless: true
|
||||
@@ -259,11 +281,11 @@ llm_schema.json:
|
||||
}
|
||||
}
|
||||
|
||||
6️⃣ Advanced Usage:
|
||||
7️⃣ Advanced Usage:
|
||||
# Combine configs with direct parameters
|
||||
crwl https://example.com -B browser.yml -b "headless=false,viewport_width=1920"
|
||||
|
||||
# Full extraction pipeline
|
||||
# Full extraction pipeline with config files
|
||||
crwl https://example.com \\
|
||||
-B browser.yml \\
|
||||
-C crawler.yml \\
|
||||
@@ -271,6 +293,12 @@ llm_schema.json:
|
||||
-s llm_schema.json \\
|
||||
-o json \\
|
||||
-v
|
||||
|
||||
# Quick LLM-based extraction with specific instructions
|
||||
crwl https://amazon.com/dp/B01DFKC2SO \\
|
||||
-j "Extract product title, current price, original price, rating, and all product specifications" \\
|
||||
-b "headless=true,viewport_width=1280" \\
|
||||
-v
|
||||
|
||||
# Content filtering with BM25
|
||||
crwl https://example.com \\
|
||||
@@ -285,7 +313,7 @@ llm_schema.json:
|
||||
|
||||
For more documentation visit: https://github.com/unclecode/crawl4ai
|
||||
|
||||
7️⃣ Q&A with LLM:
|
||||
8️⃣ Q&A with LLM:
|
||||
# Ask a question about the content
|
||||
crwl https://example.com -q "What is the main topic discussed?"
|
||||
|
||||
@@ -312,8 +340,16 @@ For more documentation visit: https://github.com/unclecode/crawl4ai
|
||||
- google/gemini-pro
|
||||
|
||||
See full list of providers: https://docs.litellm.ai/docs/providers
|
||||
|
||||
# Set default LLM provider and token in advance
|
||||
crwl config set DEFAULT_LLM_PROVIDER "anthropic/claude-3-sonnet"
|
||||
crwl config set DEFAULT_LLM_PROVIDER_TOKEN "your-api-token-here"
|
||||
|
||||
# Set default browser behavior
|
||||
crwl config set BROWSER_HEADLESS false # Always show browser window
|
||||
crwl config set USER_AGENT_MODE random # Use random user agent
|
||||
|
||||
8️⃣ Profile Management:
|
||||
9️⃣ Profile Management:
|
||||
# Launch interactive profile manager
|
||||
crwl profiles
|
||||
|
||||
@@ -326,6 +362,32 @@ For more documentation visit: https://github.com/unclecode/crawl4ai
|
||||
crwl profiles # Select "Create new profile" option
|
||||
# 2. Then use that profile to crawl authenticated content:
|
||||
crwl https://site-requiring-login.com/dashboard -p my-profile-name
|
||||
|
||||
🔄 Builtin Browser Management:
|
||||
# Start a builtin browser (runs in the background)
|
||||
crwl browser start
|
||||
|
||||
# Check builtin browser status
|
||||
crwl browser status
|
||||
|
||||
# Open a visible window to see the browser
|
||||
crwl browser view --url https://example.com
|
||||
|
||||
# Stop the builtin browser
|
||||
crwl browser stop
|
||||
|
||||
# Restart with different options
|
||||
crwl browser restart --browser-type chromium --port 9223 --no-headless
|
||||
|
||||
# Use the builtin browser in your code
|
||||
# (Just set browser_mode="builtin" in your BrowserConfig)
|
||||
browser_config = BrowserConfig(
|
||||
browser_mode="builtin",
|
||||
headless=True
|
||||
)
|
||||
|
||||
# Usage via CLI:
|
||||
crwl https://example.com -b "browser_mode=builtin"
|
||||
"""
|
||||
click.echo(examples)
|
||||
|
||||
@@ -552,28 +614,409 @@ async def manage_profiles():
|
||||
# Add a separator between operations
|
||||
console.print("\n")
|
||||
|
||||
|
||||
|
||||
@click.group(context_settings={"help_option_names": ["-h", "--help"]})
|
||||
def cli():
|
||||
"""Crawl4AI CLI - Web content extraction and browser profile management tool"""
|
||||
pass
|
||||
|
||||
|
||||
@cli.group("browser")
|
||||
def browser_cmd():
|
||||
"""Manage browser instances for Crawl4AI
|
||||
|
||||
Commands to manage browser instances for Crawl4AI, including:
|
||||
- status - Check status of the builtin browser
|
||||
- start - Start a new builtin browser
|
||||
- stop - Stop the running builtin browser
|
||||
- restart - Restart the builtin browser
|
||||
"""
|
||||
pass
|
||||
|
||||
@browser_cmd.command("status")
|
||||
def browser_status_cmd():
|
||||
"""Show status of the builtin browser"""
|
||||
profiler = BrowserProfiler()
|
||||
|
||||
try:
|
||||
status = anyio.run(profiler.get_builtin_browser_status)
|
||||
|
||||
if status["running"]:
|
||||
info = status["info"]
|
||||
console.print(Panel(
|
||||
f"[green]Builtin browser is running[/green]\n\n"
|
||||
f"CDP URL: [cyan]{info['cdp_url']}[/cyan]\n"
|
||||
f"Process ID: [yellow]{info['pid']}[/yellow]\n"
|
||||
f"Browser type: [blue]{info['browser_type']}[/blue]\n"
|
||||
f"User data directory: [magenta]{info['user_data_dir']}[/magenta]\n"
|
||||
f"Started: [cyan]{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(info['start_time']))}[/cyan]",
|
||||
title="Builtin Browser Status",
|
||||
border_style="green"
|
||||
))
|
||||
else:
|
||||
console.print(Panel(
|
||||
"[yellow]Builtin browser is not running[/yellow]\n\n"
|
||||
"Use 'crwl browser start' to start a builtin browser",
|
||||
title="Builtin Browser Status",
|
||||
border_style="yellow"
|
||||
))
|
||||
|
||||
except Exception as e:
|
||||
console.print(f"[red]Error checking browser status: {str(e)}[/red]")
|
||||
sys.exit(1)
|
||||
|
||||
@browser_cmd.command("start")
|
||||
@click.option("--browser-type", "-b", type=click.Choice(["chromium", "firefox"]), default="chromium",
|
||||
help="Browser type (default: chromium)")
|
||||
@click.option("--port", "-p", type=int, default=9222, help="Debugging port (default: 9222)")
|
||||
@click.option("--headless/--no-headless", default=True, help="Run browser in headless mode")
|
||||
def browser_start_cmd(browser_type: str, port: int, headless: bool):
|
||||
"""Start a builtin browser instance
|
||||
|
||||
This will start a persistent browser instance that can be used by Crawl4AI
|
||||
by setting browser_mode="builtin" in BrowserConfig.
|
||||
"""
|
||||
profiler = BrowserProfiler()
|
||||
|
||||
# First check if browser is already running
|
||||
status = anyio.run(profiler.get_builtin_browser_status)
|
||||
if status["running"]:
|
||||
console.print(Panel(
|
||||
"[yellow]Builtin browser is already running[/yellow]\n\n"
|
||||
f"CDP URL: [cyan]{status['cdp_url']}[/cyan]\n\n"
|
||||
"Use 'crwl browser restart' to restart the browser",
|
||||
title="Builtin Browser Start",
|
||||
border_style="yellow"
|
||||
))
|
||||
return
|
||||
|
||||
try:
|
||||
console.print(Panel(
|
||||
f"[cyan]Starting builtin browser[/cyan]\n\n"
|
||||
f"Browser type: [green]{browser_type}[/green]\n"
|
||||
f"Debugging port: [yellow]{port}[/yellow]\n"
|
||||
f"Headless: [cyan]{'Yes' if headless else 'No'}[/cyan]",
|
||||
title="Builtin Browser Start",
|
||||
border_style="cyan"
|
||||
))
|
||||
|
||||
cdp_url = anyio.run(
|
||||
profiler.launch_builtin_browser,
|
||||
browser_type,
|
||||
port,
|
||||
headless
|
||||
)
|
||||
|
||||
if cdp_url:
|
||||
console.print(Panel(
|
||||
f"[green]Builtin browser started successfully[/green]\n\n"
|
||||
f"CDP URL: [cyan]{cdp_url}[/cyan]\n\n"
|
||||
"This browser will be used automatically when setting browser_mode='builtin'",
|
||||
title="Builtin Browser Start",
|
||||
border_style="green"
|
||||
))
|
||||
else:
|
||||
console.print(Panel(
|
||||
"[red]Failed to start builtin browser[/red]",
|
||||
title="Builtin Browser Start",
|
||||
border_style="red"
|
||||
))
|
||||
sys.exit(1)
|
||||
|
||||
except Exception as e:
|
||||
console.print(f"[red]Error starting builtin browser: {str(e)}[/red]")
|
||||
sys.exit(1)
|
||||
|
||||
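Note that `anyio.run(func, *args)` forwards the extra positional arguments to the async function, which is why `browser_type`, `port`, and `headless` are passed bare above. The same launch path can be scripted outside the CLI; a sketch, with the `BrowserProfiler` import path assumed rather than taken from this diff:

import anyio
from crawl4ai.browser_profiler import BrowserProfiler  # import path assumed

profiler = BrowserProfiler()
cdp_url = anyio.run(profiler.launch_builtin_browser, "chromium", 9222, True)
print(f"Builtin browser listening at {cdp_url}")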
@browser_cmd.command("stop")
|
||||
def browser_stop_cmd():
|
||||
"""Stop the running builtin browser"""
|
||||
profiler = BrowserProfiler()
|
||||
|
||||
try:
|
||||
# First check if browser is running
|
||||
status = anyio.run(profiler.get_builtin_browser_status)
|
||||
if not status["running"]:
|
||||
console.print(Panel(
|
||||
"[yellow]No builtin browser is currently running[/yellow]",
|
||||
title="Builtin Browser Stop",
|
||||
border_style="yellow"
|
||||
))
|
||||
return
|
||||
|
||||
console.print(Panel(
|
||||
"[cyan]Stopping builtin browser...[/cyan]",
|
||||
title="Builtin Browser Stop",
|
||||
border_style="cyan"
|
||||
))
|
||||
|
||||
success = anyio.run(profiler.kill_builtin_browser)
|
||||
|
||||
if success:
|
||||
console.print(Panel(
|
||||
"[green]Builtin browser stopped successfully[/green]",
|
||||
title="Builtin Browser Stop",
|
||||
border_style="green"
|
||||
))
|
||||
else:
|
||||
console.print(Panel(
|
||||
"[red]Failed to stop builtin browser[/red]",
|
||||
title="Builtin Browser Stop",
|
||||
border_style="red"
|
||||
))
|
||||
sys.exit(1)
|
||||
|
||||
except Exception as e:
|
||||
console.print(f"[red]Error stopping builtin browser: {str(e)}[/red]")
|
||||
sys.exit(1)
|
||||
|
||||
@browser_cmd.command("view")
|
||||
@click.option("--url", "-u", help="URL to navigate to (defaults to about:blank)")
|
||||
def browser_view_cmd(url: Optional[str]):
|
||||
"""
|
||||
Open a visible window of the builtin browser
|
||||
|
||||
This command connects to the running builtin browser and opens a visible window,
|
||||
allowing you to see what the browser is currently viewing or navigate to a URL.
|
||||
"""
|
||||
profiler = BrowserProfiler()
|
||||
|
||||
try:
|
||||
# First check if browser is running
|
||||
status = anyio.run(profiler.get_builtin_browser_status)
|
||||
if not status["running"]:
|
||||
console.print(Panel(
|
||||
"[yellow]No builtin browser is currently running[/yellow]\n\n"
|
||||
"Use 'crwl browser start' to start a builtin browser first",
|
||||
title="Builtin Browser View",
|
||||
border_style="yellow"
|
||||
))
|
||||
return
|
||||
|
||||
info = status["info"]
|
||||
cdp_url = info["cdp_url"]
|
||||
|
||||
console.print(Panel(
|
||||
f"[cyan]Opening visible window connected to builtin browser[/cyan]\n\n"
|
||||
f"CDP URL: [green]{cdp_url}[/green]\n"
|
||||
f"URL to load: [yellow]{url or 'about:blank'}[/yellow]",
|
||||
title="Builtin Browser View",
|
||||
border_style="cyan"
|
||||
))
|
||||
|
||||
# Use the CDP URL to launch a new visible window
|
||||
import subprocess
|
||||
import os
|
||||
|
||||
# Determine the browser command based on platform
|
||||
if sys.platform == "darwin": # macOS
|
||||
browser_cmd = ["/Applications/Google Chrome.app/Contents/MacOS/Google Chrome"]
|
||||
elif sys.platform == "win32": # Windows
|
||||
browser_cmd = ["C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe"]
|
||||
else: # Linux
|
||||
browser_cmd = ["google-chrome"]
|
||||
|
||||
# Add arguments
|
||||
browser_args = [
|
||||
f"--remote-debugging-port={info['debugging_port']}",
|
||||
"--remote-debugging-address=localhost",
|
||||
"--no-first-run",
|
||||
"--no-default-browser-check"
|
||||
]
|
||||
|
||||
# Add URL if provided
|
||||
if url:
|
||||
browser_args.append(url)
|
||||
|
||||
# Launch browser
|
||||
try:
|
||||
subprocess.Popen(browser_cmd + browser_args)
|
||||
console.print("[green]Browser window opened. Close it when finished viewing.[/green]")
|
||||
except Exception as e:
|
||||
console.print(f"[red]Error launching browser: {str(e)}[/red]")
|
||||
console.print(f"[yellow]Try connecting manually to {cdp_url} in Chrome or using the '--remote-debugging-port' flag.[/yellow]")
|
||||
|
||||
except Exception as e:
|
||||
console.print(f"[red]Error viewing builtin browser: {str(e)}[/red]")
|
||||
sys.exit(1)
|
||||
|
||||
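If the spawned window doesn't appear, the CDP endpoint can be probed directly; `/json/version` is Chrome's standard DevTools discovery endpoint (assuming the default port 9222):

curl http://localhost:9222/json/version

A healthy endpoint answers with a JSON object that includes the `webSocketDebuggerUrl` needed by CDP clients.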
@browser_cmd.command("restart")
|
||||
@click.option("--browser-type", "-b", type=click.Choice(["chromium", "firefox"]), default=None,
|
||||
help="Browser type (defaults to same as current)")
|
||||
@click.option("--port", "-p", type=int, default=None, help="Debugging port (defaults to same as current)")
|
||||
@click.option("--headless/--no-headless", default=None, help="Run browser in headless mode")
|
||||
def browser_restart_cmd(browser_type: Optional[str], port: Optional[int], headless: Optional[bool]):
|
||||
"""Restart the builtin browser
|
||||
|
||||
Stops the current builtin browser if running and starts a new one.
|
||||
By default, uses the same configuration as the current browser.
|
||||
"""
|
||||
profiler = BrowserProfiler()
|
||||
|
||||
try:
|
||||
# First check if browser is running and get its config
|
||||
status = anyio.run(profiler.get_builtin_browser_status)
|
||||
current_config = {}
|
||||
|
||||
if status["running"]:
|
||||
info = status["info"]
|
||||
current_config = {
|
||||
"browser_type": info["browser_type"],
|
||||
"port": info["debugging_port"],
|
||||
"headless": True # Default assumption
|
||||
}
|
||||
|
||||
# Stop the browser
|
||||
console.print(Panel(
|
||||
"[cyan]Stopping current builtin browser...[/cyan]",
|
||||
title="Builtin Browser Restart",
|
||||
border_style="cyan"
|
||||
))
|
||||
|
||||
success = anyio.run(profiler.kill_builtin_browser)
|
||||
if not success:
|
||||
console.print(Panel(
|
||||
"[red]Failed to stop current browser[/red]",
|
||||
title="Builtin Browser Restart",
|
||||
border_style="red"
|
||||
))
|
||||
sys.exit(1)
|
||||
|
||||
# Use provided options or defaults from current config
|
||||
browser_type = browser_type or current_config.get("browser_type", "chromium")
|
||||
port = port or current_config.get("port", 9222)
|
||||
headless = headless if headless is not None else current_config.get("headless", True)
|
||||
|
||||
# Start a new browser
|
||||
console.print(Panel(
|
||||
f"[cyan]Starting new builtin browser[/cyan]\n\n"
|
||||
f"Browser type: [green]{browser_type}[/green]\n"
|
||||
f"Debugging port: [yellow]{port}[/yellow]\n"
|
||||
f"Headless: [cyan]{'Yes' if headless else 'No'}[/cyan]",
|
||||
title="Builtin Browser Restart",
|
||||
border_style="cyan"
|
||||
))
|
||||
|
||||
cdp_url = anyio.run(
|
||||
profiler.launch_builtin_browser,
|
||||
browser_type,
|
||||
port,
|
||||
headless
|
||||
)
|
||||
|
||||
if cdp_url:
|
||||
console.print(Panel(
|
||||
f"[green]Builtin browser restarted successfully[/green]\n\n"
|
||||
f"CDP URL: [cyan]{cdp_url}[/cyan]",
|
||||
title="Builtin Browser Restart",
|
||||
border_style="green"
|
||||
))
|
||||
else:
|
||||
console.print(Panel(
|
||||
"[red]Failed to restart builtin browser[/red]",
|
||||
title="Builtin Browser Restart",
|
||||
border_style="red"
|
||||
))
|
||||
sys.exit(1)
|
||||
|
||||
except Exception as e:
|
||||
console.print(f"[red]Error restarting builtin browser: {str(e)}[/red]")
|
||||
sys.exit(1)
|
||||
|
||||
@cli.command("cdp")
|
||||
@click.option("--user-data-dir", "-d", help="Directory to use for browser data (will be created if it doesn't exist)")
|
||||
@click.option("--port", "-P", type=int, default=9222, help="Debugging port (default: 9222)")
|
||||
@click.option("--browser-type", "-b", type=click.Choice(["chromium", "firefox"]), default="chromium",
|
||||
help="Browser type (default: chromium)")
|
||||
@click.option("--headless", is_flag=True, help="Run browser in headless mode")
|
||||
@click.option("--incognito", is_flag=True, help="Run in incognito/private mode (ignores user-data-dir)")
|
||||
def cdp_cmd(user_data_dir: Optional[str], port: int, browser_type: str, headless: bool, incognito: bool):
|
||||
"""Launch a standalone browser with CDP debugging enabled
|
||||
|
||||
This command launches a browser with Chrome DevTools Protocol (CDP) debugging enabled,
|
||||
prints the CDP URL, and keeps the browser running until you press 'q'.
|
||||
|
||||
The CDP URL can be used for various automation and debugging tasks.
|
||||
|
||||
Examples:
|
||||
# Launch Chromium with CDP on default port 9222
|
||||
crwl cdp
|
||||
|
||||
# Use a specific directory for browser data and custom port
|
||||
crwl cdp --user-data-dir ~/browser-data --port 9223
|
||||
|
||||
# Launch in headless mode
|
||||
crwl cdp --headless
|
||||
|
||||
# Launch in incognito mode (ignores user-data-dir)
|
||||
crwl cdp --incognito
|
||||
"""
|
||||
profiler = BrowserProfiler()
|
||||
|
||||
try:
|
||||
# Handle data directory
|
||||
data_dir = None
|
||||
if not incognito and user_data_dir:
|
||||
# Expand user path (~/something)
|
||||
expanded_path = os.path.expanduser(user_data_dir)
|
||||
|
||||
# Create directory if it doesn't exist
|
||||
if not os.path.exists(expanded_path):
|
||||
console.print(f"[yellow]Directory '{expanded_path}' doesn't exist. Creating it.[/yellow]")
|
||||
os.makedirs(expanded_path, exist_ok=True)
|
||||
|
||||
data_dir = expanded_path
|
||||
|
||||
# Print launch info
|
||||
console.print(Panel(
|
||||
f"[cyan]Launching browser with CDP debugging[/cyan]\n\n"
|
||||
f"Browser type: [green]{browser_type}[/green]\n"
|
||||
f"Debugging port: [yellow]{port}[/yellow]\n"
|
||||
f"User data directory: [cyan]{data_dir or 'Temporary directory'}[/cyan]\n"
|
||||
f"Headless: [cyan]{'Yes' if headless else 'No'}[/cyan]\n"
|
||||
f"Incognito: [cyan]{'Yes' if incognito else 'No'}[/cyan]\n\n"
|
||||
f"[yellow]Press 'q' to quit when done[/yellow]",
|
||||
title="CDP Browser",
|
||||
border_style="cyan"
|
||||
))
|
||||
|
||||
# Run the browser
|
||||
cdp_url = anyio.run(
|
||||
profiler.launch_standalone_browser,
|
||||
browser_type,
|
||||
data_dir,
|
||||
port,
|
||||
headless
|
||||
)
|
||||
|
||||
if not cdp_url:
|
||||
console.print("[red]Failed to launch browser or get CDP URL[/red]")
|
||||
sys.exit(1)
|
||||
|
||||
except Exception as e:
|
||||
console.print(f"[red]Error launching CDP browser: {str(e)}[/red]")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
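Anything that speaks CDP can attach to the URL this command prints. A sketch using Playwright's `connect_over_cdp` (Playwright is not a dependency introduced by this diff; the endpoint assumes the default port):

from playwright.sync_api import sync_playwright

with sync_playwright() as p:
    # Attach to the already-running browser instead of launching a new one
    browser = p.chromium.connect_over_cdp("http://localhost:9222")
    context = browser.contexts[0] if browser.contexts else browser.new_context()
    page = context.new_page()
    page.goto("https://example.com")
    print(page.title())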
@cli.command("crawl")
|
||||
@click.argument("url", required=True)
|
||||
@click.option("--browser-config", "-B", type=click.Path(exists=True), help="Browser config file (YAML/JSON)")
|
||||
@click.option("--crawler-config", "-C", type=click.Path(exists=True), help="Crawler config file (YAML/JSON)")
|
||||
@click.option("--filter-config", "-f", type=click.Path(exists=True), help="Content filter config file")
|
||||
@click.option("--extraction-config", "-e", type=click.Path(exists=True), help="Extraction strategy config file")
|
||||
@click.option("--json-extract", "-j", is_flag=False, flag_value="", default=None, help="Extract structured data using LLM with optional description")
|
||||
@click.option("--schema", "-s", type=click.Path(exists=True), help="JSON schema for extraction")
|
||||
@click.option("--browser", "-b", type=str, callback=parse_key_values, help="Browser parameters as key1=value1,key2=value2")
|
||||
@click.option("--crawler", "-c", type=str, callback=parse_key_values, help="Crawler parameters as key1=value1,key2=value2")
|
||||
@click.option("--output", "-o", type=click.Choice(["all", "json", "markdown", "md", "markdown-fit", "md-fit"]), default="all")
|
||||
@click.option("--bypass-cache", is_flag=True, default=True, help="Bypass cache when crawling")
|
||||
@click.option("--output-file", "-O", type=click.Path(), help="Output file path (default: stdout)")
|
||||
@click.option("--bypass-cache", "-b", is_flag=True, default=True, help="Bypass cache when crawling")
|
||||
@click.option("--question", "-q", help="Ask a question about the crawled content")
|
||||
@click.option("--verbose", "-v", is_flag=True)
|
||||
@click.option("--profile", "-p", help="Use a specific browser profile (by name)")
|
||||
def crawl_cmd(url: str, browser_config: str, crawler_config: str, filter_config: str,
|
||||
extraction_config: str, schema: str, browser: Dict, crawler: Dict,
|
||||
output: str, bypass_cache: bool, question: str, verbose: bool, profile: str):
|
||||
extraction_config: str, json_extract: str, schema: str, browser: Dict, crawler: Dict,
|
||||
output: str, output_file: str, bypass_cache: bool, question: str, verbose: bool, profile: str):
|
||||
"""Crawl a website and extract content
|
||||
|
||||
Simple Usage:
|
||||
@@ -617,21 +1060,65 @@ def crawl_cmd(url: str, browser_config: str, crawler_config: str, filter_config:
        crawler_cfg = crawler_cfg.clone(**crawler)

        # Handle content filter config
-        if filter_config:
-            filter_conf = load_config_file(filter_config)
+        if filter_config or output in ["markdown-fit", "md-fit"]:
+            if filter_config:
+                filter_conf = load_config_file(filter_config)
+            elif output in ["markdown-fit", "md-fit"]:
+                # Fall back to a default pruning filter when only a fit-markdown output is requested
+                filter_conf = {
+                    "type": "pruning",
+                    "query": "",
+                    "threshold": 0.48
+                }
            if filter_conf["type"] == "bm25":
-                crawler_cfg.content_filter = BM25ContentFilter(
-                    user_query=filter_conf.get("query"),
-                    bm25_threshold=filter_conf.get("threshold", 1.0)
-                )
+                crawler_cfg.markdown_generator = DefaultMarkdownGenerator(
+                    content_filter=BM25ContentFilter(
+                        user_query=filter_conf.get("query"),
+                        bm25_threshold=filter_conf.get("threshold", 1.0)
+                    )
+                )
            elif filter_conf["type"] == "pruning":
-                crawler_cfg.content_filter = PruningContentFilter(
-                    user_query=filter_conf.get("query"),
-                    threshold=filter_conf.get("threshold", 0.48)
-                )
+                crawler_cfg.markdown_generator = DefaultMarkdownGenerator(
+                    content_filter=PruningContentFilter(
+                        user_query=filter_conf.get("query"),
+                        threshold=filter_conf.get("threshold", 0.48)
+                    )
+                )
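The filter config file read by `load_config_file` above is plain YAML or JSON keyed by a `type` selector. An illustrative `filter.json` matching the keys this code reads (file name and values are made up):

{
  "type": "bm25",
  "query": "browser automation",
  "threshold": 1.0
}

Passed on the command line as: crwl crawl https://example.com -f filter.json -o md-fit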
        # Handle the json-extract option (takes precedence over extraction-config)
        if json_extract is not None:
            # Get the LLM provider and token
            provider, token = setup_llm_config()

            # Default instruction for structured data extraction
            default_instruction = """Analyze the web page content and extract structured data as JSON.
If the page contains a list of items with repeated patterns, extract all items in an array.
If the page is an article or contains unique content, extract a comprehensive JSON object with all relevant information.
Look at the content and its intent, identify what the page offers, and find the data item(s) in it.
Always return valid, properly formatted JSON."""

            default_instruction_with_user_query = """Analyze the web page content and extract structured data as JSON, following the instruction and schema explanation below, and always return valid, properly formatted JSON.

Instruction:

""" + json_extract

            # Choose the instruction based on whether json_extract carries a user description
            instruction = default_instruction_with_user_query if json_extract else default_instruction

            # Create the LLM extraction strategy
            crawler_cfg.extraction_strategy = LLMExtractionStrategy(
                llm_config=LLMConfig(provider=provider, api_token=token),
                instruction=instruction,
                schema=load_schema_file(schema),  # None if no schema file is provided
                extraction_type="schema",
                apply_chunking=False,
                force_json_response=True,
                verbose=verbose,
            )

            # Default the output to JSON if not explicitly specified
            if output == "all":
                output = "json"

-        # Handle extraction strategy
-        if extraction_config:
+        # Handle extraction strategy from a config file (only if json-extract wasn't used)
+        elif extraction_config:
            extract_conf = load_config_file(extraction_config)
            schema_data = load_schema_file(schema)
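Because `--json-extract` is declared with `flag_value=""` and `default=None`, both of the following invocations reach the branch above; the first uses the generic instruction, the second appends the user's description (URLs and file names illustrative):

# Generic structured extraction; the LLM infers the shape
crwl crawl https://example.com/products -j

# Extraction guided by a short description plus an explicit schema
crwl crawl https://example.com/products -j "product name, price, and rating" -s product_schema.json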
@@ -647,7 +1134,7 @@ def crawl_cmd(url: str, browser_config: str, crawler_config: str, filter_config:
                raise click.ClickException("LLM provider and API token are required for LLM extraction")

            crawler_cfg.extraction_strategy = LLMExtractionStrategy(
-                llmConfig=LlmConfig(provider=extract_conf["provider"], api_token=extract_conf["api_token"]),
+                llm_config=LLMConfig(provider=extract_conf["provider"], api_token=extract_conf["api_token"]),
                instruction=extract_conf["instruction"],
                schema=schema_data,
                **extract_conf.get("params", {})
@@ -665,6 +1152,13 @@ def crawl_cmd(url: str, browser_config: str, crawler_config: str, filter_config:
        # No cache
        if bypass_cache:
            crawler_cfg.cache_mode = CacheMode.BYPASS

+        crawler_cfg.scraping_strategy = LXMLWebScrapingStrategy()
+
+        config = get_global_config()
+
+        browser_cfg.verbose = config.get("VERBOSE", False)
+        crawler_cfg.verbose = config.get("VERBOSE", False)

        # Run the crawler
        result: CrawlResult = anyio.run(
@@ -683,14 +1177,31 @@ def crawl_cmd(url: str, browser_config: str, crawler_config: str, filter_config:
            return

        # Handle output
-        if output == "all":
-            click.echo(json.dumps(result.model_dump(), indent=2))
-        elif output == "json":
-            click.echo(json.dumps(json.loads(result.extracted_content), indent=2))
-        elif output in ["markdown", "md"]:
-            click.echo(result.markdown.raw_markdown)
-        elif output in ["markdown-fit", "md-fit"]:
-            click.echo(result.markdown.fit_markdown)
+        if not output_file:
+            if output == "all":
+                click.echo(json.dumps(result.model_dump(), indent=2))
+            elif output == "json":
+                extracted_items = json.loads(result.extracted_content)
+                click.echo(json.dumps(extracted_items, indent=2))
+            elif output in ["markdown", "md"]:
+                click.echo(result.markdown.raw_markdown)
+            elif output in ["markdown-fit", "md-fit"]:
+                click.echo(result.markdown.fit_markdown)
+        else:
+            if output == "all":
+                with open(output_file, "w") as f:
+                    f.write(json.dumps(result.model_dump(), indent=2))
+            elif output == "json":
+                with open(output_file, "w") as f:
+                    f.write(result.extracted_content)
+            elif output in ["markdown", "md"]:
+                with open(output_file, "w") as f:
+                    f.write(result.markdown.raw_markdown)
+            elif output in ["markdown-fit", "md-fit"]:
+                with open(output_file, "w") as f:
+                    f.write(result.markdown.fit_markdown)

    except Exception as e:
        raise click.ClickException(str(e))
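With the new `--output-file` option the same formats can be routed to disk instead of stdout (paths illustrative):

# Print fit markdown to stdout
crwl crawl https://example.com -o md-fit

# Write the full result dump to a file
crwl crawl https://example.com -o all -O result.json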
@@ -700,6 +1211,120 @@ def examples_cmd():
    """Show usage examples"""
    show_examples()


@cli.group("config")
def config_cmd():
    """Manage global configuration settings

    Commands to view and update global configuration settings:
    - list: Display all current configuration settings
    - get: Get the value of a specific setting
    - set: Set the value of a specific setting
    """
    pass


@config_cmd.command("list")
def config_list_cmd():
    """List all configuration settings"""
    config = get_global_config()

    table = Table(title="Crawl4AI Configuration", show_header=True, header_style="bold cyan", border_style="blue")
    table.add_column("Setting", style="cyan")
    table.add_column("Value", style="green")
    table.add_column("Default", style="yellow")
    table.add_column("Description", style="white")

    for key, setting in USER_SETTINGS.items():
        value = config.get(key, setting["default"])

        # Mask secret values
        display_value = value
        if setting.get("secret", False) and value:
            display_value = "********"

        # Normalize boolean values for display
        if setting["type"] == "boolean":
            display_value = str(value).lower()
            default_value = str(setting["default"]).lower()
        else:
            default_value = str(setting["default"])

        table.add_row(
            key,
            str(display_value),
            default_value,
            setting["description"]
        )

    console.print(table)


@config_cmd.command("get")
@click.argument("key", required=True)
def config_get_cmd(key: str):
    """Get a specific configuration setting"""
    config = get_global_config()

    # Normalize the key to uppercase
    key = key.upper()

    if key not in USER_SETTINGS:
        console.print(f"[red]Error: Unknown setting '{key}'[/red]")
        return

    value = config.get(key, USER_SETTINGS[key]["default"])

    # Mask secret values
    display_value = value
    if USER_SETTINGS[key].get("secret", False) and value:
        display_value = "********"

    console.print(f"[cyan]{key}[/cyan] = [green]{display_value}[/green]")
    console.print(f"[dim]Description: {USER_SETTINGS[key]['description']}[/dim]")


@config_cmd.command("set")
@click.argument("key", required=True)
@click.argument("value", required=True)
def config_set_cmd(key: str, value: str):
    """Set a configuration setting"""
    config = get_global_config()

    # Normalize the key to uppercase
    key = key.upper()

    if key not in USER_SETTINGS:
        console.print(f"[red]Error: Unknown setting '{key}'[/red]")
        console.print(f"[yellow]Available settings: {', '.join(USER_SETTINGS.keys())}[/yellow]")
        return

    setting = USER_SETTINGS[key]

    # Type conversion and validation
    if setting["type"] == "boolean":
        if value.lower() in ["true", "yes", "1", "y"]:
            typed_value = True
        elif value.lower() in ["false", "no", "0", "n"]:
            typed_value = False
        else:
            console.print("[red]Error: Invalid boolean value. Use 'true' or 'false'.[/red]")
            return
    elif setting["type"] == "string":
        typed_value = value

        # Check whether the value must be one of the allowed options
        if "options" in setting and value not in setting["options"]:
            console.print(f"[red]Error: Value must be one of: {', '.join(setting['options'])}[/red]")
            return

    # Update the config
    config[key] = typed_value
    save_global_config(config)

    # Mask secret values for display
    display_value = typed_value
    if setting.get("secret", False) and typed_value:
        display_value = "********"

    console.print(f"[green]Successfully set[/green] [cyan]{key}[/cyan] = [green]{display_value}[/green]")
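A typical session against these three subcommands, consistent with the settings defined in USER_SETTINGS (values illustrative):

crwl config list
crwl config get DEFAULT_LLM_PROVIDER
crwl config set DEFAULT_LLM_PROVIDER "anthropic/claude-3-sonnet"
crwl config set VERBOSE true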
@cli.command("profiles")
|
||||
def profiles_cmd():
|
||||
"""Manage browser profiles interactively
|
||||
@@ -719,6 +1344,7 @@ def profiles_cmd():
|
||||
@click.option("--crawler-config", "-C", type=click.Path(exists=True), help="Crawler config file (YAML/JSON)")
|
||||
@click.option("--filter-config", "-f", type=click.Path(exists=True), help="Content filter config file")
|
||||
@click.option("--extraction-config", "-e", type=click.Path(exists=True), help="Extraction strategy config file")
|
||||
@click.option("--json-extract", "-j", is_flag=False, flag_value="", default=None, help="Extract structured data using LLM with optional description")
|
||||
@click.option("--schema", "-s", type=click.Path(exists=True), help="JSON schema for extraction")
|
||||
@click.option("--browser", "-b", type=str, callback=parse_key_values, help="Browser parameters as key1=value1,key2=value2")
|
||||
@click.option("--crawler", "-c", type=str, callback=parse_key_values, help="Crawler parameters as key1=value1,key2=value2")
|
||||
@@ -728,7 +1354,7 @@ def profiles_cmd():
@click.option("--verbose", "-v", is_flag=True)
@click.option("--profile", "-p", help="Use a specific browser profile (by name)")
def default(url: str, example: bool, browser_config: str, crawler_config: str, filter_config: str,
-            extraction_config: str, schema: str, browser: Dict, crawler: Dict,
+            extraction_config: str, json_extract: str, schema: str, browser: Dict, crawler: Dict,
            output: str, bypass_cache: bool, question: str, verbose: bool, profile: str):
    """Crawl4AI CLI - Web content extraction tool

@@ -740,7 +1366,16 @@ def default(url: str, example: bool, browser_config: str, crawler_config: str, f
    Other commands:
        crwl profiles - Manage browser profiles for identity-based crawling
        crwl crawl    - Crawl a website with advanced options
        crwl cdp      - Launch browser with CDP debugging enabled
        crwl browser  - Manage builtin browser (start, stop, status, restart)
        crwl config   - Manage global configuration settings
        crwl examples - Show more usage examples

    Configuration Examples:
        crwl config list                          - List all configuration settings
        crwl config get DEFAULT_LLM_PROVIDER      - Show current LLM provider
        crwl config set VERBOSE true              - Enable verbose mode globally
        crwl config set BROWSER_HEADLESS false    - Default to visible browser
    """

    if example:

@@ -761,7 +1396,8 @@ def default(url: str, example: bool, browser_config: str, crawler_config: str, f
        browser_config=browser_config,
        crawler_config=crawler_config,
        filter_config=filter_config,
        extraction_config=extraction_config,
+        json_extract=json_extract,
        schema=schema,
        browser=browser,
        crawler=crawler,
837  crawl4ai/components/crawler_monitor.py  Normal file

@@ -0,0 +1,837 @@
import time
import uuid
import threading
import psutil
from datetime import datetime, timedelta
from typing import Dict, Optional, List

from rich.console import Console
from rich.layout import Layout
from rich.panel import Panel
from rich.table import Table
from rich.text import Text
from rich.live import Live
from rich import box

from ..models import CrawlStatus
class TerminalUI:
    """Terminal user interface for CrawlerMonitor using the rich library."""

    def __init__(self, refresh_rate: float = 1.0, max_width: int = 120):
        """
        Initialize the terminal UI.

        Args:
            refresh_rate: How often to refresh the UI (in seconds)
            max_width: Maximum width of the UI in characters
        """
        self.console = Console(width=max_width)
        self.layout = Layout()
        self.refresh_rate = refresh_rate
        self.stop_event = threading.Event()
        self.ui_thread = None
        self.monitor = None  # Will be set by CrawlerMonitor
        self.max_width = max_width

        # Set up the layout - vertical (top to bottom)
        self.layout.split(
            Layout(name="header", size=3),
            Layout(name="pipeline_status", size=10),
            Layout(name="task_details", ratio=1),
            Layout(name="footer", size=3)  # Increased footer size to fit all content
        )
    def start(self, monitor):
        """Start the UI thread."""
        self.monitor = monitor
        self.stop_event.clear()
        self.ui_thread = threading.Thread(target=self._ui_loop)
        self.ui_thread.daemon = True
        self.ui_thread.start()

    def stop(self):
        """Stop the UI thread."""
        if self.ui_thread and self.ui_thread.is_alive():
            self.stop_event.set()
            # Only try to join if we're not in the UI thread;
            # this prevents "cannot join current thread" errors
            if threading.current_thread() != self.ui_thread:
                self.ui_thread.join(timeout=5.0)
    def _ui_loop(self):
        """Main UI rendering loop."""
        import sys
        import select
        import termios  # POSIX-only; this UI loop assumes a Unix-like terminal
        import tty

        # Set up the terminal for non-blocking input
        old_settings = termios.tcgetattr(sys.stdin)
        try:
            tty.setcbreak(sys.stdin.fileno())

            # Use a Live display to render the UI
            with Live(self.layout, refresh_per_second=1 / self.refresh_rate, screen=True) as live:
                self.live = live  # Store the live display for updates

                # Main UI loop
                while not self.stop_event.is_set():
                    self._update_display()

                    # Check for a key press (non-blocking)
                    if select.select([sys.stdin], [], [], 0)[0]:
                        key = sys.stdin.read(1)
                        # 'q' quits
                        if key == 'q':
                            # Signal stop but don't call monitor.stop() from the UI thread,
                            # as it would cause the thread to try to join itself
                            self.stop_event.set()
                            self.monitor.is_running = False
                            break

                    time.sleep(self.refresh_rate)

                    # Also exit if the monitor was stopped elsewhere
                    if not self.monitor.is_running:
                        break
        finally:
            # Restore terminal settings
            termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_settings)
    def _update_display(self):
        """Update the terminal display with current statistics."""
        if not self.monitor:
            return

        # Update the crawler status panel
        self.layout["header"].update(self._create_status_panel())

        # Update the pipeline status and task details panels
        self.layout["pipeline_status"].update(self._create_pipeline_panel())
        self.layout["task_details"].update(self._create_task_details_panel())

        # Update the footer
        self.layout["footer"].update(self._create_footer())
    def _create_status_panel(self) -> Panel:
        """Create the crawler status panel."""
        summary = self.monitor.get_summary()

        # Format the memory status with an icon
        memory_status = self.monitor.get_memory_status()
        memory_icon = "🟢"  # Default NORMAL
        if memory_status == "PRESSURE":
            memory_icon = "🟠"
        elif memory_status == "CRITICAL":
            memory_icon = "🔴"

        # Get current memory usage (keep both sides of the division in MB)
        current_memory = psutil.Process().memory_info().rss / (1024 * 1024)  # MB
        memory_percent = (current_memory / (psutil.virtual_memory().total / (1024 * 1024))) * 100

        # Format runtime
        runtime = self.monitor._format_time(time.time() - self.monitor.start_time if self.monitor.start_time else 0)

        # Create the status text
        status_text = Text()
        status_text.append(f"Web Crawler Dashboard | Runtime: {runtime} | Memory: {memory_percent:.1f}% {memory_icon}\n")
        status_text.append(f"Status: {memory_status} | URLs: {summary['urls_completed']}/{summary['urls_total']} | ")
        status_text.append(f"Peak Mem: {summary['peak_memory_percent']:.1f}% at {self.monitor._format_time(summary['peak_memory_time'])}")

        return Panel(status_text, title="Crawler Status", border_style="blue")
    def _create_pipeline_panel(self) -> Panel:
        """Create the pipeline status panel."""
        summary = self.monitor.get_summary()
        queue_stats = self.monitor.get_queue_stats()

        # Create a table for status counts
        table = Table(show_header=True, box=None)
        table.add_column("Status", style="cyan")
        table.add_column("Count", justify="right")
        table.add_column("Percentage", justify="right")
        table.add_column("Stat", style="cyan")
        table.add_column("Value", justify="right")

        # Calculate overall progress
        progress = f"{summary['urls_completed']}/{summary['urls_total']}"
        progress_percent = f"{summary['completion_percentage']:.1f}%"

        # Overall progress row
        table.add_row(
            "Overall Progress",
            progress,
            progress_percent,
            "Est. Completion",
            summary.get('estimated_completion_time', "N/A")
        )

        # Per-status rows
        status_counts = summary['status_counts']
        total = summary['urls_total'] or 1  # Avoid division by zero

        table.add_row(
            "Completed",
            str(status_counts.get(CrawlStatus.COMPLETED.name, 0)),
            f"{status_counts.get(CrawlStatus.COMPLETED.name, 0) / total * 100:.1f}%",
            "Avg. Time/URL",
            f"{summary.get('avg_task_duration', 0):.2f}s"
        )

        table.add_row(
            "Failed",
            str(status_counts.get(CrawlStatus.FAILED.name, 0)),
            f"{status_counts.get(CrawlStatus.FAILED.name, 0) / total * 100:.1f}%",
            "Concurrent Tasks",
            str(status_counts.get(CrawlStatus.IN_PROGRESS.name, 0))
        )

        table.add_row(
            "In Progress",
            str(status_counts.get(CrawlStatus.IN_PROGRESS.name, 0)),
            f"{status_counts.get(CrawlStatus.IN_PROGRESS.name, 0) / total * 100:.1f}%",
            "Queue Size",
            str(queue_stats['total_queued'])
        )

        table.add_row(
            "Queued",
            str(status_counts.get(CrawlStatus.QUEUED.name, 0)),
            f"{status_counts.get(CrawlStatus.QUEUED.name, 0) / total * 100:.1f}%",
            "Max Wait Time",
            f"{queue_stats['highest_wait_time']:.1f}s"
        )

        # Requeued is a special case, as it's not a status
        requeued_count = summary.get('requeued_count', 0)
        table.add_row(
            "Requeued",
            str(requeued_count),
            f"{summary.get('requeue_rate', 0):.1f}%",
            "Avg Wait Time",
            f"{queue_stats['avg_wait_time']:.1f}s"
        )

        # Add an empty row for spacing
        table.add_row(
            "",
            "",
            "",
            "Requeue Rate",
            f"{summary.get('requeue_rate', 0):.1f}%"
        )

        return Panel(table, title="Pipeline Status", border_style="green")
    def _create_task_details_panel(self) -> Panel:
        """Create the task details panel."""
        # Create a table for task details
        table = Table(show_header=True, expand=True)
        table.add_column("Task ID", style="cyan", no_wrap=True, width=10)
        table.add_column("URL", style="blue", ratio=3)
        table.add_column("Status", style="green", width=15)
        table.add_column("Memory", justify="right", width=8)
        table.add_column("Peak", justify="right", width=8)
        table.add_column("Duration", justify="right", width=10)

        # Get all task stats
        task_stats = self.monitor.get_all_task_stats()

        # Add a summary row
        active_tasks = sum(1 for stats in task_stats.values()
                           if stats['status'] == CrawlStatus.IN_PROGRESS.name)

        total_memory = sum(stats['memory_usage'] for stats in task_stats.values())
        total_peak = sum(stats['peak_memory'] for stats in task_stats.values())

        # Summary row with separators
        table.add_row(
            "SUMMARY",
            f"Total: {len(task_stats)}",
            f"Active: {active_tasks}",
            f"{total_memory:.1f}",
            f"{total_peak:.1f}",
            "N/A"
        )

        # Add a separator
        table.add_row("—" * 10, "—" * 20, "—" * 10, "—" * 8, "—" * 8, "—" * 10)

        # Status icons
        status_icons = {
            CrawlStatus.QUEUED.name: "⏳",
            CrawlStatus.IN_PROGRESS.name: "🔄",
            CrawlStatus.COMPLETED.name: "✅",
            CrawlStatus.FAILED.name: "❌"
        }

        # Limit how many rows we display; the dedicated panel fits about 20 tasks
        display_count = min(len(task_stats), 20)

        # Add a row for each task; sort before slicing so the most relevant tasks survive the cut:
        # 1. IN_PROGRESS first, 2. QUEUED, 3. COMPLETED/FAILED by recency
        for task_id, stats in sorted(
            task_stats.items(),
            key=lambda x: (
                0 if x[1]['status'] == CrawlStatus.IN_PROGRESS.name else
                1 if x[1]['status'] == CrawlStatus.QUEUED.name else
                2,
                -1 * (x[1].get('end_time', 0) or 0)  # Most recent first
            )
        )[:display_count]:
            # Truncate task_id and URL for display
            short_id = task_id[:8]
            url = stats['url']
            if len(url) > 50:  # Allow longer URLs in the dedicated panel
                url = url[:47] + "..."

            # Format status with an icon
            status = f"{status_icons.get(stats['status'], '?')} {stats['status']}"

            # Add the row
            table.add_row(
                short_id,
                url,
                status,
                f"{stats['memory_usage']:.1f}",
                f"{stats['peak_memory']:.1f}",
                stats['duration'] if 'duration' in stats else "0:00"
            )

        return Panel(table, title="Task Details", border_style="yellow")
    def _create_footer(self) -> Panel:
        """Create the footer panel."""
        from rich.columns import Columns
        from rich.align import Align

        memory_status = self.monitor.get_memory_status()
        memory_icon = "🟢"  # Default NORMAL
        if memory_status == "PRESSURE":
            memory_icon = "🟠"
        elif memory_status == "CRITICAL":
            memory_icon = "🔴"

        # Left section - memory status
        left_text = Text()
        left_text.append("Memory Status: ", style="bold")
        status_style = "green" if memory_status == "NORMAL" else "yellow" if memory_status == "PRESSURE" else "red bold"
        left_text.append(f"{memory_icon} {memory_status}", style=status_style)

        # Center section - copyright
        center_text = Text("© Crawl4AI 2025 | Made by UncleCode", style="cyan italic")

        # Right section - quit instruction
        right_text = Text()
        right_text.append("Press ", style="bold")
        right_text.append("q", style="white on blue")
        right_text.append(" to quit", style="bold")

        # Create columns with the three sections
        footer_content = Columns(
            [
                Align.left(left_text),
                Align.center(center_text),
                Align.right(right_text)
            ],
            expand=True
        )

        # Create a clearly visible footer panel
        return Panel(
            footer_content,
            border_style="white",
            padding=(0, 1)  # Add padding for better visibility
        )
class CrawlerMonitor:
    """
    Comprehensive monitoring and visualization system for tracking web crawler operations in real time.
    Provides a terminal-based dashboard that displays task statuses, memory usage, queue statistics,
    and performance metrics.
    """

    def __init__(
        self,
        urls_total: int = 0,
        refresh_rate: float = 1.0,
        enable_ui: bool = True,
        max_width: int = 120
    ):
        """
        Initialize the CrawlerMonitor.

        Args:
            urls_total: Total number of URLs to be crawled
            refresh_rate: How often to refresh the UI (in seconds)
            enable_ui: Whether to display the terminal UI
            max_width: Maximum width of the UI in characters
        """
        # Core monitoring attributes
        self.stats = {}  # Task ID -> stats dict
        self.memory_status = "NORMAL"
        self.start_time = None
        self.end_time = None
        self.is_running = False
        self.queue_stats = {
            "total_queued": 0,
            "highest_wait_time": 0.0,
            "avg_wait_time": 0.0
        }
        self.urls_total = urls_total
        self.urls_completed = 0
        self.peak_memory_percent = 0.0
        self.peak_memory_time = 0.0

        # Status counts
        self.status_counts = {
            CrawlStatus.QUEUED.name: 0,
            CrawlStatus.IN_PROGRESS.name: 0,
            CrawlStatus.COMPLETED.name: 0,
            CrawlStatus.FAILED.name: 0
        }

        # Requeue tracking
        self.requeued_count = 0

        # Thread safety
        self._lock = threading.RLock()

        # Terminal UI
        self.enable_ui = enable_ui
        self.terminal_ui = TerminalUI(
            refresh_rate=refresh_rate,
            max_width=max_width
        ) if enable_ui else None
    def start(self):
        """
        Start the monitoring session.

        - Initializes the start_time
        - Sets is_running to True
        - Starts the terminal UI if enabled
        """
        with self._lock:
            self.start_time = time.time()
            self.is_running = True

        # Start the terminal UI (outside the lock so the UI thread can read stats freely)
        if self.enable_ui and self.terminal_ui:
            self.terminal_ui.start(self)

    def stop(self):
        """
        Stop the monitoring session.

        - Records end_time
        - Sets is_running to False
        - Stops the terminal UI
        - Generates final summary statistics
        """
        with self._lock:
            self.end_time = time.time()
            self.is_running = False

        # Stop the terminal UI (outside the lock; joining the UI thread while
        # holding the lock could block it from finishing its last refresh)
        if self.enable_ui and self.terminal_ui:
            self.terminal_ui.stop()
    def add_task(self, task_id: str, url: str):
        """
        Register a new task with the monitor.

        Args:
            task_id: Unique identifier for the task
            url: URL being crawled

        The task is initialized with:
        - status: QUEUED
        - url: The URL to crawl
        - enqueue_time: Current time
        - memory_usage: 0
        - peak_memory: 0
        - wait_time: 0
        - retry_count: 0
        """
        with self._lock:
            self.stats[task_id] = {
                "task_id": task_id,
                "url": url,
                "status": CrawlStatus.QUEUED.name,
                "enqueue_time": time.time(),
                "start_time": None,
                "end_time": None,
                "memory_usage": 0.0,
                "peak_memory": 0.0,
                "error_message": "",
                "wait_time": 0.0,
                "retry_count": 0,
                "duration": "0:00",
                "counted_requeue": False
            }

            # Update status counts
            self.status_counts[CrawlStatus.QUEUED.name] += 1
    def update_task(
        self,
        task_id: str,
        status: Optional[CrawlStatus] = None,
        start_time: Optional[float] = None,
        end_time: Optional[float] = None,
        memory_usage: Optional[float] = None,
        peak_memory: Optional[float] = None,
        error_message: Optional[str] = None,
        retry_count: Optional[int] = None,
        wait_time: Optional[float] = None
    ):
        """
        Update statistics for a specific task.

        Args:
            task_id: Unique identifier for the task
            status: New status (QUEUED, IN_PROGRESS, COMPLETED, FAILED)
            start_time: When task execution started
            end_time: When task execution ended
            memory_usage: Current memory usage in MB
            peak_memory: Maximum memory usage in MB
            error_message: Error description if failed
            retry_count: Number of retry attempts
            wait_time: Time spent in queue

        Updates task statistics and keeps status counts in sync:
        if the status changes, the old status count is decremented and
        the new status count incremented.
        """
        with self._lock:
            # Check that the task exists
            if task_id not in self.stats:
                return

            task_stats = self.stats[task_id]

            # Update status counts if the status is changing
            old_status = task_stats["status"]
            if status and status.name != old_status:
                self.status_counts[old_status] -= 1
                self.status_counts[status.name] += 1

                # Track completion
                if status == CrawlStatus.COMPLETED:
                    self.urls_completed += 1

                # Track requeues
                if old_status in [CrawlStatus.COMPLETED.name, CrawlStatus.FAILED.name] and not task_stats.get("counted_requeue", False):
                    self.requeued_count += 1
                    task_stats["counted_requeue"] = True

            # Update task statistics
            if status:
                task_stats["status"] = status.name
            if start_time is not None:
                task_stats["start_time"] = start_time
            if end_time is not None:
                task_stats["end_time"] = end_time
            if memory_usage is not None:
                task_stats["memory_usage"] = memory_usage

                # Update the peak memory percentage if necessary
                # (memory_usage is in MB, so normalize total memory to MB as well)
                current_percent = (memory_usage / (psutil.virtual_memory().total / (1024 * 1024))) * 100
                if current_percent > self.peak_memory_percent:
                    self.peak_memory_percent = current_percent
                    self.peak_memory_time = time.time()

            if peak_memory is not None:
                task_stats["peak_memory"] = peak_memory
            if error_message is not None:
                task_stats["error_message"] = error_message
            if retry_count is not None:
                task_stats["retry_count"] = retry_count
            if wait_time is not None:
                task_stats["wait_time"] = wait_time

            # Calculate the duration
            if task_stats["start_time"]:
                end = task_stats["end_time"] or time.time()
                duration = end - task_stats["start_time"]
                task_stats["duration"] = self._format_time(duration)
    def update_memory_status(self, status: str):
        """
        Update the current memory status.

        Args:
            status: Memory status (NORMAL, PRESSURE, CRITICAL, or custom)

        The UI picks up the new status on its next refresh.
        """
        with self._lock:
            self.memory_status = status

    def update_queue_statistics(
        self,
        total_queued: int,
        highest_wait_time: float,
        avg_wait_time: float
    ):
        """
        Update statistics related to the task queue.

        Args:
            total_queued: Number of tasks currently in the queue
            highest_wait_time: Longest wait time of any queued task
            avg_wait_time: Average wait time across all queued tasks
        """
        with self._lock:
            self.queue_stats = {
                "total_queued": total_queued,
                "highest_wait_time": highest_wait_time,
                "avg_wait_time": avg_wait_time
            }

    def get_task_stats(self, task_id: str) -> Dict:
        """
        Get statistics for a specific task.

        Args:
            task_id: Unique identifier for the task

        Returns:
            Dictionary containing all task statistics
        """
        with self._lock:
            return self.stats.get(task_id, {}).copy()

    def get_all_task_stats(self) -> Dict[str, Dict]:
        """
        Get statistics for all tasks.

        Returns:
            Dictionary mapping task_ids to their statistics
        """
        with self._lock:
            return self.stats.copy()

    def get_memory_status(self) -> str:
        """
        Get the current memory status.

        Returns:
            Current memory status string
        """
        with self._lock:
            return self.memory_status

    def get_queue_stats(self) -> Dict:
        """
        Get current queue statistics.

        Returns:
            Dictionary with queue statistics including:
            - total_queued: Number of tasks in the queue
            - highest_wait_time: Longest wait time
            - avg_wait_time: Average wait time
        """
        with self._lock:
            return self.queue_stats.copy()
    def get_summary(self) -> Dict:
        """
        Get a summary of all crawler statistics.

        Returns:
            Dictionary containing:
            - runtime: Total runtime in seconds
            - urls_total: Total URLs to process
            - urls_completed: Number of completed URLs
            - completion_percentage: Percentage complete
            - status_counts: Count of tasks in each status
            - memory_status: Current memory status
            - peak_memory_percent: Highest memory usage
            - peak_memory_time: When peak memory occurred
            - avg_task_duration: Average task processing time
            - estimated_completion_time: Projected time remaining
            - requeue_rate: Percentage of tasks requeued
        """
        with self._lock:
            # Calculate runtime
            current_time = time.time()
            runtime = current_time - (self.start_time or current_time)

            # Calculate completion percentage
            completion_percentage = 0
            if self.urls_total > 0:
                completion_percentage = (self.urls_completed / self.urls_total) * 100

            # Calculate the average duration of completed tasks
            completed_tasks = [
                task for task in self.stats.values()
                if task["status"] == CrawlStatus.COMPLETED.name and task.get("start_time") and task.get("end_time")
            ]

            avg_task_duration = 0
            if completed_tasks:
                total_duration = sum(task["end_time"] - task["start_time"] for task in completed_tasks)
                avg_task_duration = total_duration / len(completed_tasks)

            # Calculate the requeue rate
            requeue_rate = 0
            if len(self.stats) > 0:
                requeue_rate = (self.requeued_count / len(self.stats)) * 100

            # Estimate the remaining time to completion
            estimated_completion_time = "N/A"
            if avg_task_duration > 0 and self.urls_total > 0 and self.urls_completed > 0:
                remaining_tasks = self.urls_total - self.urls_completed
                estimated_seconds = remaining_tasks * avg_task_duration
                estimated_completion_time = self._format_time(estimated_seconds)

            return {
                "runtime": runtime,
                "urls_total": self.urls_total,
                "urls_completed": self.urls_completed,
                "completion_percentage": completion_percentage,
                "status_counts": self.status_counts.copy(),
                "memory_status": self.memory_status,
                "peak_memory_percent": self.peak_memory_percent,
                "peak_memory_time": self.peak_memory_time,
                "avg_task_duration": avg_task_duration,
                "estimated_completion_time": estimated_completion_time,
                "requeue_rate": requeue_rate,
                "requeued_count": self.requeued_count
            }
    def render(self):
        """
        Render the terminal UI.

        This is the main UI rendering entry point that:
        1. Updates all statistics
        2. Formats the display
        3. Renders the terminal interface
        4. Handles keyboard input

        Note: The actual rendering is handled by the TerminalUI class,
        which uses the rich library's Live display.
        """
        if self.enable_ui and self.terminal_ui:
            # Force an update of the UI
            if hasattr(self.terminal_ui, '_update_display'):
                self.terminal_ui._update_display()

    def _format_time(self, seconds: float) -> str:
        """
        Format time as hours:minutes:seconds.

        Args:
            seconds: Time in seconds

        Returns:
            Formatted time string (e.g., "1:23:45")
        """
        total_seconds = int(seconds)  # Use the full value so durations over a day aren't truncated
        hours, remainder = divmod(total_seconds, 3600)
        minutes, seconds = divmod(remainder, 60)

        if hours > 0:
            return f"{hours}:{minutes:02}:{seconds:02}"
        else:
            return f"{minutes}:{seconds:02}"

    def _calculate_estimated_completion(self) -> str:
        """
        Calculate the estimated completion time based on current progress.

        Returns:
            Formatted time string
        """
        summary = self.get_summary()
        return summary.get("estimated_completion_time", "N/A")
# Example code for testing
if __name__ == "__main__":
    # Initialize the monitor
    monitor = CrawlerMonitor(urls_total=100)

    # Start monitoring
    monitor.start()

    try:
        # Simulate some tasks
        for i in range(20):
            task_id = str(uuid.uuid4())
            url = f"https://example.com/page{i}"
            monitor.add_task(task_id, url)

            # Simulate 20% of tasks already running
            if i < 4:
                monitor.update_task(
                    task_id=task_id,
                    status=CrawlStatus.IN_PROGRESS,
                    start_time=time.time() - 30,  # Started 30 seconds ago
                    memory_usage=10.5
                )

            # Simulate 10% of tasks completed
            if i >= 4 and i < 6:
                start_time = time.time() - 60
                end_time = time.time() - 15
                monitor.update_task(
                    task_id=task_id,
                    status=CrawlStatus.IN_PROGRESS,
                    start_time=start_time,
                    memory_usage=8.2
                )
                monitor.update_task(
                    task_id=task_id,
                    status=CrawlStatus.COMPLETED,
                    end_time=end_time,
                    memory_usage=0,
                    peak_memory=15.7
                )

            # Simulate 5% of tasks failing
            if i >= 6 and i < 7:
                start_time = time.time() - 45
                end_time = time.time() - 20
                monitor.update_task(
                    task_id=task_id,
                    status=CrawlStatus.IN_PROGRESS,
                    start_time=start_time,
                    memory_usage=12.3
                )
                monitor.update_task(
                    task_id=task_id,
                    status=CrawlStatus.FAILED,
                    end_time=end_time,
                    memory_usage=0,
                    peak_memory=18.2,
                    error_message="Connection timeout"
                )

        # Simulate memory pressure
        monitor.update_memory_status("PRESSURE")

        # Simulate queue statistics
        monitor.update_queue_statistics(
            total_queued=16,  # 20 - 4 (in progress)
            highest_wait_time=120.5,
            avg_wait_time=60.2
        )

        # Keep the monitor running for the demonstration
        print("Crawler Monitor is running. Press 'q' to exit.")
        while monitor.is_running:
            time.sleep(0.1)

    except KeyboardInterrupt:
        print("\nExiting crawler monitor...")
    finally:
        # Stop the monitor
        monitor.stop()
        print("Crawler monitor exited successfully.")
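In normal use this monitor is handed to a dispatcher rather than driven by hand as in the demo above. A hedged sketch of that wiring, assuming the `MemoryAdaptiveDispatcher` import path and the `arun_many(..., dispatcher=...)` signature of recent crawl4ai releases:

import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
from crawl4ai.async_dispatcher import MemoryAdaptiveDispatcher  # import path assumed
from crawl4ai.components.crawler_monitor import CrawlerMonitor

async def main():
    urls = [f"https://example.com/page{i}" for i in range(10)]
    dispatcher = MemoryAdaptiveDispatcher(
        monitor=CrawlerMonitor(urls_total=len(urls))  # live dashboard while crawling
    )
    async with AsyncWebCrawler() as crawler:
        results = await crawler.arun_many(urls, config=CrawlerRunConfig(), dispatcher=dispatcher)
        print(f"Crawled {len(results)} pages")

asyncio.run(main())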
@@ -4,7 +4,8 @@ from dotenv import load_dotenv
load_dotenv()  # Load environment variables from .env file

# Default provider, ONLY used when the extraction strategy is LLMExtractionStrategy
-DEFAULT_PROVIDER = "openai/gpt-4o-mini"
+DEFAULT_PROVIDER = "openai/gpt-4o"
DEFAULT_PROVIDER_API_KEY = "OPENAI_API_KEY"
MODEL_REPO_BRANCH = "new-release-0.0.2"
# Provider-model dictionary, ONLY used when the extraction strategy is LLMExtractionStrategy
PROVIDER_MODELS = {

@@ -28,6 +29,14 @@ PROVIDER_MODELS = {
    'gemini/gemini-2.0-flash-lite-preview-02-05': os.getenv("GEMINI_API_KEY"),
    "deepseek/deepseek-chat": os.getenv("DEEPSEEK_API_KEY"),
}
+PROVIDER_MODELS_PREFIXES = {
+    "ollama": "no-token-needed",  # Ollama models need no API token
+    "groq": os.getenv("GROQ_API_KEY"),
+    "openai": os.getenv("OPENAI_API_KEY"),
+    "anthropic": os.getenv("ANTHROPIC_API_KEY"),
+    "gemini": os.getenv("GEMINI_API_KEY"),
+    "deepseek": os.getenv("DEEPSEEK_API_KEY"),
+}

# Chunk token threshold
CHUNK_TOKEN_THRESHOLD = 2**11  # 2048 tokens
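The prefix table lets a token be resolved for any model under a provider family, not only the exact entries in PROVIDER_MODELS. A minimal lookup sketch; `resolve_api_token` is illustrative and not part of this diff:

from typing import Optional

def resolve_api_token(provider: str) -> Optional[str]:
    """Exact model match first, then fall back to the provider-prefix table."""
    if provider in PROVIDER_MODELS:
        return PROVIDER_MODELS[provider]
    prefix = provider.split("/", 1)[0]
    return PROVIDER_MODELS_PREFIXES.get(prefix)

# e.g. resolve_api_token("openai/some-new-model") falls back to the "openai" prefix entry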
@@ -92,3 +101,46 @@ SHOW_DEPRECATION_WARNINGS = True
SCREENSHOT_HEIGHT_TRESHOLD = 10000
PAGE_TIMEOUT = 60000
DOWNLOAD_PAGE_TIMEOUT = 60000

# Global user settings with descriptions and default values
USER_SETTINGS = {
    "DEFAULT_LLM_PROVIDER": {
        "default": "openai/gpt-4o",
        "description": "Default LLM provider in 'company/model' format (e.g., 'openai/gpt-4o', 'anthropic/claude-3-sonnet')",
        "type": "string"
    },
    "DEFAULT_LLM_PROVIDER_TOKEN": {
        "default": "",
        "description": "API token for the default LLM provider",
        "type": "string",
        "secret": True
    },
    "VERBOSE": {
        "default": False,
        "description": "Enable verbose output for all commands",
        "type": "boolean"
    },
    "BROWSER_HEADLESS": {
        "default": True,
        "description": "Run browser in headless mode by default",
        "type": "boolean"
    },
    "BROWSER_TYPE": {
        "default": "chromium",
        "description": "Default browser type (chromium or firefox)",
        "type": "string",
        "options": ["chromium", "firefox"]
    },
    "CACHE_MODE": {
        "default": "bypass",
        "description": "Default cache mode (bypass, use, or refresh)",
        "type": "string",
        "options": ["bypass", "use", "refresh"]
    },
    "USER_AGENT_MODE": {
        "default": "default",
        "description": "Default user agent mode (default, random, or mobile)",
        "type": "string",
        "options": ["default", "random", "mobile"]
    }
}
@@ -1,2 +0,0 @@
|
||||
from .proxy_config import ProxyConfig
|
||||
__all__ = ["ProxyConfig"]
|
||||
@@ -1,113 +0,0 @@
|
||||
import os
|
||||
from typing import Dict, List, Optional
|
||||
|
||||
|
||||
class ProxyConfig:
|
||||
def __init__(
|
||||
self,
|
||||
server: str,
|
||||
username: Optional[str] = None,
|
||||
password: Optional[str] = None,
|
||||
ip: Optional[str] = None,
|
||||
):
|
||||
"""Configuration class for a single proxy.
|
||||
|
||||
Args:
|
||||
server: Proxy server URL (e.g., "http://127.0.0.1:8080")
|
||||
username: Optional username for proxy authentication
|
||||
password: Optional password for proxy authentication
|
||||
ip: Optional IP address for verification purposes
|
||||
"""
|
||||
self.server = server
|
||||
self.username = username
|
||||
self.password = password
|
||||
|
||||
# Extract IP from server if not explicitly provided
|
||||
self.ip = ip or self._extract_ip_from_server()
|
||||
|
||||
def _extract_ip_from_server(self) -> Optional[str]:
|
||||
"""Extract IP address from server URL."""
|
||||
try:
|
||||
# Simple extraction assuming http://ip:port format
|
||||
if "://" in self.server:
|
||||
parts = self.server.split("://")[1].split(":")
|
||||
return parts[0]
|
||||
else:
|
||||
parts = self.server.split(":")
|
||||
return parts[0]
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def from_string(proxy_str: str) -> "ProxyConfig":
|
||||
"""Create a ProxyConfig from a string in the format 'ip:port:username:password'."""
|
||||
parts = proxy_str.split(":")
|
||||
if len(parts) == 4: # ip:port:username:password
|
||||
ip, port, username, password = parts
|
||||
return ProxyConfig(
|
||||
server=f"http://{ip}:{port}",
|
||||
username=username,
|
||||
password=password,
|
||||
ip=ip
|
||||
)
|
||||
elif len(parts) == 2: # ip:port only
|
||||
ip, port = parts
|
||||
return ProxyConfig(
|
||||
server=f"http://{ip}:{port}",
|
||||
ip=ip
|
||||
)
|
||||
else:
|
||||
raise ValueError(f"Invalid proxy string format: {proxy_str}")
|
||||
|
||||
@staticmethod
|
||||
def from_dict(proxy_dict: Dict) -> "ProxyConfig":
|
||||
"""Create a ProxyConfig from a dictionary."""
|
||||
return ProxyConfig(
|
||||
server=proxy_dict.get("server"),
|
||||
username=proxy_dict.get("username"),
|
||||
password=proxy_dict.get("password"),
|
||||
ip=proxy_dict.get("ip")
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def from_env(env_var: str = "PROXIES") -> List["ProxyConfig"]:
|
||||
"""Load proxies from environment variable.
|
||||
|
||||
Args:
|
||||
env_var: Name of environment variable containing comma-separated proxy strings
|
||||
|
||||
Returns:
|
||||
List of ProxyConfig objects
|
||||
"""
|
||||
proxies = []
|
||||
try:
|
||||
proxy_list = os.getenv(env_var, "").split(",")
|
||||
for proxy in proxy_list:
|
||||
if not proxy:
|
||||
continue
|
||||
proxies.append(ProxyConfig.from_string(proxy))
|
||||
except Exception as e:
|
||||
print(f"Error loading proxies from environment: {e}")
|
||||
return proxies
|
||||
|
||||
def to_dict(self) -> Dict:
|
||||
"""Convert to dictionary representation."""
|
||||
return {
|
||||
"server": self.server,
|
||||
"username": self.username,
|
||||
"password": self.password,
|
||||
"ip": self.ip
|
||||
}
|
||||
|
||||
def clone(self, **kwargs) -> "ProxyConfig":
|
||||
"""Create a copy of this configuration with updated values.
|
||||
|
||||
Args:
|
||||
**kwargs: Key-value pairs of configuration options to update
|
||||
|
||||
Returns:
|
||||
ProxyConfig: A new instance with the specified updates
|
||||
"""
|
||||
config_dict = self.to_dict()
|
||||
config_dict.update(kwargs)
|
||||
return ProxyConfig.from_dict(config_dict)
|
||||
@@ -16,19 +16,18 @@ from .utils import (
|
||||
extract_xml_data,
|
||||
merge_chunks,
|
||||
)
|
||||
from .types import LLMConfig
|
||||
from .config import DEFAULT_PROVIDER, OVERLAP_RATE, WORD_TOKEN_RATE
|
||||
from abc import ABC, abstractmethod
|
||||
import math
|
||||
from snowballstemmer import stemmer
|
||||
from .config import DEFAULT_PROVIDER, OVERLAP_RATE, WORD_TOKEN_RATE, PROVIDER_MODELS
|
||||
from .models import TokenUsage
|
||||
from .prompts import PROMPT_FILTER_CONTENT
|
||||
import os
|
||||
import json
|
||||
import hashlib
|
||||
from pathlib import Path
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
from .async_logger import AsyncLogger, LogLevel
|
||||
from colorama import Fore, Style
|
||||
from .async_logger import AsyncLogger, LogLevel, LogColor
|
||||
|
||||
|
||||
class RelevantContentFilter(ABC):
|
||||
@@ -770,37 +769,56 @@ class PruningContentFilter(RelevantContentFilter):
|
||||
|
||||
|
||||
class LLMContentFilter(RelevantContentFilter):
|
||||
"""Content filtering using LLMs to generate relevant markdown."""
|
||||
"""Content filtering using LLMs to generate relevant markdown.
|
||||
|
||||
How it works:
|
||||
1. Extracts page metadata with fallbacks.
|
||||
2. Extracts text chunks from the body element.
|
||||
3. Applies LLMs to generate markdown for each chunk.
|
||||
4. Filters out chunks below the threshold.
|
||||
5. Sorts chunks by score in descending order.
|
||||
6. Returns the top N chunks.
|
||||
|
||||
Attributes:
|
||||
llm_config (LLMConfig): LLM configuration object.
|
||||
instruction (str): Instruction for LLM markdown generation
|
||||
chunk_token_threshold (int): Chunk token threshold for splitting (default: 1e9).
|
||||
overlap_rate (float): Overlap rate for chunking (default: 0.5).
|
||||
word_token_rate (float): Word token rate for chunking (default: 0.2).
|
||||
verbose (bool): Enable verbose logging (default: False).
|
||||
logger (AsyncLogger): Custom logger for LLM operations (optional).
|
||||
"""
|
||||
_UNWANTED_PROPS = {
|
||||
'provider' : 'Instead, use llmConfig=LlmConfig(provider="...")',
|
||||
'api_token' : 'Instead, use llmConfig=LlMConfig(api_token="...")',
|
||||
'base_url' : 'Instead, use llmConfig=LlmConfig(base_url="...")',
|
||||
'api_base' : 'Instead, use llmConfig=LlmConfig(base_url="...")',
|
||||
'provider' : 'Instead, use llm_config=LLMConfig(provider="...")',
|
||||
'api_token' : 'Instead, use llm_config=LLMConfig(api_token="...")',
|
||||
'base_url' : 'Instead, use llm_config=LLMConfig(base_url="...")',
|
||||
'api_base' : 'Instead, use llm_config=LLMConfig(base_url="...")',
|
||||
}
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
provider: str = DEFAULT_PROVIDER,
|
||||
api_token: Optional[str] = None,
|
||||
llmConfig: "LlmConfig" = None,
|
||||
llm_config: "LLMConfig" = None,
|
||||
instruction: str = None,
|
||||
chunk_token_threshold: int = int(1e9),
|
||||
overlap_rate: float = OVERLAP_RATE,
|
||||
word_token_rate: float = WORD_TOKEN_RATE,
|
||||
base_url: Optional[str] = None,
|
||||
api_base: Optional[str] = None,
|
||||
extra_args: Dict = None,
|
||||
# char_token_rate: float = WORD_TOKEN_RATE * 5,
|
||||
# chunk_mode: str = "char",
|
||||
verbose: bool = False,
|
||||
logger: Optional[AsyncLogger] = None,
|
||||
ignore_cache: bool = True,
|
||||
# Deprecated properties
|
||||
provider: str = DEFAULT_PROVIDER,
|
||||
api_token: Optional[str] = None,
|
||||
base_url: Optional[str] = None,
|
||||
api_base: Optional[str] = None,
|
||||
extra_args: Dict = None,
|
||||
):
|
||||
super().__init__(None)
|
||||
self.provider = provider
|
||||
self.api_token = api_token
|
||||
self.base_url = base_url or api_base
|
||||
self.llmConfig = llmConfig
|
||||
self.llm_config = llm_config
|
||||
self.instruction = instruction
|
||||
self.chunk_token_threshold = chunk_token_threshold
|
||||
self.overlap_rate = overlap_rate
|
||||
@@ -827,8 +845,7 @@ class LLMContentFilter(RelevantContentFilter):
|
||||
},
|
||||
colors={
|
||||
**AsyncLogger.DEFAULT_COLORS,
|
||||
LogLevel.INFO: Fore.MAGENTA
|
||||
+ Style.DIM, # Dimmed purple for LLM ops
|
||||
LogLevel.INFO: LogColor.DIM_MAGENTA # Dimmed purple for LLM ops
|
||||
},
|
||||
)
|
||||
else:
|
||||
@@ -872,8 +889,8 @@ class LLMContentFilter(RelevantContentFilter):
|
||||
self.logger.info(
|
||||
"Starting LLM markdown content filtering process",
|
||||
tag="LLM",
|
||||
params={"provider": self.llmConfig.provider},
|
||||
colors={"provider": Fore.CYAN},
|
||||
params={"provider": self.llm_config.provider},
|
||||
colors={"provider": LogColor.CYAN},
|
||||
)
|
||||
|
||||
# Cache handling
|
||||
@@ -910,7 +927,7 @@ class LLMContentFilter(RelevantContentFilter):
|
||||
"LLM markdown: Split content into {chunk_count} chunks",
|
||||
tag="CHUNK",
|
||||
params={"chunk_count": len(html_chunks)},
|
||||
colors={"chunk_count": Fore.YELLOW},
|
||||
colors={"chunk_count": LogColor.YELLOW},
|
||||
)
|
||||
|
||||
start_time = time.time()
|
||||
@@ -959,10 +976,10 @@ class LLMContentFilter(RelevantContentFilter):
|
||||
|
||||
future = executor.submit(
|
||||
_proceed_with_chunk,
|
||||
self.llmConfig.provider,
|
||||
self.llm_config.provider,
|
||||
prompt,
|
||||
self.llmConfig.api_token,
|
||||
self.llmConfig.base_url,
|
||||
self.llm_config.api_token,
|
||||
self.llm_config.base_url,
|
||||
self.extra_args,
|
||||
)
|
||||
futures.append((i, future))
|
||||
@@ -1019,7 +1036,7 @@ class LLMContentFilter(RelevantContentFilter):
|
||||
"LLM markdown: Completed processing in {time:.2f}s",
|
||||
tag="LLM",
|
||||
params={"time": end_time - start_time},
|
||||
colors={"time": Fore.YELLOW},
|
||||
colors={"time": LogColor.YELLOW},
|
||||
)
|
||||
|
||||
result = ordered_results if ordered_results else []
|
||||
|
||||
@@ -28,6 +28,7 @@ from lxml import etree
|
||||
from lxml import html as lhtml
|
||||
from typing import List
|
||||
from .models import ScrapingResult, MediaItem, Link, Media, Links
|
||||
import copy
|
||||
|
||||
# Pre-compile regular expressions for Open Graph and Twitter metadata
|
||||
OG_REGEX = re.compile(r"^og:")
|
||||
@@ -48,7 +49,7 @@ def parse_srcset(s: str) -> List[Dict]:
|
||||
if len(parts) >= 1:
|
||||
url = parts[0]
|
||||
width = (
|
||||
parts[1].rstrip("w")
|
||||
parts[1].rstrip("w").split('.')[0]
|
||||
if len(parts) > 1 and parts[1].endswith("w")
|
||||
else None
|
||||
)
|
||||
@@ -128,7 +129,8 @@ class WebScrapingStrategy(ContentScrapingStrategy):
|
||||
Returns:
|
||||
ScrapingResult: A structured result containing the scraped content.
|
||||
"""
|
||||
raw_result = self._scrap(url, html, is_async=False, **kwargs)
|
||||
actual_url = kwargs.get("redirected_url", url)
|
||||
raw_result = self._scrap(actual_url, html, is_async=False, **kwargs)
|
||||
if raw_result is None:
|
||||
return ScrapingResult(
|
||||
cleaned_html="",
|
||||
@@ -155,6 +157,7 @@ class WebScrapingStrategy(ContentScrapingStrategy):
|
||||
for aud in raw_result.get("media", {}).get("audios", [])
|
||||
if aud
|
||||
],
|
||||
tables=raw_result.get("media", {}).get("tables", [])
|
||||
)
|
||||
|
||||
# Convert links
|
||||
@@ -193,6 +196,153 @@ class WebScrapingStrategy(ContentScrapingStrategy):
|
||||
"""
|
||||
return await asyncio.to_thread(self._scrap, url, html, **kwargs)
|
||||
|
||||
def is_data_table(self, table: Tag, **kwargs) -> bool:
|
||||
"""
|
||||
Determine if a table element is a data table (not a layout table).
|
||||
|
||||
Args:
|
||||
table (Tag): BeautifulSoup Tag representing a table element
|
||||
**kwargs: Additional keyword arguments including table_score_threshold
|
||||
|
||||
Returns:
|
||||
bool: True if the table is a data table, False otherwise
|
||||
"""
|
||||
score = 0
|
||||
|
||||
# Check for thead and tbody
|
||||
has_thead = len(table.select('thead')) > 0
|
||||
has_tbody = len(table.select('tbody')) > 0
|
||||
if has_thead:
|
||||
score += 2
|
||||
if has_tbody:
|
||||
score += 1
|
||||
|
||||
# Check for th elements
|
||||
th_count = len(table.select('th'))
|
||||
if th_count > 0:
|
||||
score += 2
|
||||
if has_thead or len(table.select('tr:first-child th')) > 0:
|
||||
score += 1
|
||||
|
||||
# Check for nested tables
|
||||
if len(table.select('table')) > 0:
|
||||
score -= 3
|
||||
|
||||
# Role attribute check
|
||||
role = table.get('role', '').lower()
|
||||
if role in {'presentation', 'none'}:
|
||||
score -= 3
|
||||
|
||||
# Column consistency
|
||||
rows = table.select('tr')
|
||||
if not rows:
|
||||
return False
|
||||
|
||||
col_counts = [len(row.select('td, th')) for row in rows]
|
||||
avg_cols = sum(col_counts) / len(col_counts)
|
||||
variance = sum((c - avg_cols)**2 for c in col_counts) / len(col_counts)
|
||||
if variance < 1:
|
||||
score += 2
|
||||
|
||||
# Caption and summary
|
||||
if table.select('caption'):
|
||||
score += 2
|
||||
if table.has_attr('summary') and table['summary']:
|
||||
score += 1
|
||||
|
||||
# Text density
|
||||
total_text = sum(len(cell.get_text().strip()) for row in rows for cell in row.select('td, th'))
|
||||
total_tags = sum(1 for _ in table.descendants if isinstance(_, Tag))
|
||||
text_ratio = total_text / (total_tags + 1e-5)
|
||||
if text_ratio > 20:
|
||||
score += 3
|
||||
elif text_ratio > 10:
|
||||
score += 2
|
||||
|
||||
# Data attributes
|
||||
data_attrs = sum(1 for attr in table.attrs if attr.startswith('data-'))
|
||||
score += data_attrs * 0.5
|
||||
|
||||
# Size check
|
||||
if avg_cols >= 2 and len(rows) >= 2:
|
||||
score += 2
|
||||
|
||||
threshold = kwargs.get('table_score_threshold', 7)
|
||||
return score >= threshold
|
||||
|
||||
def extract_table_data(self, table: Tag) -> dict:
|
||||
"""
|
||||
Extract structured data from a table element.
|
||||
|
||||
Args:
|
||||
table (Tag): BeautifulSoup Tag representing a table element
|
||||
|
||||
Returns:
|
||||
dict: Dictionary containing table data (headers, rows, caption, summary)
|
||||
"""
|
||||
caption_elem = table.select_one('caption')
|
||||
caption = caption_elem.get_text().strip() if caption_elem else ""
|
||||
summary = table.get('summary', '').strip()
|
||||
|
||||
# Extract headers with colspan handling
|
||||
headers = []
|
||||
thead_rows = table.select('thead tr')
|
||||
if thead_rows:
|
||||
header_cells = thead_rows[0].select('th')
|
||||
for cell in header_cells:
|
||||
text = cell.get_text().strip()
|
||||
colspan = int(cell.get('colspan', 1))
|
||||
headers.extend([text] * colspan)
|
||||
else:
|
||||
first_row = table.select('tr:first-child')
|
||||
if first_row:
|
||||
for cell in first_row[0].select('th, td'):
|
||||
text = cell.get_text().strip()
|
||||
colspan = int(cell.get('colspan', 1))
|
||||
headers.extend([text] * colspan)
|
||||
|
||||
# Extract rows with colspan handling
|
||||
rows = []
|
||||
all_rows = table.select('tr')
|
||||
thead = table.select_one('thead')
|
||||
tbody_rows = []
|
||||
|
||||
if thead:
|
||||
thead_rows = thead.select('tr')
|
||||
tbody_rows = [row for row in all_rows if row not in thead_rows]
|
||||
else:
|
||||
if all_rows and all_rows[0].select('th'):
|
||||
tbody_rows = all_rows[1:]
|
||||
else:
|
||||
tbody_rows = all_rows
|
||||
|
||||
for row in tbody_rows:
|
||||
# for row in table.select('tr:not(:has(ancestor::thead))'):
|
||||
row_data = []
|
||||
for cell in row.select('td'):
|
||||
text = cell.get_text().strip()
|
||||
colspan = int(cell.get('colspan', 1))
|
||||
row_data.extend([text] * colspan)
|
||||
if row_data:
|
||||
rows.append(row_data)
|
||||
|
||||
# Align rows with headers
|
||||
max_columns = len(headers) if headers else (max(len(row) for row in rows) if rows else 0)
|
||||
aligned_rows = []
|
||||
for row in rows:
|
||||
aligned = row[:max_columns] + [''] * (max_columns - len(row))
|
||||
aligned_rows.append(aligned)
|
||||
|
||||
if not headers:
|
||||
headers = [f"Column {i+1}" for i in range(max_columns)]
|
||||
|
||||
return {
|
||||
"headers": headers,
|
||||
"rows": aligned_rows,
|
||||
"caption": caption,
|
||||
"summary": summary,
|
||||
}
|
||||
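is_data_table and extract_table_data together let the scraper keep only genuine data tables. A minimal sketch of driving them directly on a standalone snippet, assuming WebScrapingStrategy() can be constructed without arguments; the threshold value is illustrative:

from bs4 import BeautifulSoup

html = """
<table>
  <caption>Quarterly revenue</caption>
  <thead><tr><th>Quarter</th><th>Revenue</th></tr></thead>
  <tbody><tr><td>Q1</td><td>1.2M</td></tr><tr><td>Q2</td><td>1.4M</td></tr></tbody>
</table>
"""
strategy = WebScrapingStrategy()
table = BeautifulSoup(html, "lxml").find("table")
if strategy.is_data_table(table, table_score_threshold=7):
    data = strategy.extract_table_data(table)
    # data["headers"] == ["Quarter", "Revenue"]
    # data["rows"]    == [["Q1", "1.2M"], ["Q2", "1.4M"]]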
|
||||
def flatten_nested_elements(self, node):
|
||||
"""
|
||||
Flatten nested elements in an HTML tree.
|
||||
@@ -431,7 +581,7 @@ class WebScrapingStrategy(ContentScrapingStrategy):
|
||||
Returns:
|
||||
dict: A dictionary containing the processed element information.
|
||||
"""
|
||||
media = {"images": [], "videos": [], "audios": []}
|
||||
media = {"images": [], "videos": [], "audios": [], "tables": []}
|
||||
internal_links_dict = {}
|
||||
external_links_dict = {}
|
||||
self._process_element(
|
||||
@@ -471,6 +621,9 @@ class WebScrapingStrategy(ContentScrapingStrategy):
|
||||
return False
|
||||
|
||||
keep_element = False
|
||||
# Special case for table elements - always preserve structure
|
||||
if element.name in ["tr", "td", "th"]:
|
||||
keep_element = True
|
||||
|
||||
exclude_domains = kwargs.get("exclude_domains", [])
|
||||
# exclude_social_media_domains = kwargs.get('exclude_social_media_domains', set(SOCIAL_MEDIA_DOMAINS))
|
||||
@@ -688,6 +841,7 @@ class WebScrapingStrategy(ContentScrapingStrategy):
|
||||
html: str,
|
||||
word_count_threshold: int = MIN_WORD_THRESHOLD,
|
||||
css_selector: str = None,
|
||||
target_elements: List[str] = None,
|
||||
**kwargs,
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
@@ -710,7 +864,15 @@ class WebScrapingStrategy(ContentScrapingStrategy):
|
||||
parser_type = kwargs.get("parser", "lxml")
|
||||
soup = BeautifulSoup(html, parser_type)
|
||||
body = soup.body
|
||||
if body is None:
|
||||
raise Exception("'<body>' tag is not found in fetched html. Consider adding wait_for=\"css:body\" to wait for body tag to be loaded into DOM.")
|
||||
base_domain = get_base_domain(url)
|
||||
|
||||
# Early removal of all images if exclude_all_images is set
|
||||
# This happens before any processing to minimize memory usage
|
||||
if kwargs.get("exclude_all_images", False):
|
||||
for img in body.find_all('img'):
|
||||
img.decompose()
|
||||
|
||||
try:
|
||||
meta = extract_metadata("", soup)
|
||||
@@ -742,22 +904,20 @@ class WebScrapingStrategy(ContentScrapingStrategy):
|
||||
for element in body.select(excluded_selector):
|
||||
element.extract()
|
||||
|
||||
if css_selector:
|
||||
selected_elements = body.select(css_selector)
|
||||
if not selected_elements:
|
||||
return {
|
||||
"markdown": "",
|
||||
"cleaned_html": "",
|
||||
"success": True,
|
||||
"media": {"images": [], "videos": [], "audios": []},
|
||||
"links": {"internal": [], "external": []},
|
||||
"metadata": {},
|
||||
"message": f"No elements found for CSS selector: {css_selector}",
|
||||
}
|
||||
# raise InvalidCSSSelectorError(f"Invalid CSS selector, No elements found for CSS selector: {css_selector}")
|
||||
body = soup.new_tag("div")
|
||||
for el in selected_elements:
|
||||
body.append(el)
|
||||
content_element = None
|
||||
if target_elements:
|
||||
try:
|
||||
for_content_targeted_element = []
|
||||
for target_element in target_elements:
|
||||
for_content_targeted_element.extend(body.select(target_element))
|
||||
content_element = soup.new_tag("div")
|
||||
for el in for_content_targeted_element:
|
||||
content_element.append(copy.deepcopy(el))
|
||||
except Exception as e:
|
||||
self._log("error", f"Error with target element detection: {str(e)}", "SCRAPE")
|
||||
return None
|
||||
else:
|
||||
content_element = body
|
||||
|
||||
kwargs["exclude_social_media_domains"] = set(
|
||||
kwargs.get("exclude_social_media_domains", []) + SOCIAL_MEDIA_DOMAINS
|
||||
@@ -797,6 +957,15 @@ class WebScrapingStrategy(ContentScrapingStrategy):
|
||||
if result is not None
|
||||
for img in result
|
||||
]
|
||||
|
||||
# Process tables if not excluded
|
||||
excluded_tags = set(kwargs.get("excluded_tags", []) or [])
|
||||
if 'table' not in excluded_tags:
|
||||
tables = body.find_all('table')
|
||||
for table in tables:
|
||||
if self.is_data_table(table, **kwargs):
|
||||
table_data = self.extract_table_data(table)
|
||||
media["tables"].append(table_data)
|
||||
|
||||
body = self.flatten_nested_elements(body)
|
||||
base64_pattern = re.compile(r'data:image/[^;]+;base64,([^"]+)')
|
||||
@@ -808,7 +977,7 @@ class WebScrapingStrategy(ContentScrapingStrategy):
|
||||
|
||||
str_body = ""
|
||||
try:
|
||||
str_body = body.encode_contents().decode("utf-8")
|
||||
str_body = content_element.encode_contents().decode("utf-8")
|
||||
except Exception:
|
||||
# Reset body to the original HTML
|
||||
success = False
|
||||
@@ -847,7 +1016,6 @@ class WebScrapingStrategy(ContentScrapingStrategy):
|
||||
cleaned_html = str_body.replace("\n\n", "\n").replace(" ", " ")
|
||||
|
||||
return {
|
||||
# **markdown_content,
|
||||
"cleaned_html": cleaned_html,
|
||||
"success": success,
|
||||
"media": media,
|
||||
@@ -1130,6 +1298,9 @@ class LXMLWebScrapingStrategy(WebScrapingStrategy):
|
||||
"source",
|
||||
"track",
|
||||
"wbr",
|
||||
"tr",
|
||||
"td",
|
||||
"th",
|
||||
}
|
||||
|
||||
for el in reversed(list(root.iterdescendants())):
|
||||
@@ -1187,12 +1358,125 @@ class LXMLWebScrapingStrategy(WebScrapingStrategy):
|
||||
|
||||
return root
|
||||
|
||||
def is_data_table(self, table: etree.Element, **kwargs) -> bool:
|
||||
score = 0
|
||||
# Check for thead and tbody
|
||||
has_thead = len(table.xpath(".//thead")) > 0
|
||||
has_tbody = len(table.xpath(".//tbody")) > 0
|
||||
if has_thead:
|
||||
score += 2
|
||||
if has_tbody:
|
||||
score += 1
|
||||
|
||||
# Check for th elements
|
||||
th_count = len(table.xpath(".//th"))
|
||||
if th_count > 0:
|
||||
score += 2
|
||||
if has_thead or table.xpath(".//tr[1]/th"):
|
||||
score += 1
|
||||
|
||||
# Check for nested tables
|
||||
if len(table.xpath(".//table")) > 0:
|
||||
score -= 3
|
||||
|
||||
# Role attribute check
|
||||
role = table.get("role", "").lower()
|
||||
if role in {"presentation", "none"}:
|
||||
score -= 3
|
||||
|
||||
# Column consistency
|
||||
rows = table.xpath(".//tr")
|
||||
if not rows:
|
||||
return False
|
||||
col_counts = [len(row.xpath(".//td|.//th")) for row in rows]
|
||||
avg_cols = sum(col_counts) / len(col_counts)
|
||||
variance = sum((c - avg_cols)**2 for c in col_counts) / len(col_counts)
|
||||
if variance < 1:
|
||||
score += 2
|
||||
|
||||
# Caption and summary
|
||||
if table.xpath(".//caption"):
|
||||
score += 2
|
||||
if table.get("summary"):
|
||||
score += 1
|
||||
|
||||
# Text density
|
||||
total_text = sum(len(''.join(cell.itertext()).strip()) for row in rows for cell in row.xpath(".//td|.//th"))
|
||||
total_tags = sum(1 for _ in table.iterdescendants())
|
||||
text_ratio = total_text / (total_tags + 1e-5)
|
||||
if text_ratio > 20:
|
||||
score += 3
|
||||
elif text_ratio > 10:
|
||||
score += 2
|
||||
|
||||
# Data attributes
|
||||
data_attrs = sum(1 for attr in table.attrib if attr.startswith('data-'))
|
||||
score += data_attrs * 0.5
|
||||
|
||||
# Size check
|
||||
if avg_cols >= 2 and len(rows) >= 2:
|
||||
score += 2
|
||||
|
||||
threshold = kwargs.get("table_score_threshold", 7)
|
||||
return score >= threshold
|
||||
|
||||
def extract_table_data(self, table: etree.Element) -> dict:
|
||||
caption = table.xpath(".//caption/text()")
|
||||
caption = caption[0].strip() if caption else ""
|
||||
summary = table.get("summary", "").strip()
|
||||
|
||||
# Extract headers with colspan handling
|
||||
headers = []
|
||||
thead_rows = table.xpath(".//thead/tr")
|
||||
if thead_rows:
|
||||
header_cells = thead_rows[0].xpath(".//th")
|
||||
for cell in header_cells:
|
||||
text = cell.text_content().strip()
|
||||
colspan = int(cell.get("colspan", 1))
|
||||
headers.extend([text] * colspan)
|
||||
else:
|
||||
first_row = table.xpath(".//tr[1]")
|
||||
if first_row:
|
||||
for cell in first_row[0].xpath(".//th|.//td"):
|
||||
text = cell.text_content().strip()
|
||||
colspan = int(cell.get("colspan", 1))
|
||||
headers.extend([text] * colspan)
|
||||
|
||||
# Extract rows with colspan handling
|
||||
rows = []
|
||||
for row in table.xpath(".//tr[not(ancestor::thead)]"):
|
||||
row_data = []
|
||||
for cell in row.xpath(".//td"):
|
||||
text = cell.text_content().strip()
|
||||
colspan = int(cell.get("colspan", 1))
|
||||
row_data.extend([text] * colspan)
|
||||
if row_data:
|
||||
rows.append(row_data)
|
||||
|
||||
# Align rows with headers
|
||||
max_columns = len(headers) if headers else (max(len(row) for row in rows) if rows else 0)
|
||||
aligned_rows = []
|
||||
for row in rows:
|
||||
aligned = row[:max_columns] + [''] * (max_columns - len(row))
|
||||
aligned_rows.append(aligned)
|
||||
|
||||
if not headers:
|
||||
headers = [f"Column {i+1}" for i in range(max_columns)]
|
||||
|
||||
return {
|
||||
"headers": headers,
|
||||
"rows": aligned_rows,
|
||||
"caption": caption,
|
||||
"summary": summary,
|
||||
}
|
||||
|
||||
def _scrap(
|
||||
self,
|
||||
url: str,
|
||||
html: str,
|
||||
word_count_threshold: int = MIN_WORD_THRESHOLD,
|
||||
css_selector: str = None,
|
||||
target_elements: List[str] = None,
|
||||
**kwargs,
|
||||
) -> Dict[str, Any]:
|
||||
if not html:
|
||||
@@ -1206,6 +1490,13 @@ class LXMLWebScrapingStrategy(WebScrapingStrategy):
|
||||
body = doc
|
||||
|
||||
base_domain = get_base_domain(url)
|
||||
|
||||
# Early removal of all images if exclude_all_images is set
|
||||
# This is more efficient in lxml as we remove elements before any processing
|
||||
if kwargs.get("exclude_all_images", False):
|
||||
for img in body.xpath('//img'):
|
||||
if img.getparent() is not None:
|
||||
img.getparent().remove(img)
|
||||
|
||||
# Add comment removal
|
||||
if kwargs.get("remove_comments", False):
|
||||
@@ -1242,25 +1533,19 @@ class LXMLWebScrapingStrategy(WebScrapingStrategy):
|
||||
self._log("error", f"Error extracting metadata: {str(e)}", "SCRAPE")
|
||||
meta = {}
|
||||
|
||||
# Handle CSS selector targeting
|
||||
if css_selector:
|
||||
content_element = None
|
||||
if target_elements:
|
||||
try:
|
||||
selected_elements = body.cssselect(css_selector)
|
||||
if not selected_elements:
|
||||
return {
|
||||
"markdown": "",
|
||||
"cleaned_html": "",
|
||||
"success": True,
|
||||
"media": {"images": [], "videos": [], "audios": []},
|
||||
"links": {"internal": [], "external": []},
|
||||
"metadata": meta,
|
||||
"message": f"No elements found for CSS selector: {css_selector}",
|
||||
}
|
||||
body = lhtml.Element("div")
|
||||
body.extend(selected_elements)
|
||||
for_content_targeted_element = []
|
||||
for target_element in target_elements:
|
||||
for_content_targeted_element.extend(body.cssselect(target_element))
|
||||
content_element = lhtml.Element("div")
|
||||
content_element.extend(copy.deepcopy(for_content_targeted_element))
|
||||
except Exception as e:
|
||||
self._log("error", f"Error with CSS selector: {str(e)}", "SCRAPE")
|
||||
self._log("error", f"Error with target element detection: {str(e)}", "SCRAPE")
|
||||
return None
|
||||
else:
|
||||
content_element = body
|
||||
|
||||
# Remove script and style tags
|
||||
for tag in ["script", "style", "link", "meta", "noscript"]:
|
||||
@@ -1284,7 +1569,7 @@ class LXMLWebScrapingStrategy(WebScrapingStrategy):
|
||||
form.getparent().remove(form)
|
||||
|
||||
# Process content
|
||||
media = {"images": [], "videos": [], "audios": []}
|
||||
media = {"images": [], "videos": [], "audios": [], "tables": []}
|
||||
internal_links_dict = {}
|
||||
external_links_dict = {}
|
||||
|
||||
@@ -1298,6 +1583,13 @@ class LXMLWebScrapingStrategy(WebScrapingStrategy):
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
if 'table' not in excluded_tags:
|
||||
tables = body.xpath(".//table")
|
||||
for table in tables:
|
||||
if self.is_data_table(table, **kwargs):
|
||||
table_data = self.extract_table_data(table)
|
||||
media["tables"].append(table_data)
|
||||
|
||||
# Handle only_text option
|
||||
if kwargs.get("only_text", False):
|
||||
for tag in ONLY_TEXT_ELIGIBLE_TAGS:
|
||||
@@ -1317,14 +1609,15 @@ class LXMLWebScrapingStrategy(WebScrapingStrategy):
|
||||
# Remove empty elements
|
||||
self.remove_empty_elements_fast(body, 1)
|
||||
|
||||
# Remvoe unneeded attributes
|
||||
# Remove unneeded attributes
|
||||
self.remove_unwanted_attributes_fast(
|
||||
body, keep_data_attributes=kwargs.get("keep_data_attributes", False)
|
||||
)
|
||||
|
||||
# Generate output HTML
|
||||
cleaned_html = lhtml.tostring(
|
||||
body,
|
||||
# body,
|
||||
content_element,
|
||||
encoding="unicode",
|
||||
pretty_print=True,
|
||||
method="html",
|
||||
@@ -1369,7 +1662,12 @@ class LXMLWebScrapingStrategy(WebScrapingStrategy):
|
||||
return {
|
||||
"cleaned_html": cleaned_html,
|
||||
"success": False,
|
||||
"media": {"images": [], "videos": [], "audios": []},
|
||||
"media": {
|
||||
"images": [],
|
||||
"videos": [],
|
||||
"audios": [],
|
||||
"tables": []
|
||||
},
|
||||
"links": {"internal": [], "external": []},
|
||||
"metadata": {},
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
from crawl4ai import BrowserConfig, AsyncWebCrawler, CrawlerRunConfig, CacheMode
|
||||
from crawl4ai.hub import BaseCrawler
|
||||
from crawl4ai.utils import optimize_html, get_home_folder
|
||||
from crawl4ai.utils import optimize_html, get_home_folder, preprocess_html_for_schema
|
||||
from crawl4ai.extraction_strategy import JsonCssExtractionStrategy
|
||||
from pathlib import Path
|
||||
import json
|
||||
@@ -68,7 +68,8 @@ class GoogleSearchCrawler(BaseCrawler):
|
||||
home_dir = get_home_folder() if not schema_cache_path else schema_cache_path
|
||||
os.makedirs(f"{home_dir}/schema", exist_ok=True)
|
||||
|
||||
cleaned_html = optimize_html(html, threshold=100)
|
||||
# cleaned_html = optimize_html(html, threshold=100)
|
||||
cleaned_html = preprocess_html_for_schema(html)
|
||||
|
||||
organic_schema = None
|
||||
if os.path.exists(f"{home_dir}/schema/organic_schema.json"):
|
||||
|
||||
@@ -11,6 +11,7 @@ from .scorers import URLScorer
|
||||
from . import DeepCrawlStrategy
|
||||
|
||||
from ..types import AsyncWebCrawler, CrawlerRunConfig, CrawlResult, RunManyReturn
|
||||
from ..utils import normalize_url_for_deep_crawl
|
||||
|
||||
from math import inf as infinity
|
||||
|
||||
@@ -106,13 +107,14 @@ class BestFirstCrawlingStrategy(DeepCrawlStrategy):
|
||||
valid_links = []
|
||||
for link in links:
|
||||
url = link.get("href")
|
||||
if url in visited:
|
||||
base_url = normalize_url_for_deep_crawl(url, source_url)
|
||||
if base_url in visited:
|
||||
continue
|
||||
if not await self.can_process_url(url, new_depth):
|
||||
self.stats.urls_skipped += 1
|
||||
continue
|
||||
|
||||
valid_links.append(url)
|
||||
valid_links.append(base_url)
|
||||
|
||||
# If we have more valid links than capacity, limit them
|
||||
if len(valid_links) > remaining_capacity:
|
||||
|
||||
@@ -10,6 +10,7 @@ from .filters import FilterChain
|
||||
from .scorers import URLScorer
|
||||
from . import DeepCrawlStrategy
|
||||
from ..types import AsyncWebCrawler, CrawlerRunConfig, CrawlResult
|
||||
from ..utils import normalize_url_for_deep_crawl, efficient_normalize_url_for_deep_crawl
|
||||
from math import inf as infinity
|
||||
|
||||
class BFSDeepCrawlStrategy(DeepCrawlStrategy):
|
||||
@@ -99,22 +100,26 @@ class BFSDeepCrawlStrategy(DeepCrawlStrategy):
|
||||
# First collect all valid links
|
||||
for link in links:
|
||||
url = link.get("href")
|
||||
if url in visited:
|
||||
# Strip URL fragments to avoid duplicate crawling
|
||||
# base_url = url.split('#')[0] if url else url
|
||||
base_url = normalize_url_for_deep_crawl(url, source_url)
|
||||
if base_url in visited:
|
||||
continue
|
||||
if not await self.can_process_url(url, next_depth):
|
||||
self.stats.urls_skipped += 1
|
||||
continue
|
||||
|
||||
# Score the URL if a scorer is provided
|
||||
score = self.url_scorer.score(url) if self.url_scorer else 0
|
||||
score = self.url_scorer.score(base_url) if self.url_scorer else 0
|
||||
|
||||
# Skip URLs with scores below the threshold
|
||||
if score < self.score_threshold:
|
||||
self.logger.debug(f"URL {url} skipped: score {score} below threshold {self.score_threshold}")
|
||||
self.stats.urls_skipped += 1
|
||||
continue
|
||||
|
||||
valid_links.append((url, score))
|
||||
|
||||
visited.add(base_url)
|
||||
valid_links.append((base_url, score))
|
||||
|
||||
# If we have more valid links than capacity, sort by score and take the top ones
|
||||
if len(valid_links) > remaining_capacity:
|
||||
@@ -154,7 +159,6 @@ class BFSDeepCrawlStrategy(DeepCrawlStrategy):
|
||||
while current_level and not self._cancel_event.is_set():
|
||||
next_level: List[Tuple[str, Optional[str]]] = []
|
||||
urls = [url for url, _ in current_level]
|
||||
visited.update(urls)
|
||||
|
||||
# Clone the config to disable deep crawling recursion and enforce batch mode.
|
||||
batch_config = config.clone(deep_crawl_strategy=None, stream=False)
|
||||
|
||||
@@ -124,6 +124,7 @@ class URLPatternFilter(URLFilter):
|
||||
"_simple_prefixes",
|
||||
"_domain_patterns",
|
||||
"_path_patterns",
|
||||
"_reverse",
|
||||
)
|
||||
|
||||
PATTERN_TYPES = {
|
||||
@@ -138,8 +139,10 @@ class URLPatternFilter(URLFilter):
|
||||
self,
|
||||
patterns: Union[str, Pattern, List[Union[str, Pattern]]],
|
||||
use_glob: bool = True,
|
||||
reverse: bool = False,
|
||||
):
|
||||
super().__init__()
|
||||
self._reverse = reverse
|
||||
patterns = [patterns] if isinstance(patterns, (str, Pattern)) else patterns
|
||||
|
||||
self._simple_suffixes = set()
|
||||
@@ -205,36 +208,40 @@ class URLPatternFilter(URLFilter):
|
||||
|
||||
@lru_cache(maxsize=10000)
|
||||
def apply(self, url: str) -> bool:
|
||||
"""Hierarchical pattern matching"""
|
||||
# Quick suffix check (*.html)
|
||||
if self._simple_suffixes:
|
||||
path = url.split("?")[0]
|
||||
if path.split("/")[-1].split(".")[-1] in self._simple_suffixes:
|
||||
self._update_stats(True)
|
||||
return True
|
||||
result = True
|
||||
self._update_stats(result)
|
||||
return not result if self._reverse else result
|
||||
|
||||
# Domain check
|
||||
if self._domain_patterns:
|
||||
for pattern in self._domain_patterns:
|
||||
if pattern.match(url):
|
||||
self._update_stats(True)
|
||||
return True
|
||||
result = True
|
||||
self._update_stats(result)
|
||||
return not result if self._reverse else result
|
||||
|
||||
# Prefix check (/foo/*)
|
||||
if self._simple_prefixes:
|
||||
path = url.split("?")[0]
|
||||
if any(path.startswith(p) for p in self._simple_prefixes):
|
||||
self._update_stats(True)
|
||||
return True
|
||||
result = True
|
||||
self._update_stats(result)
|
||||
return not result if self._reverse else result
|
||||
|
||||
# Complex patterns
|
||||
if self._path_patterns:
|
||||
if any(p.search(url) for p in self._path_patterns):
|
||||
self._update_stats(True)
|
||||
return True
|
||||
result = True
|
||||
self._update_stats(result)
|
||||
return not result if self._reverse else result
|
||||
|
||||
self._update_stats(False)
|
||||
return False
|
||||
result = False
|
||||
self._update_stats(result)
|
||||
return not result if self._reverse else result
|
||||
|
||||
|
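The reverse flag inverts every match decision before it is returned, turning an include-pattern filter into an exclude-pattern filter. A short sketch, assuming "*.pdf" is categorized as a suffix pattern by the existing pattern detection:

# Keep every URL except the ones matching these patterns.
exclude_filter = URLPatternFilter(patterns=["*.pdf", "*/archive/*"], reverse=True)

exclude_filter.apply("https://example.com/report.pdf")    # False – matched, then inverted
exclude_filter.apply("https://example.com/blog/post-1")   # True  – no pattern matched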
||||
class ContentTypeFilter(URLFilter):
|
||||
@@ -427,6 +434,11 @@ class DomainFilter(URLFilter):
|
||||
if isinstance(domains, str):
|
||||
return {domains.lower()}
|
||||
return {d.lower() for d in domains}
|
||||
|
||||
@staticmethod
|
||||
def _is_subdomain(domain: str, parent_domain: str) -> bool:
|
||||
"""Check if domain is a subdomain of parent_domain"""
|
||||
return domain == parent_domain or domain.endswith(f".{parent_domain}")
|
||||
|
||||
@staticmethod
|
||||
@lru_cache(maxsize=10000)
|
||||
@@ -444,20 +456,26 @@ class DomainFilter(URLFilter):
|
||||
|
||||
domain = self._extract_domain(url)
|
||||
|
||||
# Early return for blocked domains
|
||||
if domain in self._blocked_domains:
|
||||
self._update_stats(False)
|
||||
return False
|
||||
# Check for blocked domains, including subdomains
|
||||
for blocked in self._blocked_domains:
|
||||
if self._is_subdomain(domain, blocked):
|
||||
self._update_stats(False)
|
||||
return False
|
||||
|
||||
# If no allowed domains specified, accept all non-blocked
|
||||
if self._allowed_domains is None:
|
||||
self._update_stats(True)
|
||||
return True
|
||||
|
||||
# Final allowed domains check
|
||||
result = domain in self._allowed_domains
|
||||
self._update_stats(result)
|
||||
return result
|
||||
# Check if domain matches any allowed domain (including subdomains)
|
||||
for allowed in self._allowed_domains:
|
||||
if self._is_subdomain(domain, allowed):
|
||||
self._update_stats(True)
|
||||
return True
|
||||
|
||||
# No matches found
|
||||
self._update_stats(False)
|
||||
return False
|
||||
|
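With _is_subdomain, both the blocked and allowed checks now cover subdomains of the listed domains. A brief illustration; the constructor parameter names are assumed from the class's existing fields:

domain_filter = DomainFilter(
    allowed_domains=["example.com"],
    blocked_domains=["ads.example.com"],
)

domain_filter.apply("https://example.com/page")        # True  – exact allowed domain
domain_filter.apply("https://blog.example.com/post")   # True  – subdomain of an allowed domain
domain_filter.apply("https://ads.example.com/banner")  # False – blocked entries are checked first
domain_filter.apply("https://other.org/")              # False – not under any allowed domain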
||||
|
||||
class ContentRelevanceFilter(URLFilter):
|
||||
|
||||
File diff suppressed because it is too large
@@ -40,12 +40,55 @@ def setup_home_directory():
|
||||
f.write("")
|
||||
|
||||
def post_install():
|
||||
"""Run all post-installation tasks"""
|
||||
"""
|
||||
Run all post-installation tasks.
|
||||
Checks CRAWL4AI_MODE environment variable. If set to 'api',
|
||||
skips Playwright browser installation.
|
||||
"""
|
||||
logger.info("Running post-installation setup...", tag="INIT")
|
||||
setup_home_directory()
|
||||
install_playwright()
|
||||
|
||||
# Check environment variable to conditionally skip Playwright install
|
||||
run_mode = os.getenv('CRAWL4AI_MODE')
|
||||
if run_mode == 'api':
|
||||
logger.warning(
|
||||
"CRAWL4AI_MODE=api detected. Skipping Playwright browser installation.",
|
||||
tag="SETUP"
|
||||
)
|
||||
else:
|
||||
# Proceed with installation only if mode is not 'api'
|
||||
install_playwright()
|
||||
|
||||
run_migration()
|
||||
# TODO: Will be added in the future
|
||||
# setup_builtin_browser()
|
||||
logger.success("Post-installation setup completed!", tag="COMPLETE")
|
||||
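For an API-only deployment, the Playwright download can be skipped by setting the variable before running the post-install step. A hedged sketch; the import path for post_install is an assumption made for illustration:

import os

os.environ["CRAWL4AI_MODE"] = "api"   # must be set before post_install() runs

from crawl4ai.install import post_install   # module path assumed for illustration
post_install()   # home directory setup and migrations run; Playwright browser install is skipped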
|
||||
def setup_builtin_browser():
|
||||
"""Set up a builtin browser for use with Crawl4AI"""
|
||||
try:
|
||||
logger.info("Setting up builtin browser...", tag="INIT")
|
||||
asyncio.run(_setup_builtin_browser())
|
||||
logger.success("Builtin browser setup completed!", tag="COMPLETE")
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to set up builtin browser: {e}")
|
||||
logger.warning("You can manually set up a builtin browser using 'crawl4ai-doctor builtin-browser-start'")
|
||||
|
||||
async def _setup_builtin_browser():
|
||||
try:
|
||||
# Import BrowserProfiler here to avoid circular imports
|
||||
from .browser_profiler import BrowserProfiler
|
||||
profiler = BrowserProfiler(logger=logger)
|
||||
|
||||
# Launch the builtin browser
|
||||
cdp_url = await profiler.launch_builtin_browser(headless=True)
|
||||
if cdp_url:
|
||||
logger.success(f"Builtin browser launched at {cdp_url}", tag="BROWSER")
|
||||
else:
|
||||
logger.warning("Failed to launch builtin browser", tag="BROWSER")
|
||||
except Exception as e:
|
||||
logger.warning(f"Error setting up builtin browser: {e}", tag="BROWSER")
|
||||
raise
|
||||
|
||||
|
||||
def install_playwright():
|
||||
|
||||
@@ -115,5 +115,6 @@ async () => {
|
||||
document.body.style.overflow = "auto";
|
||||
|
||||
// Wait a bit for any animations to complete
|
||||
await new Promise((resolve) => setTimeout(resolve, 100));
|
||||
document.body.scrollIntoView(false);
|
||||
await new Promise((resolve) => setTimeout(resolve, 50));
|
||||
};
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
from abc import ABC, abstractmethod
|
||||
from tabnanny import verbose
|
||||
from typing import Optional, Dict, Any, Tuple
|
||||
from .models import MarkdownGenerationResult
|
||||
from .html2text import CustomHTML2Text
|
||||
# from .types import RelevantContentFilter
|
||||
from .content_filter_strategy import RelevantContentFilter
|
||||
import re
|
||||
from urllib.parse import urljoin
|
||||
@@ -31,22 +31,24 @@ class MarkdownGenerationStrategy(ABC):
|
||||
content_filter: Optional[RelevantContentFilter] = None,
|
||||
options: Optional[Dict[str, Any]] = None,
|
||||
verbose: bool = False,
|
||||
content_source: str = "cleaned_html",
|
||||
):
|
||||
self.content_filter = content_filter
|
||||
self.options = options or {}
|
||||
self.verbose = verbose
|
||||
self.content_source = content_source
|
||||
|
||||
@abstractmethod
|
||||
def generate_markdown(
|
||||
self,
|
||||
cleaned_html: str,
|
||||
input_html: str,
|
||||
base_url: str = "",
|
||||
html2text_options: Optional[Dict[str, Any]] = None,
|
||||
content_filter: Optional[RelevantContentFilter] = None,
|
||||
citations: bool = True,
|
||||
**kwargs,
|
||||
) -> MarkdownGenerationResult:
|
||||
"""Generate markdown from cleaned HTML."""
|
||||
"""Generate markdown from the selected input HTML."""
|
||||
pass
|
||||
|
||||
|
||||
@@ -63,6 +65,7 @@ class DefaultMarkdownGenerator(MarkdownGenerationStrategy):
|
||||
Args:
|
||||
content_filter (Optional[RelevantContentFilter]): Content filter for generating fit markdown.
|
||||
options (Optional[Dict[str, Any]]): Additional options for markdown generation. Defaults to None.
|
||||
content_source (str): Source of content to generate markdown from. Options: "cleaned_html", "raw_html", "fit_html". Defaults to "cleaned_html".
|
||||
|
||||
Returns:
|
||||
MarkdownGenerationResult: Result containing raw markdown, fit markdown, fit HTML, and references markdown.
|
||||
@@ -72,8 +75,9 @@ class DefaultMarkdownGenerator(MarkdownGenerationStrategy):
|
||||
self,
|
||||
content_filter: Optional[RelevantContentFilter] = None,
|
||||
options: Optional[Dict[str, Any]] = None,
|
||||
content_source: str = "cleaned_html",
|
||||
):
|
||||
super().__init__(content_filter, options)
|
||||
super().__init__(content_filter, options, verbose=False, content_source=content_source)
|
||||
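content_source now controls which HTML snapshot the markdown pass consumes. A minimal sketch of switching to the raw page; the CrawlerRunConfig wiring is assumed from the wider library API:

md_generator = DefaultMarkdownGenerator(content_source="raw_html")   # or "cleaned_html" / "fit_html"
run_config = CrawlerRunConfig(markdown_generator=md_generator)
# result.markdown.raw_markdown is then generated from the unprocessed page HTML.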
|
||||
def convert_links_to_citations(
|
||||
self, markdown: str, base_url: str = ""
|
||||
@@ -143,7 +147,7 @@ class DefaultMarkdownGenerator(MarkdownGenerationStrategy):
|
||||
|
||||
def generate_markdown(
|
||||
self,
|
||||
cleaned_html: str,
|
||||
input_html: str,
|
||||
base_url: str = "",
|
||||
html2text_options: Optional[Dict[str, Any]] = None,
|
||||
options: Optional[Dict[str, Any]] = None,
|
||||
@@ -152,16 +156,16 @@ class DefaultMarkdownGenerator(MarkdownGenerationStrategy):
|
||||
**kwargs,
|
||||
) -> MarkdownGenerationResult:
|
||||
"""
|
||||
Generate markdown with citations from cleaned HTML.
|
||||
Generate markdown with citations from the provided input HTML.
|
||||
|
||||
How it works:
|
||||
1. Generate raw markdown from cleaned HTML.
|
||||
1. Generate raw markdown from the input HTML.
|
||||
2. Convert links to citations.
|
||||
3. Generate fit markdown if content filter is provided.
|
||||
4. Return MarkdownGenerationResult.
|
||||
|
||||
Args:
|
||||
cleaned_html (str): Cleaned HTML content.
|
||||
input_html (str): The HTML content to process (selected based on content_source).
|
||||
base_url (str): Base URL for URL joins.
|
||||
html2text_options (Optional[Dict[str, Any]]): HTML2Text options.
|
||||
options (Optional[Dict[str, Any]]): Additional options for markdown generation.
|
||||
@@ -196,14 +200,14 @@ class DefaultMarkdownGenerator(MarkdownGenerationStrategy):
|
||||
h.update_params(**default_options)
|
||||
|
||||
# Ensure we have valid input
|
||||
if not cleaned_html:
|
||||
cleaned_html = ""
|
||||
elif not isinstance(cleaned_html, str):
|
||||
cleaned_html = str(cleaned_html)
|
||||
if not input_html:
|
||||
input_html = ""
|
||||
elif not isinstance(input_html, str):
|
||||
input_html = str(input_html)
|
||||
|
||||
# Generate raw markdown
|
||||
try:
|
||||
raw_markdown = h.handle(cleaned_html)
|
||||
raw_markdown = h.handle(input_html)
|
||||
except Exception as e:
|
||||
raw_markdown = f"Error converting HTML to markdown: {str(e)}"
|
||||
|
||||
@@ -228,7 +232,7 @@ class DefaultMarkdownGenerator(MarkdownGenerationStrategy):
|
||||
if content_filter or self.content_filter:
|
||||
try:
|
||||
content_filter = content_filter or self.content_filter
|
||||
filtered_html = content_filter.filter_content(cleaned_html)
|
||||
filtered_html = content_filter.filter_content(input_html)
|
||||
filtered_html = "\n".join(
|
||||
"<div>{}</div>".format(s) for s in filtered_html
|
||||
)
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
from re import U
|
||||
from pydantic import BaseModel, HttpUrl, PrivateAttr
|
||||
from pydantic import BaseModel, HttpUrl, PrivateAttr, Field
|
||||
from typing import List, Dict, Optional, Callable, Awaitable, Union, Any
|
||||
from typing import AsyncGenerator
|
||||
from typing import Generic, TypeVar
|
||||
from enum import Enum
|
||||
from dataclasses import dataclass
|
||||
from .ssl_certificate import SSLCertificate
|
||||
@@ -28,7 +29,12 @@ class CrawlerTaskResult:
|
||||
start_time: Union[datetime, float]
|
||||
end_time: Union[datetime, float]
|
||||
error_message: str = ""
|
||||
|
||||
retry_count: int = 0
|
||||
wait_time: float = 0.0
|
||||
|
||||
@property
|
||||
def success(self) -> bool:
|
||||
return self.result.success
|
||||
|
||||
class CrawlStatus(Enum):
|
||||
QUEUED = "QUEUED"
|
||||
@@ -36,27 +42,39 @@ class CrawlStatus(Enum):
|
||||
COMPLETED = "COMPLETED"
|
||||
FAILED = "FAILED"
|
||||
|
||||
|
||||
@dataclass
|
||||
class CrawlStats:
|
||||
task_id: str
|
||||
url: str
|
||||
status: CrawlStatus
|
||||
start_time: Optional[datetime] = None
|
||||
end_time: Optional[datetime] = None
|
||||
start_time: Optional[Union[datetime, float]] = None
|
||||
end_time: Optional[Union[datetime, float]] = None
|
||||
memory_usage: float = 0.0
|
||||
peak_memory: float = 0.0
|
||||
error_message: str = ""
|
||||
wait_time: float = 0.0
|
||||
retry_count: int = 0
|
||||
counted_requeue: bool = False
|
||||
|
||||
@property
|
||||
def duration(self) -> str:
|
||||
if not self.start_time:
|
||||
return "0:00"
|
||||
|
||||
# Convert start_time to datetime if it's a float
|
||||
start = self.start_time
|
||||
if isinstance(start, float):
|
||||
start = datetime.fromtimestamp(start)
|
||||
|
||||
# Get end time or use current time
|
||||
end = self.end_time or datetime.now()
|
||||
duration = end - self.start_time
|
||||
# Convert end_time to datetime if it's a float
|
||||
if isinstance(end, float):
|
||||
end = datetime.fromtimestamp(end)
|
||||
|
||||
duration = end - start
|
||||
return str(timedelta(seconds=int(duration.total_seconds())))
|
||||
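duration now accepts either datetime objects or epoch floats for start_time/end_time and normalizes both before subtracting. A small worked example:

import time

stats = CrawlStats(
    task_id="t1",
    url="https://example.com",
    status=CrawlStatus.COMPLETED,
    start_time=time.time() - 90,   # float timestamp, 90 seconds ago
    end_time=time.time(),
)
print(stats.duration)   # "0:01:30"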
|
||||
|
||||
class DisplayMode(Enum):
|
||||
DETAILED = "DETAILED"
|
||||
AGGREGATED = "AGGREGATED"
|
||||
@@ -73,21 +91,11 @@ class TokenUsage:
|
||||
completion_tokens_details: Optional[dict] = None
|
||||
prompt_tokens_details: Optional[dict] = None
|
||||
|
||||
|
||||
class UrlModel(BaseModel):
|
||||
url: HttpUrl
|
||||
forced: bool = False
|
||||
|
||||
|
||||
class MarkdownGenerationResult(BaseModel):
|
||||
raw_markdown: str
|
||||
markdown_with_citations: str
|
||||
references_markdown: str
|
||||
fit_markdown: Optional[str] = None
|
||||
fit_html: Optional[str] = None
|
||||
|
||||
def __str__(self):
|
||||
return self.raw_markdown
|
||||
|
||||
@dataclass
|
||||
class TraversalStats:
|
||||
@@ -108,9 +116,20 @@ class DispatchResult(BaseModel):
|
||||
end_time: Union[datetime, float]
|
||||
error_message: str = ""
|
||||
|
||||
class MarkdownGenerationResult(BaseModel):
|
||||
raw_markdown: str
|
||||
markdown_with_citations: str
|
||||
references_markdown: str
|
||||
fit_markdown: Optional[str] = None
|
||||
fit_html: Optional[str] = None
|
||||
|
||||
def __str__(self):
|
||||
return self.raw_markdown
|
||||
|
||||
class CrawlResult(BaseModel):
|
||||
url: str
|
||||
html: str
|
||||
fit_html: Optional[str] = None
|
||||
success: bool
|
||||
cleaned_html: Optional[str] = None
|
||||
media: Dict[str, List[Dict]] = {}
|
||||
@@ -119,6 +138,7 @@ class CrawlResult(BaseModel):
|
||||
js_execution_result: Optional[Dict[str, Any]] = None
|
||||
screenshot: Optional[str] = None
|
||||
pdf: Optional[bytes] = None
|
||||
mhtml: Optional[str] = None
|
||||
_markdown: Optional[MarkdownGenerationResult] = PrivateAttr(default=None)
|
||||
extracted_content: Optional[str] = None
|
||||
metadata: Optional[dict] = None
|
||||
@@ -129,6 +149,9 @@ class CrawlResult(BaseModel):
|
||||
ssl_certificate: Optional[SSLCertificate] = None
|
||||
dispatch_result: Optional[DispatchResult] = None
|
||||
redirected_url: Optional[str] = None
|
||||
network_requests: Optional[List[Dict[str, Any]]] = None
|
||||
console_messages: Optional[List[Dict[str, Any]]] = None
|
||||
tables: List[Dict] = Field(default_factory=list) # NEW – [{headers,rows,caption,summary}]
|
||||
|
||||
class Config:
|
||||
arbitrary_types_allowed = True
|
||||
@@ -149,7 +172,11 @@ class CrawlResult(BaseModel):
|
||||
markdown_result = data.pop('markdown', None)
|
||||
super().__init__(**data)
|
||||
if markdown_result is not None:
|
||||
self._markdown = markdown_result
|
||||
self._markdown = (
|
||||
MarkdownGenerationResult(**markdown_result)
|
||||
if isinstance(markdown_result, dict)
|
||||
else markdown_result
|
||||
)
|
||||
|
||||
@property
|
||||
def markdown(self):
|
||||
@@ -241,6 +268,40 @@ class StringCompatibleMarkdown(str):
|
||||
def __getattr__(self, name):
|
||||
return getattr(self._markdown_result, name)
|
||||
|
||||
CrawlResultT = TypeVar('CrawlResultT', bound=CrawlResult)
|
||||
|
||||
class CrawlResultContainer(Generic[CrawlResultT]):
|
||||
def __init__(self, results: Union[CrawlResultT, List[CrawlResultT]]):
|
||||
# Normalize to a list
|
||||
if isinstance(results, list):
|
||||
self._results = results
|
||||
else:
|
||||
self._results = [results]
|
||||
|
||||
def __iter__(self):
|
||||
return iter(self._results)
|
||||
|
||||
def __getitem__(self, index):
|
||||
return self._results[index]
|
||||
|
||||
def __len__(self):
|
||||
return len(self._results)
|
||||
|
||||
def __getattr__(self, attr):
|
||||
# Delegate attribute access to the first element.
|
||||
if self._results:
|
||||
return getattr(self._results[0], attr)
|
||||
raise AttributeError(f"{self.__class__.__name__} object has no attribute '{attr}'")
|
||||
|
||||
def __repr__(self):
|
||||
return f"{self.__class__.__name__}({self._results!r})"
|
||||
|
||||
RunManyReturn = Union[
|
||||
CrawlResultContainer[CrawlResultT],
|
||||
AsyncGenerator[CrawlResultT, None]
|
||||
]
|
||||
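CrawlResultContainer lets batch-style code index and iterate over results while legacy single-result code keeps working, because unknown attributes are delegated to the first element. A brief sketch, where result_a and result_b stand in for CrawlResult instances:

container = CrawlResultContainer([result_a, result_b])

len(container)         # 2
container[1].url       # explicit indexing
container.success      # delegated to result_a.success
for res in container:  # iteration yields each CrawlResult in order
    print(res.url)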
|
||||
|
||||
# END of backward compatibility code for markdown/markdown_v2.
|
||||
# When removing this code in the future, make sure to:
|
||||
# 1. Replace the private attribute and property with a standard field
|
||||
@@ -253,15 +314,17 @@ class AsyncCrawlResponse(BaseModel):
|
||||
status_code: int
|
||||
screenshot: Optional[str] = None
|
||||
pdf_data: Optional[bytes] = None
|
||||
mhtml_data: Optional[str] = None
|
||||
get_delayed_content: Optional[Callable[[Optional[float]], Awaitable[str]]] = None
|
||||
downloaded_files: Optional[List[str]] = None
|
||||
ssl_certificate: Optional[SSLCertificate] = None
|
||||
redirected_url: Optional[str] = None
|
||||
network_requests: Optional[List[Dict[str, Any]]] = None
|
||||
console_messages: Optional[List[Dict[str, Any]]] = None
|
||||
|
||||
class Config:
|
||||
arbitrary_types_allowed = True
|
||||
|
||||
|
||||
###############################
|
||||
# Scraping Models
|
||||
###############################
|
||||
@@ -292,6 +355,7 @@ class Media(BaseModel):
|
||||
audios: List[
|
||||
MediaItem
|
||||
] = [] # Using MediaItem model for now, can be extended with Audio model if needed
|
||||
tables: List[Dict] = [] # Table data extracted from HTML tables
|
||||
|
||||
|
||||
class Links(BaseModel):
|
||||
|
||||
@@ -203,6 +203,62 @@ Avoid Common Mistakes:
|
||||
Result
|
||||
Output the final list of JSON objects, wrapped in <blocks>...</blocks> XML tags. Make sure to close the tag properly."""
|
||||
|
||||
PROMPT_EXTRACT_INFERRED_SCHEMA = """Here is the content from the URL:
|
||||
<url>{URL}</url>
|
||||
|
||||
<url_content>
|
||||
{HTML}
|
||||
</url_content>
|
||||
|
||||
Please carefully read the URL content and the user's request. Analyze the page structure and infer the most appropriate JSON schema based on the content and request.
|
||||
|
||||
Extraction Strategy:
|
||||
1. First, determine if the page contains repetitive items (like multiple products, articles, etc.) or a single content item (like a single article or page).
|
||||
2. For repetitive items: Identify the common pattern and extract each instance as a separate JSON object in an array.
|
||||
3. For single content: Extract the key information into a comprehensive JSON object that captures the essential details.
|
||||
|
||||
Extraction instructions:
|
||||
Return the extracted information as a list of JSON objects. For repetitive content, each object in the list should correspond to a distinct item. For single content, you may return just one detailed JSON object. Wrap the entire JSON list in <blocks>...</blocks> XML tags.
|
||||
|
||||
Schema Design Guidelines:
|
||||
- Create meaningful property names that clearly describe the data they contain
|
||||
- Use nested objects for hierarchical information
|
||||
- Use arrays for lists of related items
|
||||
- Include all information requested by the user
|
||||
- Maintain consistency in property names and data structures
|
||||
- Only include properties that are actually present in the content
|
||||
- For dates, prefer ISO format (YYYY-MM-DD)
|
||||
- For prices or numeric values, extract them without currency symbols when possible
|
||||
|
||||
Quality Reflection:
|
||||
Before outputting your final answer, double check that:
|
||||
1. The inferred schema makes logical sense for the type of content
|
||||
2. All requested information is included
|
||||
3. The JSON is valid and could be parsed without errors
|
||||
4. Property names are consistent and descriptive
|
||||
5. The structure is optimal for the type of data being represented
|
||||
|
||||
Avoid Common Mistakes:
|
||||
- Do NOT add any comments using "//" or "#" in the JSON output. It causes parsing errors.
|
||||
- Make sure the JSON is properly formatted with curly braces, square brackets, and commas in the right places.
|
||||
- Do not miss closing </blocks> tag at the end of the JSON output.
|
||||
- Do not generate Python code showing how to do the task; this is your task to extract the information and return it in JSON format.
|
||||
- Ensure consistency in property names across all objects
|
||||
- Don't include empty properties or null values unless they're meaningful
|
||||
- For repetitive content, ensure all objects follow the same schema
|
||||
|
||||
Important: If user specific instruction is provided, then stress significantly on what user is requesting and describing about the schema of end result (if any). If user is requesting to extract specific information, then focus on that and ignore the rest of the content.
|
||||
<user_request>
|
||||
{REQUEST}
|
||||
</user_request>
|
||||
|
||||
Result:
|
||||
Output the final list of JSON objects, wrapped in <blocks>...</blocks> XML tags. Make sure to close the tag properly.
|
||||
|
||||
DO NOT ADD ANY PRE OR POST COMMENTS. JUST RETURN THE JSON OBJECTS INSIDE <blocks>...</blocks> TAGS.
|
||||
|
||||
CRITICAL: The content inside the <blocks> tags MUST be a direct array of JSON objects (starting with '[' and ending with ']'), not a dictionary/object containing an array. For example, use <blocks>[{...}, {...}]</blocks> instead of <blocks>{"items": [{...}, {...}]}</blocks>. This is essential for proper parsing.
|
||||
"""
|
||||
|
||||
PROMPT_FILTER_CONTENT = """Your task is to filter and convert HTML content into clean, focused markdown that's optimized for use with LLMs and information retrieval systems.
|
||||
|
||||
|
||||
@@ -1,19 +1,133 @@
|
||||
from typing import List, Dict, Optional
|
||||
from abc import ABC, abstractmethod
|
||||
from itertools import cycle
|
||||
import os
|
||||
|
||||
|
||||
########### ATTENTION PEOPLE OF EARTH ###########
|
||||
# I have moved this config to async_configs.py and kept it here in case someone is still importing it;
# however, be a dear and follow `from crawl4ai import ProxyConfig` instead :)
|
||||
class ProxyConfig:
|
||||
def __init__(
|
||||
self,
|
||||
server: str,
|
||||
username: Optional[str] = None,
|
||||
password: Optional[str] = None,
|
||||
ip: Optional[str] = None,
|
||||
):
|
||||
"""Configuration class for a single proxy.
|
||||
|
||||
Args:
|
||||
server: Proxy server URL (e.g., "http://127.0.0.1:8080")
|
||||
username: Optional username for proxy authentication
|
||||
password: Optional password for proxy authentication
|
||||
ip: Optional IP address for verification purposes
|
||||
"""
|
||||
self.server = server
|
||||
self.username = username
|
||||
self.password = password
|
||||
|
||||
# Extract IP from server if not explicitly provided
|
||||
self.ip = ip or self._extract_ip_from_server()
|
||||
|
||||
def _extract_ip_from_server(self) -> Optional[str]:
|
||||
"""Extract IP address from server URL."""
|
||||
try:
|
||||
# Simple extraction assuming http://ip:port format
|
||||
if "://" in self.server:
|
||||
parts = self.server.split("://")[1].split(":")
|
||||
return parts[0]
|
||||
else:
|
||||
parts = self.server.split(":")
|
||||
return parts[0]
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def from_string(proxy_str: str) -> "ProxyConfig":
|
||||
"""Create a ProxyConfig from a string in the format 'ip:port:username:password'."""
|
||||
parts = proxy_str.split(":")
|
||||
if len(parts) == 4: # ip:port:username:password
|
||||
ip, port, username, password = parts
|
||||
return ProxyConfig(
|
||||
server=f"http://{ip}:{port}",
|
||||
username=username,
|
||||
password=password,
|
||||
ip=ip
|
||||
)
|
||||
elif len(parts) == 2: # ip:port only
|
||||
ip, port = parts
|
||||
return ProxyConfig(
|
||||
server=f"http://{ip}:{port}",
|
||||
ip=ip
|
||||
)
|
||||
else:
|
||||
raise ValueError(f"Invalid proxy string format: {proxy_str}")
|
||||
|
||||
@staticmethod
|
||||
def from_dict(proxy_dict: Dict) -> "ProxyConfig":
|
||||
"""Create a ProxyConfig from a dictionary."""
|
||||
return ProxyConfig(
|
||||
server=proxy_dict.get("server"),
|
||||
username=proxy_dict.get("username"),
|
||||
password=proxy_dict.get("password"),
|
||||
ip=proxy_dict.get("ip")
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def from_env(env_var: str = "PROXIES") -> List["ProxyConfig"]:
|
||||
"""Load proxies from environment variable.
|
||||
|
||||
Args:
|
||||
env_var: Name of environment variable containing comma-separated proxy strings
|
||||
|
||||
Returns:
|
||||
List of ProxyConfig objects
|
||||
"""
|
||||
proxies = []
|
||||
try:
|
||||
proxy_list = os.getenv(env_var, "").split(",")
|
||||
for proxy in proxy_list:
|
||||
if not proxy:
|
||||
continue
|
||||
proxies.append(ProxyConfig.from_string(proxy))
|
||||
except Exception as e:
|
||||
print(f"Error loading proxies from environment: {e}")
|
||||
return proxies
|
||||
|
||||
def to_dict(self) -> Dict:
|
||||
"""Convert to dictionary representation."""
|
||||
return {
|
||||
"server": self.server,
|
||||
"username": self.username,
|
||||
"password": self.password,
|
||||
"ip": self.ip
|
||||
}
|
||||
|
||||
def clone(self, **kwargs) -> "ProxyConfig":
|
||||
"""Create a copy of this configuration with updated values.
|
||||
|
||||
Args:
|
||||
**kwargs: Key-value pairs of configuration options to update
|
||||
|
||||
Returns:
|
||||
ProxyConfig: A new instance with the specified updates
|
||||
"""
|
||||
config_dict = self.to_dict()
|
||||
config_dict.update(kwargs)
|
||||
return ProxyConfig.from_dict(config_dict)
|
||||
|
||||
from crawl4ai.configs import ProxyConfig
|
||||
|
||||
class ProxyRotationStrategy(ABC):
|
||||
"""Base abstract class for proxy rotation strategies"""
|
||||
|
||||
@abstractmethod
|
||||
async def get_next_proxy(self) -> Optional[Dict]:
|
||||
async def get_next_proxy(self) -> Optional[ProxyConfig]:
|
||||
"""Get next proxy configuration from the strategy"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def add_proxies(self, proxies: List[Dict]):
|
||||
def add_proxies(self, proxies: List[ProxyConfig]):
|
||||
"""Add proxy configurations to the strategy"""
|
||||
pass
|
||||
|
||||
|
||||
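For orientation, a brief usage sketch of the ProxyConfig helpers defined above (values are placeholders; the preferred import path is the one named in the comment at the top of the class):

import os
from crawl4ai import ProxyConfig  # preferred import per the comment above

os.environ["PROXIES"] = "127.0.0.1:8080,10.0.0.2:3128:user:pass"   # placeholder proxies
proxies = ProxyConfig.from_env()                      # -> list of ProxyConfig objects
single = ProxyConfig.from_string("10.0.0.2:3128:user:pass")
print(single.to_dict())                               # {'server': 'http://10.0.0.2:3128', ...}
rotated = single.clone(username="other-user")         # copy with one field overridden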
@@ -9,83 +9,44 @@ from urllib.parse import urlparse
|
||||
import OpenSSL.crypto
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
class SSLCertificate:
|
||||
# === Inherit from dict ===
|
||||
class SSLCertificate(dict):
|
||||
"""
|
||||
A class representing an SSL certificate with methods to export in various formats.
|
||||
A class representing an SSL certificate, behaving like a dictionary
|
||||
for direct JSON serialization. It stores the certificate information internally
|
||||
and provides methods for export and property access.
|
||||
|
||||
Attributes:
|
||||
cert_info (Dict[str, Any]): The certificate information.
|
||||
|
||||
Methods:
|
||||
from_url(url: str, timeout: int = 10) -> Optional['SSLCertificate']: Create SSLCertificate instance from a URL.
|
||||
from_file(file_path: str) -> Optional['SSLCertificate']: Create SSLCertificate instance from a file.
|
||||
from_binary(binary_data: bytes) -> Optional['SSLCertificate']: Create SSLCertificate instance from binary data.
|
||||
export_as_pem() -> str: Export the certificate as PEM format.
|
||||
export_as_der() -> bytes: Export the certificate as DER format.
|
||||
export_as_json() -> Dict[str, Any]: Export the certificate as JSON format.
|
||||
export_as_text() -> str: Export the certificate as text format.
|
||||
Inherits from dict, so instances are directly JSON serializable.
|
||||
"""
|
||||
|
||||
# Use __slots__ for potential memory optimization if desired, though less common when inheriting dict
|
||||
# __slots__ = ("_cert_info",) # If using slots, be careful with dict inheritance interaction
|
||||
|
||||
def __init__(self, cert_info: Dict[str, Any]):
|
||||
self._cert_info = self._decode_cert_data(cert_info)
|
||||
|
||||
@staticmethod
|
||||
def from_url(url: str, timeout: int = 10) -> Optional["SSLCertificate"]:
|
||||
"""
|
||||
Create SSLCertificate instance from a URL.
|
||||
Initializes the SSLCertificate object.
|
||||
|
||||
Args:
|
||||
url (str): URL of the website.
|
||||
timeout (int): Timeout for the connection (default: 10).
|
||||
|
||||
Returns:
|
||||
Optional[SSLCertificate]: SSLCertificate instance if successful, None otherwise.
|
||||
cert_info (Dict[str, Any]): The raw certificate dictionary.
|
||||
"""
|
||||
try:
|
||||
hostname = urlparse(url).netloc
|
||||
if ":" in hostname:
|
||||
hostname = hostname.split(":")[0]
|
||||
# 1. Decode the data (handle bytes -> str)
|
||||
decoded_info = self._decode_cert_data(cert_info)
|
||||
|
||||
context = ssl.create_default_context()
|
||||
with socket.create_connection((hostname, 443), timeout=timeout) as sock:
|
||||
with context.wrap_socket(sock, server_hostname=hostname) as ssock:
|
||||
cert_binary = ssock.getpeercert(binary_form=True)
|
||||
x509 = OpenSSL.crypto.load_certificate(
|
||||
OpenSSL.crypto.FILETYPE_ASN1, cert_binary
|
||||
)
|
||||
# 2. Store the decoded info internally (optional but good practice)
|
||||
# self._cert_info = decoded_info # You can keep this if methods rely on it
|
||||
|
||||
cert_info = {
|
||||
"subject": dict(x509.get_subject().get_components()),
|
||||
"issuer": dict(x509.get_issuer().get_components()),
|
||||
"version": x509.get_version(),
|
||||
"serial_number": hex(x509.get_serial_number()),
|
||||
"not_before": x509.get_notBefore(),
|
||||
"not_after": x509.get_notAfter(),
|
||||
"fingerprint": x509.digest("sha256").hex(),
|
||||
"signature_algorithm": x509.get_signature_algorithm(),
|
||||
"raw_cert": base64.b64encode(cert_binary),
|
||||
}
|
||||
|
||||
# Add extensions
|
||||
extensions = []
|
||||
for i in range(x509.get_extension_count()):
|
||||
ext = x509.get_extension(i)
|
||||
extensions.append(
|
||||
{"name": ext.get_short_name(), "value": str(ext)}
|
||||
)
|
||||
cert_info["extensions"] = extensions
|
||||
|
||||
return SSLCertificate(cert_info)
|
||||
|
||||
except Exception:
|
||||
return None
|
||||
# 3. Initialize the dictionary part of the object with the decoded data
|
||||
super().__init__(decoded_info)
|
||||
|
||||
@staticmethod
|
||||
def _decode_cert_data(data: Any) -> Any:
|
||||
"""Helper method to decode bytes in certificate data."""
|
||||
if isinstance(data, bytes):
|
||||
return data.decode("utf-8")
|
||||
try:
|
||||
# Try UTF-8 first, fallback to latin-1 for arbitrary bytes
|
||||
return data.decode("utf-8")
|
||||
except UnicodeDecodeError:
|
||||
return data.decode("latin-1") # Or handle as needed, maybe hex representation
|
||||
elif isinstance(data, dict):
|
||||
return {
|
||||
(
|
||||
@@ -97,36 +58,119 @@ class SSLCertificate:
|
||||
return [SSLCertificate._decode_cert_data(item) for item in data]
|
||||
return data
|
||||
|
||||
@staticmethod
|
||||
def from_url(url: str, timeout: int = 10) -> Optional["SSLCertificate"]:
|
||||
"""
|
||||
Create SSLCertificate instance from a URL. Fetches cert info and initializes.
|
||||
(Fetching logic remains the same)
|
||||
"""
|
||||
cert_info_raw = None # Variable to hold the fetched dict
|
||||
try:
|
||||
hostname = urlparse(url).netloc
|
||||
if ":" in hostname:
|
||||
hostname = hostname.split(":")[0]
|
||||
|
||||
context = ssl.create_default_context()
|
||||
# Set check_hostname to False and verify_mode to CERT_NONE temporarily
|
||||
# for potentially problematic certificates during fetch, but parse the result regardless.
|
||||
# context.check_hostname = False
|
||||
# context.verify_mode = ssl.CERT_NONE
|
||||
|
||||
with socket.create_connection((hostname, 443), timeout=timeout) as sock:
|
||||
with context.wrap_socket(sock, server_hostname=hostname) as ssock:
|
||||
cert_binary = ssock.getpeercert(binary_form=True)
|
||||
if not cert_binary:
|
||||
print(f"Warning: No certificate returned for {hostname}")
|
||||
return None
|
||||
|
||||
x509 = OpenSSL.crypto.load_certificate(
|
||||
OpenSSL.crypto.FILETYPE_ASN1, cert_binary
|
||||
)
|
||||
|
||||
# Create the dictionary directly
|
||||
cert_info_raw = {
|
||||
"subject": dict(x509.get_subject().get_components()),
|
||||
"issuer": dict(x509.get_issuer().get_components()),
|
||||
"version": x509.get_version(),
|
||||
"serial_number": hex(x509.get_serial_number()),
|
||||
"not_before": x509.get_notBefore(), # Keep as bytes initially, _decode handles it
|
||||
"not_after": x509.get_notAfter(), # Keep as bytes initially
|
||||
"fingerprint": x509.digest("sha256").hex(), # hex() is already string
|
||||
"signature_algorithm": x509.get_signature_algorithm(), # Keep as bytes
|
||||
"raw_cert": base64.b64encode(cert_binary), # Base64 is bytes, _decode handles it
|
||||
}
|
||||
|
||||
# Add extensions
|
||||
extensions = []
|
||||
for i in range(x509.get_extension_count()):
|
||||
ext = x509.get_extension(i)
|
||||
# get_short_name() returns bytes, str(ext) handles value conversion
|
||||
extensions.append(
|
||||
{"name": ext.get_short_name(), "value": str(ext)}
|
||||
)
|
||||
cert_info_raw["extensions"] = extensions
|
||||
|
||||
except ssl.SSLCertVerificationError as e:
|
||||
print(f"SSL Verification Error for {url}: {e}")
|
||||
# Decide if you want to proceed or return None based on your needs
|
||||
# You might try fetching without verification here if needed, but be cautious.
|
||||
return None
|
||||
except socket.gaierror:
|
||||
print(f"Could not resolve hostname: {hostname}")
|
||||
return None
|
||||
except socket.timeout:
|
||||
print(f"Connection timed out for {url}")
|
||||
return None
|
||||
except Exception as e:
|
||||
print(f"Error fetching/processing certificate for {url}: {e}")
|
||||
# Log the full error details if needed: logging.exception("Cert fetch error")
|
||||
return None
|
||||
|
||||
# If successful, create the SSLCertificate instance from the dictionary
|
||||
if cert_info_raw:
|
||||
return SSLCertificate(cert_info_raw)
|
||||
else:
|
||||
return None
|
||||
|
||||
|
||||
# --- Properties now access the dictionary items directly via self[] ---
|
||||
@property
|
||||
def issuer(self) -> Dict[str, str]:
|
||||
return self.get("issuer", {}) # Use self.get for safety
|
||||
|
||||
@property
|
||||
def subject(self) -> Dict[str, str]:
|
||||
return self.get("subject", {})
|
||||
|
||||
@property
|
||||
def valid_from(self) -> str:
|
||||
return self.get("not_before", "")
|
||||
|
||||
@property
|
||||
def valid_until(self) -> str:
|
||||
return self.get("not_after", "")
|
||||
|
||||
@property
|
||||
def fingerprint(self) -> str:
|
||||
return self.get("fingerprint", "")
|
||||
|
||||
# --- Export methods can use `self` directly as it is the dict ---
|
||||
def to_json(self, filepath: Optional[str] = None) -> Optional[str]:
|
||||
"""
|
||||
Export certificate as JSON.
|
||||
|
||||
Args:
|
||||
filepath (Optional[str]): Path to save the JSON file (default: None).
|
||||
|
||||
Returns:
|
||||
Optional[str]: JSON string if successful, None otherwise.
|
||||
"""
|
||||
json_str = json.dumps(self._cert_info, indent=2, ensure_ascii=False)
|
||||
"""Export certificate as JSON."""
|
||||
# `self` is already the dictionary we want to serialize
|
||||
json_str = json.dumps(self, indent=2, ensure_ascii=False)
|
||||
if filepath:
|
||||
Path(filepath).write_text(json_str, encoding="utf-8")
|
||||
return None
|
||||
return json_str
|
||||
|
||||
def to_pem(self, filepath: Optional[str] = None) -> Optional[str]:
|
||||
"""
|
||||
Export certificate as PEM.
|
||||
|
||||
Args:
|
||||
filepath (Optional[str]): Path to save the PEM file (default: None).
|
||||
|
||||
Returns:
|
||||
Optional[str]: PEM string if successful, None otherwise.
|
||||
"""
|
||||
"""Export certificate as PEM."""
|
||||
try:
|
||||
# Decode the raw_cert (which should be string due to _decode)
|
||||
raw_cert_bytes = base64.b64decode(self.get("raw_cert", ""))
|
||||
x509 = OpenSSL.crypto.load_certificate(
|
||||
OpenSSL.crypto.FILETYPE_ASN1,
|
||||
base64.b64decode(self._cert_info["raw_cert"]),
|
||||
OpenSSL.crypto.FILETYPE_ASN1, raw_cert_bytes
|
||||
)
|
||||
pem_data = OpenSSL.crypto.dump_certificate(
|
||||
OpenSSL.crypto.FILETYPE_PEM, x509
|
||||
@@ -136,49 +180,25 @@ class SSLCertificate:
|
||||
Path(filepath).write_text(pem_data, encoding="utf-8")
|
||||
return None
|
||||
return pem_data
|
||||
except Exception:
|
||||
return None
|
||||
except Exception as e:
|
||||
print(f"Error converting to PEM: {e}")
|
||||
return None
|
||||
|
||||
def to_der(self, filepath: Optional[str] = None) -> Optional[bytes]:
|
||||
"""
|
||||
Export certificate as DER.
|
||||
|
||||
Args:
|
||||
filepath (Optional[str]): Path to save the DER file (default: None).
|
||||
|
||||
Returns:
|
||||
Optional[bytes]: DER bytes if successful, None otherwise.
|
||||
"""
|
||||
"""Export certificate as DER."""
|
||||
try:
|
||||
der_data = base64.b64decode(self._cert_info["raw_cert"])
|
||||
# Decode the raw_cert (which should be string due to _decode)
|
||||
der_data = base64.b64decode(self.get("raw_cert", ""))
|
||||
if filepath:
|
||||
Path(filepath).write_bytes(der_data)
|
||||
return None
|
||||
return der_data
|
||||
except Exception:
|
||||
return None
|
||||
except Exception as e:
|
||||
print(f"Error converting to DER: {e}")
|
||||
return None
|
||||
|
||||
@property
|
||||
def issuer(self) -> Dict[str, str]:
|
||||
"""Get certificate issuer information."""
|
||||
return self._cert_info.get("issuer", {})
|
||||
|
||||
@property
|
||||
def subject(self) -> Dict[str, str]:
|
||||
"""Get certificate subject information."""
|
||||
return self._cert_info.get("subject", {})
|
||||
|
||||
@property
|
||||
def valid_from(self) -> str:
|
||||
"""Get certificate validity start date."""
|
||||
return self._cert_info.get("not_before", "")
|
||||
|
||||
@property
|
||||
def valid_until(self) -> str:
|
||||
"""Get certificate validity end date."""
|
||||
return self._cert_info.get("not_after", "")
|
||||
|
||||
@property
|
||||
def fingerprint(self) -> str:
|
||||
"""Get certificate fingerprint."""
|
||||
return self._cert_info.get("fingerprint", "")
|
||||
# Optional: Add __repr__ for better debugging
|
||||
def __repr__(self) -> str:
|
||||
subject_cn = self.subject.get('CN', 'N/A')
|
||||
issuer_cn = self.issuer.get('CN', 'N/A')
|
||||
return f"<SSLCertificate Subject='{subject_cn}' Issuer='{issuer_cn}'>"
|
||||
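Because SSLCertificate now subclasses dict, instances can be inspected and serialized directly; a small sketch, assuming the module path and URL shown here as placeholders:

import json
from crawl4ai.ssl_certificate import SSLCertificate  # module path assumed from the diff

cert = SSLCertificate.from_url("https://example.com")  # placeholder URL
if cert:
    print(cert.subject.get("CN"), cert.valid_until)     # property access via self.get(...)
    print(json.dumps(cert)[:120])                        # works because the object *is* a dict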
@@ -1,14 +1,187 @@
|
||||
from typing import TYPE_CHECKING, Union
|
||||
|
||||
AsyncWebCrawler = Union['AsyncWebCrawlerType'] # Note the string literal
|
||||
CrawlerRunConfig = Union['CrawlerRunConfigType']
|
||||
# Logger types
|
||||
AsyncLoggerBase = Union['AsyncLoggerBaseType']
|
||||
AsyncLogger = Union['AsyncLoggerType']
|
||||
|
||||
# Crawler core types
|
||||
AsyncWebCrawler = Union['AsyncWebCrawlerType']
|
||||
CacheMode = Union['CacheModeType']
|
||||
CrawlResult = Union['CrawlResultType']
|
||||
CrawlerHub = Union['CrawlerHubType']
|
||||
BrowserProfiler = Union['BrowserProfilerType']
|
||||
|
||||
# Configuration types
|
||||
BrowserConfig = Union['BrowserConfigType']
|
||||
CrawlerRunConfig = Union['CrawlerRunConfigType']
|
||||
HTTPCrawlerConfig = Union['HTTPCrawlerConfigType']
|
||||
LLMConfig = Union['LLMConfigType']
|
||||
|
||||
# Content scraping types
|
||||
ContentScrapingStrategy = Union['ContentScrapingStrategyType']
|
||||
WebScrapingStrategy = Union['WebScrapingStrategyType']
|
||||
LXMLWebScrapingStrategy = Union['LXMLWebScrapingStrategyType']
|
||||
|
||||
# Proxy types
|
||||
ProxyRotationStrategy = Union['ProxyRotationStrategyType']
|
||||
RoundRobinProxyStrategy = Union['RoundRobinProxyStrategyType']
|
||||
|
||||
# Extraction types
|
||||
ExtractionStrategy = Union['ExtractionStrategyType']
|
||||
LLMExtractionStrategy = Union['LLMExtractionStrategyType']
|
||||
CosineStrategy = Union['CosineStrategyType']
|
||||
JsonCssExtractionStrategy = Union['JsonCssExtractionStrategyType']
|
||||
JsonXPathExtractionStrategy = Union['JsonXPathExtractionStrategyType']
|
||||
|
||||
# Chunking types
|
||||
ChunkingStrategy = Union['ChunkingStrategyType']
|
||||
RegexChunking = Union['RegexChunkingType']
|
||||
|
||||
# Markdown generation types
|
||||
DefaultMarkdownGenerator = Union['DefaultMarkdownGeneratorType']
|
||||
MarkdownGenerationResult = Union['MarkdownGenerationResultType']
|
||||
|
||||
# Content filter types
|
||||
RelevantContentFilter = Union['RelevantContentFilterType']
|
||||
PruningContentFilter = Union['PruningContentFilterType']
|
||||
BM25ContentFilter = Union['BM25ContentFilterType']
|
||||
LLMContentFilter = Union['LLMContentFilterType']
|
||||
|
||||
# Dispatcher types
|
||||
BaseDispatcher = Union['BaseDispatcherType']
|
||||
MemoryAdaptiveDispatcher = Union['MemoryAdaptiveDispatcherType']
|
||||
SemaphoreDispatcher = Union['SemaphoreDispatcherType']
|
||||
RateLimiter = Union['RateLimiterType']
|
||||
CrawlerMonitor = Union['CrawlerMonitorType']
|
||||
DisplayMode = Union['DisplayModeType']
|
||||
RunManyReturn = Union['RunManyReturnType']
|
||||
|
||||
# Docker client
|
||||
Crawl4aiDockerClient = Union['Crawl4aiDockerClientType']
|
||||
|
||||
# Deep crawling types
|
||||
DeepCrawlStrategy = Union['DeepCrawlStrategyType']
|
||||
BFSDeepCrawlStrategy = Union['BFSDeepCrawlStrategyType']
|
||||
FilterChain = Union['FilterChainType']
|
||||
ContentTypeFilter = Union['ContentTypeFilterType']
|
||||
DomainFilter = Union['DomainFilterType']
|
||||
URLFilter = Union['URLFilterType']
|
||||
FilterStats = Union['FilterStatsType']
|
||||
SEOFilter = Union['SEOFilterType']
|
||||
KeywordRelevanceScorer = Union['KeywordRelevanceScorerType']
|
||||
URLScorer = Union['URLScorerType']
|
||||
CompositeScorer = Union['CompositeScorerType']
|
||||
DomainAuthorityScorer = Union['DomainAuthorityScorerType']
|
||||
FreshnessScorer = Union['FreshnessScorerType']
|
||||
PathDepthScorer = Union['PathDepthScorerType']
|
||||
BestFirstCrawlingStrategy = Union['BestFirstCrawlingStrategyType']
|
||||
DFSDeepCrawlStrategy = Union['DFSDeepCrawlStrategyType']
|
||||
DeepCrawlDecorator = Union['DeepCrawlDecoratorType']
|
||||
|
||||
# Only import types during type checking to avoid circular imports
|
||||
if TYPE_CHECKING:
|
||||
from . import (
|
||||
# Logger imports
|
||||
from .async_logger import (
|
||||
AsyncLoggerBase as AsyncLoggerBaseType,
|
||||
AsyncLogger as AsyncLoggerType,
|
||||
)
|
||||
|
||||
# Crawler core imports
|
||||
from .async_webcrawler import (
|
||||
AsyncWebCrawler as AsyncWebCrawlerType,
|
||||
CacheMode as CacheModeType,
|
||||
)
|
||||
from .models import CrawlResult as CrawlResultType
|
||||
from .hub import CrawlerHub as CrawlerHubType
|
||||
from .browser_profiler import BrowserProfiler as BrowserProfilerType
|
||||
|
||||
# Configuration imports
|
||||
from .async_configs import (
|
||||
BrowserConfig as BrowserConfigType,
|
||||
CrawlerRunConfig as CrawlerRunConfigType,
|
||||
CrawlResult as CrawlResultType,
|
||||
HTTPCrawlerConfig as HTTPCrawlerConfigType,
|
||||
LLMConfig as LLMConfigType,
|
||||
)
|
||||
|
||||
# Content scraping imports
|
||||
from .content_scraping_strategy import (
|
||||
ContentScrapingStrategy as ContentScrapingStrategyType,
|
||||
WebScrapingStrategy as WebScrapingStrategyType,
|
||||
LXMLWebScrapingStrategy as LXMLWebScrapingStrategyType,
|
||||
)
|
||||
|
||||
# Proxy imports
|
||||
from .proxy_strategy import (
|
||||
ProxyRotationStrategy as ProxyRotationStrategyType,
|
||||
RoundRobinProxyStrategy as RoundRobinProxyStrategyType,
|
||||
)
|
||||
|
||||
# Extraction imports
|
||||
from .extraction_strategy import (
|
||||
ExtractionStrategy as ExtractionStrategyType,
|
||||
LLMExtractionStrategy as LLMExtractionStrategyType,
|
||||
CosineStrategy as CosineStrategyType,
|
||||
JsonCssExtractionStrategy as JsonCssExtractionStrategyType,
|
||||
JsonXPathExtractionStrategy as JsonXPathExtractionStrategyType,
|
||||
)
|
||||
|
||||
# Chunking imports
|
||||
from .chunking_strategy import (
|
||||
ChunkingStrategy as ChunkingStrategyType,
|
||||
RegexChunking as RegexChunkingType,
|
||||
)
|
||||
|
||||
# Markdown generation imports
|
||||
from .markdown_generation_strategy import (
|
||||
DefaultMarkdownGenerator as DefaultMarkdownGeneratorType,
|
||||
)
|
||||
from .models import MarkdownGenerationResult as MarkdownGenerationResultType
|
||||
|
||||
# Content filter imports
|
||||
from .content_filter_strategy import (
|
||||
RelevantContentFilter as RelevantContentFilterType,
|
||||
PruningContentFilter as PruningContentFilterType,
|
||||
BM25ContentFilter as BM25ContentFilterType,
|
||||
LLMContentFilter as LLMContentFilterType,
|
||||
)
|
||||
|
||||
# Dispatcher imports
|
||||
from .async_dispatcher import (
|
||||
BaseDispatcher as BaseDispatcherType,
|
||||
MemoryAdaptiveDispatcher as MemoryAdaptiveDispatcherType,
|
||||
SemaphoreDispatcher as SemaphoreDispatcherType,
|
||||
RateLimiter as RateLimiterType,
|
||||
CrawlerMonitor as CrawlerMonitorType,
|
||||
DisplayMode as DisplayModeType,
|
||||
RunManyReturn as RunManyReturnType,
|
||||
)
|
||||
)
|
||||
|
||||
# Docker client
|
||||
from .docker_client import Crawl4aiDockerClient as Crawl4aiDockerClientType
|
||||
|
||||
# Deep crawling imports
|
||||
from .deep_crawling import (
|
||||
DeepCrawlStrategy as DeepCrawlStrategyType,
|
||||
BFSDeepCrawlStrategy as BFSDeepCrawlStrategyType,
|
||||
FilterChain as FilterChainType,
|
||||
ContentTypeFilter as ContentTypeFilterType,
|
||||
DomainFilter as DomainFilterType,
|
||||
URLFilter as URLFilterType,
|
||||
FilterStats as FilterStatsType,
|
||||
SEOFilter as SEOFilterType,
|
||||
KeywordRelevanceScorer as KeywordRelevanceScorerType,
|
||||
URLScorer as URLScorerType,
|
||||
CompositeScorer as CompositeScorerType,
|
||||
DomainAuthorityScorer as DomainAuthorityScorerType,
|
||||
FreshnessScorer as FreshnessScorerType,
|
||||
PathDepthScorer as PathDepthScorerType,
|
||||
BestFirstCrawlingStrategy as BestFirstCrawlingStrategyType,
|
||||
DFSDeepCrawlStrategy as DFSDeepCrawlStrategyType,
|
||||
DeepCrawlDecorator as DeepCrawlDecoratorType,
|
||||
)
|
||||
|
||||
|
||||
|
||||
def create_llm_config(*args, **kwargs) -> 'LLMConfigType':
|
||||
from .async_configs import LLMConfig
|
||||
return LLMConfig(*args, **kwargs)
|
||||
|
||||
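The aliases above let type checkers resolve the real classes while runtime imports stay lazy (avoiding circular imports); a short sketch of the intended consumption, with the import path assumed from the diff and placeholder values:

from crawl4ai.types import AsyncWebCrawler, create_llm_config  # resolved lazily at runtime

llm_config = create_llm_config(provider="openai/gpt-4o-mini", api_token="sk-placeholder")

def run(crawler: "AsyncWebCrawler") -> None:
    # annotation only; the heavy crawler module is imported elsewhere
    ...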
@@ -1,5 +1,4 @@
|
||||
import time
|
||||
from urllib.parse import urlparse
|
||||
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
from bs4 import BeautifulSoup, Comment, element, Tag, NavigableString
|
||||
import json
|
||||
@@ -21,18 +20,19 @@ from urllib.parse import urljoin
|
||||
import requests
|
||||
from requests.exceptions import InvalidSchema
|
||||
import xxhash
|
||||
from colorama import Fore, Style, init
|
||||
import textwrap
|
||||
import cProfile
|
||||
import pstats
|
||||
from functools import wraps
|
||||
import asyncio
|
||||
|
||||
from lxml import etree, html as lhtml
|
||||
import sqlite3
|
||||
import hashlib
|
||||
|
||||
from urllib.robotparser import RobotFileParser
|
||||
import aiohttp
|
||||
from urllib.parse import urlparse, urlunparse
|
||||
from functools import lru_cache
|
||||
|
||||
from packaging import version
|
||||
from . import __version__
|
||||
@@ -440,14 +440,13 @@ def create_box_message(
|
||||
str: A formatted string containing the styled message box.
|
||||
"""
|
||||
|
||||
init()
|
||||
|
||||
# Define border and text colors for different types
|
||||
styles = {
|
||||
"warning": (Fore.YELLOW, Fore.LIGHTYELLOW_EX, "⚠"),
|
||||
"info": (Fore.BLUE, Fore.LIGHTBLUE_EX, "ℹ"),
|
||||
"success": (Fore.GREEN, Fore.LIGHTGREEN_EX, "✓"),
|
||||
"error": (Fore.RED, Fore.LIGHTRED_EX, "×"),
|
||||
"warning": ("yellow", "bright_yellow", "⚠"),
|
||||
"info": ("blue", "bright_blue", "ℹ"),
|
||||
"debug": ("lightblack", "bright_black", "⋯"),
|
||||
"success": ("green", "bright_green", "✓"),
|
||||
"error": ("red", "bright_red", "×"),
|
||||
}
|
||||
|
||||
border_color, text_color, prefix = styles.get(type.lower(), styles["info"])
|
||||
@@ -479,12 +478,12 @@ def create_box_message(
|
||||
# Create the box with colored borders and lighter text
|
||||
horizontal_line = h_line * (width - 1)
|
||||
box = [
|
||||
f"{border_color}{tl}{horizontal_line}{tr}",
|
||||
f"[{border_color}]{tl}{horizontal_line}{tr}[/{border_color}]",
|
||||
*[
|
||||
f"{border_color}{v_line}{text_color} {line:<{width-2}}{border_color}{v_line}"
|
||||
f"[{border_color}]{v_line}[{text_color}] {line:<{width-2}}[/{text_color}][{border_color}]{v_line}[/{border_color}]"
|
||||
for line in formatted_lines
|
||||
],
|
||||
f"{border_color}{bl}{horizontal_line}{br}{Style.RESET_ALL}",
|
||||
f"[{border_color}]{bl}{horizontal_line}{br}[/{border_color}]",
|
||||
]
|
||||
|
||||
result = "\n".join(box)
|
||||
@@ -1550,7 +1549,7 @@ def extract_xml_tags(string):
|
||||
return list(set(tags))
|
||||
|
||||
|
||||
def extract_xml_data(tags, string):
|
||||
def extract_xml_data_legacy(tags, string):
|
||||
"""
|
||||
Extract data for specified XML tags from a string.
|
||||
|
||||
@@ -1579,6 +1578,38 @@ def extract_xml_data(tags, string):
|
||||
|
||||
return data
|
||||
|
||||
def extract_xml_data(tags, string):
|
||||
"""
|
||||
Extract data for specified XML tags from a string, returning the longest content for each tag.
|
||||
|
||||
How it works:
|
||||
1. Finds all occurrences of each tag in the string using regex.
|
||||
2. For each tag, selects the occurrence with the longest content.
|
||||
3. Returns a dictionary of tag-content pairs.
|
||||
|
||||
Args:
|
||||
tags (List[str]): The list of XML tags to extract.
|
||||
string (str): The input string containing XML data.
|
||||
|
||||
Returns:
|
||||
Dict[str, str]: A dictionary with tag names as keys and longest extracted content as values.
|
||||
"""
|
||||
|
||||
data = {}
|
||||
|
||||
for tag in tags:
|
||||
pattern = f"<{tag}>(.*?)</{tag}>"
|
||||
matches = re.findall(pattern, string, re.DOTALL)
|
||||
|
||||
if matches:
|
||||
# Find the longest content for this tag
|
||||
longest_content = max(matches, key=len).strip()
|
||||
data[tag] = longest_content
|
||||
else:
|
||||
data[tag] = ""
|
||||
|
||||
return data
|
||||
|
||||
|
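A quick illustration of the longest-match behaviour implemented above (the input string is hypothetical):

extract_xml_data(["blocks"], '<blocks>[1]</blocks> retrying... <blocks>[{"t": 1}, {"t": 2}]</blocks>')
# -> {'blocks': '[{"t": 1}, {"t": 2}]'}   # the longest occurrence of each tag wins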
||||
def perform_completion_with_backoff(
|
||||
provider,
|
||||
@@ -1647,6 +1678,19 @@ def perform_completion_with_backoff(
|
||||
"content": ["Rate limit error. Please try again later."],
|
||||
}
|
||||
]
|
||||
except Exception as e:
|
||||
raise e # Raise any other exceptions immediately
|
||||
# print("Error during completion request:", str(e))
|
||||
# error_message = e.message
|
||||
# return [
|
||||
# {
|
||||
# "index": 0,
|
||||
# "tags": ["error"],
|
||||
# "content": [
|
||||
# f"Error during LLM completion request. {error_message}"
|
||||
# ],
|
||||
# }
|
||||
# ]
|
||||
|
||||
|
||||
def extract_blocks(url, html, provider=DEFAULT_PROVIDER, api_token=None, base_url=None):
|
||||
@@ -1957,11 +2001,91 @@ def normalize_url(href, base_url):
|
||||
if not parsed_base.scheme or not parsed_base.netloc:
|
||||
raise ValueError(f"Invalid base URL format: {base_url}")
|
||||
|
||||
# Ensure base_url ends with a trailing slash if it's a directory path
|
||||
if not base_url.endswith('/'):
|
||||
base_url = base_url + '/'
|
||||
|
||||
# Use urljoin to handle all cases
|
||||
normalized = urljoin(base_url, href.strip())
|
||||
return normalized
|
||||
|
||||
|
||||
def normalize_url_for_deep_crawl(href, base_url):
|
||||
"""Normalize URLs to ensure consistent format"""
|
||||
from urllib.parse import urljoin, urlparse, urlunparse, parse_qs, urlencode
|
||||
|
||||
# Handle None or empty values
|
||||
if not href:
|
||||
return None
|
||||
|
||||
# Use urljoin to handle relative URLs
|
||||
full_url = urljoin(base_url, href.strip())
|
||||
|
||||
# Parse the URL for normalization
|
||||
parsed = urlparse(full_url)
|
||||
|
||||
# Convert hostname to lowercase
|
||||
netloc = parsed.netloc.lower()
|
||||
|
||||
# Remove fragment entirely
|
||||
fragment = ''
|
||||
|
||||
# Normalize query parameters if needed
|
||||
query = parsed.query
|
||||
if query:
|
||||
# Parse query parameters
|
||||
params = parse_qs(query)
|
||||
|
||||
# Remove tracking parameters (example - customize as needed)
|
||||
tracking_params = ['utm_source', 'utm_medium', 'utm_campaign', 'ref', 'fbclid']
|
||||
for param in tracking_params:
|
||||
if param in params:
|
||||
del params[param]
|
||||
|
||||
# Rebuild query string, sorted for consistency
|
||||
query = urlencode(params, doseq=True) if params else ''
|
||||
|
||||
# Build normalized URL
|
||||
normalized = urlunparse((
|
||||
parsed.scheme,
|
||||
netloc,
|
||||
parsed.path.rstrip('/'), # Normalize trailing slash
|
||||
parsed.params,
|
||||
query,
|
||||
fragment
|
||||
))
|
||||
|
||||
return normalized
|
||||
|
||||
@lru_cache(maxsize=10000)
|
||||
def efficient_normalize_url_for_deep_crawl(href, base_url):
|
||||
"""Efficient URL normalization with proper parsing"""
|
||||
from urllib.parse import urljoin
|
||||
|
||||
if not href:
|
||||
return None
|
||||
|
||||
# Resolve relative URLs
|
||||
full_url = urljoin(base_url, href.strip())
|
||||
|
||||
# Use proper URL parsing
|
||||
parsed = urlparse(full_url)
|
||||
|
||||
# Only perform the most critical normalizations
|
||||
# 1. Lowercase hostname
|
||||
# 2. Remove fragment
|
||||
normalized = urlunparse((
|
||||
parsed.scheme,
|
||||
parsed.netloc.lower(),
|
||||
parsed.path.rstrip('/'),
|
||||
parsed.params,
|
||||
parsed.query,
|
||||
'' # Remove fragment
|
||||
))
|
||||
|
||||
return normalized
|
||||
|
||||
|
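To make the normalization rules above concrete (lower-cased host, tracking parameters and fragment dropped), a hypothetical call:

normalize_url_for_deep_crawl("post?id=7&utm_source=tw#comments", "https://Example.com/blog/")
# -> 'https://example.com/blog/post?id=7'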
||||
def normalize_url_tmp(href, base_url):
|
||||
"""Normalize URLs to ensure consistent format"""
|
||||
# Extract protocol and domain from base URL
|
||||
@@ -2540,3 +2664,149 @@ class HeadPeekr:
|
||||
def get_title(head_content: str):
|
||||
title_match = re.search(r'<title>(.*?)</title>', head_content, re.IGNORECASE | re.DOTALL)
|
||||
return title_match.group(1) if title_match else None
|
||||
|
||||
def preprocess_html_for_schema(html_content, text_threshold=100, attr_value_threshold=200, max_size=100000):
|
||||
"""
|
||||
Preprocess HTML to reduce size while preserving structure for schema generation.
|
||||
|
||||
Args:
|
||||
html_content (str): Raw HTML content
|
||||
text_threshold (int): Maximum length for text nodes before truncation
|
||||
attr_value_threshold (int): Maximum length for attribute values before truncation
|
||||
max_size (int): Target maximum size for output HTML
|
||||
|
||||
Returns:
|
||||
str: Preprocessed HTML content
|
||||
"""
|
||||
try:
|
||||
# Parse HTML with error recovery
|
||||
parser = etree.HTMLParser(remove_comments=True, remove_blank_text=True)
|
||||
tree = lhtml.fromstring(html_content, parser=parser)
|
||||
|
||||
# 1. Remove HEAD section (keep only BODY)
|
||||
head_elements = tree.xpath('//head')
|
||||
for head in head_elements:
|
||||
if head.getparent() is not None:
|
||||
head.getparent().remove(head)
|
||||
|
||||
# 2. Define tags to remove completely
|
||||
tags_to_remove = [
|
||||
'script', 'style', 'noscript', 'iframe', 'canvas', 'svg',
|
||||
'video', 'audio', 'source', 'track', 'map', 'area'
|
||||
]
|
||||
|
||||
# Remove unwanted elements
|
||||
for tag in tags_to_remove:
|
||||
elements = tree.xpath(f'//{tag}')
|
||||
for element in elements:
|
||||
if element.getparent() is not None:
|
||||
element.getparent().remove(element)
|
||||
|
||||
# 3. Process remaining elements to clean attributes and truncate text
|
||||
for element in tree.iter():
|
||||
# Skip if we're at the root level
|
||||
if element.getparent() is None:
|
||||
continue
|
||||
|
||||
# Clean non-essential attributes but preserve structural ones
|
||||
# attribs_to_keep = {'id', 'class', 'name', 'href', 'src', 'type', 'value', 'data-'}
|
||||
|
||||
# This is more aggressive than the previous version
|
||||
attribs_to_keep = {'id', 'class', 'name', 'type', 'value'}
|
||||
|
||||
# attributes_hates_truncate = ['id', 'class', "data-"]
|
||||
|
||||
# This means: if an attribute value is too long, just truncate it; go find a better CSS selector to build a schema
|
||||
attributes_hates_truncate = []
|
||||
|
||||
# Process each attribute
|
||||
for attrib in list(element.attrib.keys()):
|
||||
# Keep if it's essential or starts with data-
|
||||
if not (attrib in attribs_to_keep or attrib.startswith('data-')):
|
||||
element.attrib.pop(attrib)
|
||||
# Truncate long attribute values except for selectors
|
||||
elif attrib not in attributes_hates_truncate and len(element.attrib[attrib]) > attr_value_threshold:
|
||||
element.attrib[attrib] = element.attrib[attrib][:attr_value_threshold] + '...'
|
||||
|
||||
# Truncate text content if it's too long
|
||||
if element.text and len(element.text.strip()) > text_threshold:
|
||||
element.text = element.text.strip()[:text_threshold] + '...'
|
||||
|
||||
# Also truncate tail text if present
|
||||
if element.tail and len(element.tail.strip()) > text_threshold:
|
||||
element.tail = element.tail.strip()[:text_threshold] + '...'
|
||||
|
||||
# 4. Detect duplicates and drop them in a single pass
|
||||
seen: dict[tuple, None] = {}
|
||||
for el in list(tree.xpath('//*[@class]')): # snapshot once, XPath is fast
|
||||
parent = el.getparent()
|
||||
if parent is None:
|
||||
continue
|
||||
|
||||
cls = el.get('class')
|
||||
if not cls:
|
||||
continue
|
||||
|
||||
# ── build signature ───────────────────────────────────────────
|
||||
h = xxhash.xxh64() # stream, no big join()
|
||||
for txt in el.itertext():
|
||||
h.update(txt)
|
||||
sig = (el.tag, cls, h.intdigest()) # tuple cheaper & hashable
|
||||
|
||||
# ── first seen? keep – else drop ─────────────
|
||||
if sig in seen and parent is not None:
|
||||
parent.remove(el) # duplicate
|
||||
else:
|
||||
seen[sig] = None
|
||||
|
||||
# # 4. Find repeated patterns and keep only a few examples
|
||||
# # This is a simplistic approach - more sophisticated pattern detection could be implemented
|
||||
# pattern_elements = {}
|
||||
# for element in tree.xpath('//*[contains(@class, "")]'):
|
||||
# parent = element.getparent()
|
||||
# if parent is None:
|
||||
# continue
|
||||
|
||||
# # Create a signature based on tag and classes
|
||||
# classes = element.get('class', '')
|
||||
# if not classes:
|
||||
# continue
|
||||
# innert_text = ''.join(element.xpath('.//text()'))
|
||||
# innert_text_hash = xxhash.xxh64(innert_text.encode()).hexdigest()
|
||||
# signature = f"{element.tag}.{classes}.{innert_text_hash}"
|
||||
|
||||
# if signature in pattern_elements:
|
||||
# pattern_elements[signature].append(element)
|
||||
# else:
|
||||
# pattern_elements[signature] = [element]
|
||||
|
||||
# # Keep only first examples of each repeating pattern
|
||||
# for signature, elements in pattern_elements.items():
|
||||
# if len(elements) > 1:
|
||||
# # Keep the first element and remove the rest
|
||||
# for element in elements[1:]:
|
||||
# if element.getparent() is not None:
|
||||
# element.getparent().remove(element)
|
||||
|
||||
|
||||
# # Keep only 3 examples of each repeating pattern
|
||||
# for signature, elements in pattern_elements.items():
|
||||
# if len(elements) > 3:
|
||||
# # Keep the first 2 and last elements
|
||||
# for element in elements[2:-1]:
|
||||
# if element.getparent() is not None:
|
||||
# element.getparent().remove(element)
|
||||
|
||||
# 5. Convert back to string
|
||||
result = etree.tostring(tree, encoding='unicode', method='html')
|
||||
|
||||
# If still over the size limit, apply more aggressive truncation
|
||||
if len(result) > max_size:
|
||||
return result[:max_size] + "..."
|
||||
|
||||
return result
|
||||
|
||||
except Exception as e:
|
||||
# Fallback for parsing errors
|
||||
return html_content[:max_size] if len(html_content) > max_size else html_content
|
||||
|
||||
|
||||
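A sketch of the intended effect of preprocess_html_for_schema on a page with many repeated blocks (the input HTML is synthetic):

raw = "<html><head><title>shop</title></head><body>" + "<div class='card'><p>Item</p></div>" * 50 + "</body></html>"
slim = preprocess_html_for_schema(raw, text_threshold=50, max_size=20000)
# head/script/style removed, duplicate .card blocks collapsed to a single example,
# long text nodes and attribute values truncated
print(len(raw), len(slim))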
File diff suppressed because it is too large
@@ -1,7 +1,10 @@
|
||||
import os
|
||||
import json
|
||||
import asyncio
|
||||
from typing import List, Tuple
|
||||
from typing import List, Tuple, Dict
|
||||
from functools import partial
|
||||
from uuid import uuid4
|
||||
from datetime import datetime
|
||||
|
||||
import logging
|
||||
from typing import Optional, AsyncGenerator
|
||||
@@ -18,7 +21,8 @@ from crawl4ai import (
|
||||
CacheMode,
|
||||
BrowserConfig,
|
||||
MemoryAdaptiveDispatcher,
|
||||
RateLimiter
|
||||
RateLimiter,
|
||||
LLMConfig
|
||||
)
|
||||
from crawl4ai.utils import perform_completion_with_backoff
|
||||
from crawl4ai.content_filter_strategy import (
|
||||
@@ -38,8 +42,19 @@ from utils import (
|
||||
decode_redis_hash
|
||||
)
|
||||
|
||||
import psutil, time
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# --- Helper to get memory ---
|
||||
def _get_memory_mb():
|
||||
try:
|
||||
return psutil.Process().memory_info().rss / (1024 * 1024)
|
||||
except Exception as e:
|
||||
logger.warning(f"Could not get memory info: {e}")
|
||||
return None
|
||||
|
||||
|
||||
async def handle_llm_qa(
|
||||
url: str,
|
||||
query: str,
|
||||
@@ -47,6 +62,8 @@ async def handle_llm_qa(
|
||||
) -> str:
|
||||
"""Process QA using LLM with crawled content as context."""
|
||||
try:
|
||||
if not url.startswith(('http://', 'https://')):
|
||||
url = 'https://' + url
|
||||
# Extract base URL by finding last '?q=' occurrence
|
||||
last_q_index = url.rfind('?q=')
|
||||
if last_q_index != -1:
|
||||
@@ -60,7 +77,7 @@ async def handle_llm_qa(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail=result.error_message
|
||||
)
|
||||
content = result.markdown.fit_markdown
|
||||
content = result.markdown.fit_markdown or result.markdown.raw_markdown
|
||||
|
||||
# Create prompt and get LLM response
|
||||
prompt = f"""Use the following content as context to answer the question.
|
||||
@@ -103,8 +120,10 @@ async def process_llm_extraction(
|
||||
else:
|
||||
api_key = os.environ.get(config["llm"].get("api_key_env", None), "")
|
||||
llm_strategy = LLMExtractionStrategy(
|
||||
provider=config["llm"]["provider"],
|
||||
api_token=api_key,
|
||||
llm_config=LLMConfig(
|
||||
provider=config["llm"]["provider"],
|
||||
api_token=api_key
|
||||
),
|
||||
instruction=instruction,
|
||||
schema=json.loads(schema) if schema else None,
|
||||
)
|
||||
@@ -164,8 +183,10 @@ async def handle_markdown_request(
|
||||
FilterType.FIT: PruningContentFilter(),
|
||||
FilterType.BM25: BM25ContentFilter(user_query=query or ""),
|
||||
FilterType.LLM: LLMContentFilter(
|
||||
provider=config["llm"]["provider"],
|
||||
api_token=os.environ.get(config["llm"].get("api_key_env", None), ""),
|
||||
llm_config=LLMConfig(
|
||||
provider=config["llm"]["provider"],
|
||||
api_token=os.environ.get(config["llm"].get("api_key_env", None), ""),
|
||||
),
|
||||
instruction=query or "Extract main content"
|
||||
)
|
||||
}[filter_type]
|
||||
@@ -253,7 +274,9 @@ async def handle_llm_request(
|
||||
async def handle_task_status(
|
||||
redis: aioredis.Redis,
|
||||
task_id: str,
|
||||
base_url: str
|
||||
base_url: str,
|
||||
*,
|
||||
keep: bool = False
|
||||
) -> JSONResponse:
|
||||
"""Handle task status check requests."""
|
||||
task = await redis.hgetall(f"task:{task_id}")
|
||||
@@ -267,7 +290,7 @@ async def handle_task_status(
|
||||
response = create_task_response(task, task_id, base_url)
|
||||
|
||||
if task["status"] in [TaskStatus.COMPLETED, TaskStatus.FAILED]:
|
||||
if should_cleanup_task(task["created_at"]):
|
||||
if not keep and should_cleanup_task(task["created_at"]):
|
||||
await redis.delete(f"task:{task_id}")
|
||||
|
||||
return JSONResponse(response)
|
||||
@@ -345,7 +368,9 @@ async def stream_results(crawler: AsyncWebCrawler, results_gen: AsyncGenerator)
|
||||
try:
|
||||
async for result in results_gen:
|
||||
try:
|
||||
server_memory_mb = _get_memory_mb()
|
||||
result_dict = result.model_dump()
|
||||
result_dict['server_memory_mb'] = server_memory_mb
|
||||
logger.info(f"Streaming result for {result_dict.get('url', 'unknown')}")
|
||||
data = json.dumps(result_dict, default=datetime_handler) + "\n"
|
||||
yield data.encode('utf-8')
|
||||
@@ -359,10 +384,11 @@ async def stream_results(crawler: AsyncWebCrawler, results_gen: AsyncGenerator)
|
||||
except asyncio.CancelledError:
|
||||
logger.warning("Client disconnected during streaming")
|
||||
finally:
|
||||
try:
|
||||
await crawler.close()
|
||||
except Exception as e:
|
||||
logger.error(f"Crawler cleanup error: {e}")
|
||||
# try:
|
||||
# await crawler.close()
|
||||
# except Exception as e:
|
||||
# logger.error(f"Crawler cleanup error: {e}")
|
||||
pass
|
||||
|
||||
async def handle_crawl_request(
|
||||
urls: List[str],
|
||||
@@ -371,7 +397,13 @@ async def handle_crawl_request(
|
||||
config: dict
|
||||
) -> dict:
|
||||
"""Handle non-streaming crawl requests."""
|
||||
start_mem_mb = _get_memory_mb() # <--- Get memory before
|
||||
start_time = time.time()
|
||||
mem_delta_mb = None
|
||||
peak_mem_mb = start_mem_mb
|
||||
|
||||
try:
|
||||
urls = [('https://' + url) if not url.startswith(('http://', 'https://')) else url for url in urls]
|
||||
browser_config = BrowserConfig.load(browser_config)
|
||||
crawler_config = CrawlerRunConfig.load(crawler_config)
|
||||
|
||||
@@ -379,26 +411,68 @@ async def handle_crawl_request(
|
||||
memory_threshold_percent=config["crawler"]["memory_threshold_percent"],
|
||||
rate_limiter=RateLimiter(
|
||||
base_delay=tuple(config["crawler"]["rate_limiter"]["base_delay"])
|
||||
)
|
||||
) if config["crawler"]["rate_limiter"]["enabled"] else None
|
||||
)
|
||||
|
||||
from crawler_pool import get_crawler
|
||||
crawler = await get_crawler(browser_config)
|
||||
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
results = await crawler.arun_many(
|
||||
urls=urls,
|
||||
config=crawler_config,
|
||||
dispatcher=dispatcher
|
||||
)
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
"results": [result.model_dump() for result in results]
|
||||
}
|
||||
# crawler: AsyncWebCrawler = AsyncWebCrawler(config=browser_config)
|
||||
# await crawler.start()
|
||||
|
||||
base_config = config["crawler"]["base_config"]
|
||||
# Iterate over the key-value pairs in base_config and use hasattr to set them on crawler_config
|
||||
for key, value in base_config.items():
|
||||
if hasattr(crawler_config, key):
|
||||
setattr(crawler_config, key, value)
|
||||
|
||||
results = []
|
||||
func = getattr(crawler, "arun" if len(urls) == 1 else "arun_many")
|
||||
partial_func = partial(func,
|
||||
urls[0] if len(urls) == 1 else urls,
|
||||
config=crawler_config,
|
||||
dispatcher=dispatcher)
|
||||
results = await partial_func()
|
||||
|
||||
# await crawler.close()
|
||||
|
||||
end_mem_mb = _get_memory_mb() # <--- Get memory after
|
||||
end_time = time.time()
|
||||
|
||||
if start_mem_mb is not None and end_mem_mb is not None:
|
||||
mem_delta_mb = end_mem_mb - start_mem_mb # <--- Calculate delta
|
||||
peak_mem_mb = max(peak_mem_mb if peak_mem_mb else 0, end_mem_mb) # <--- Get peak memory
|
||||
logger.info(f"Memory usage: Start: {start_mem_mb} MB, End: {end_mem_mb} MB, Delta: {mem_delta_mb} MB, Peak: {peak_mem_mb} MB")
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
"results": [result.model_dump() for result in results],
|
||||
"server_processing_time_s": end_time - start_time,
|
||||
"server_memory_delta_mb": mem_delta_mb,
|
||||
"server_peak_memory_mb": peak_mem_mb
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Crawl error: {str(e)}", exc_info=True)
|
||||
if 'crawler' in locals() and crawler.ready: # Check if crawler was initialized and started
|
||||
# try:
|
||||
# await crawler.close()
|
||||
# except Exception as close_e:
|
||||
# logger.error(f"Error closing crawler during exception handling: {close_e}")
|
||||
logger.error(f"Error closing crawler during exception handling: {close_e}")
|
||||
|
||||
# Measure memory even on error if possible
|
||||
end_mem_mb_error = _get_memory_mb()
|
||||
if start_mem_mb is not None and end_mem_mb_error is not None:
|
||||
mem_delta_mb = end_mem_mb_error - start_mem_mb
|
||||
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail=str(e)
|
||||
detail=json.dumps({ # Send structured error
|
||||
"error": str(e),
|
||||
"server_memory_delta_mb": mem_delta_mb,
|
||||
"server_peak_memory_mb": max(peak_mem_mb if peak_mem_mb else 0, end_mem_mb_error or 0)
|
||||
})
|
||||
)
|
||||
|
||||
async def handle_stream_crawl_request(
|
||||
@@ -410,9 +484,11 @@ async def handle_stream_crawl_request(
|
||||
"""Handle streaming crawl requests."""
|
||||
try:
|
||||
browser_config = BrowserConfig.load(browser_config)
|
||||
browser_config.verbose = True
|
||||
# browser_config.verbose = True # Set to False or remove for production stress testing
|
||||
browser_config.verbose = False
|
||||
crawler_config = CrawlerRunConfig.load(crawler_config)
|
||||
crawler_config.scraping_strategy = LXMLWebScrapingStrategy()
|
||||
crawler_config.stream = True
|
||||
|
||||
dispatcher = MemoryAdaptiveDispatcher(
|
||||
memory_threshold_percent=config["crawler"]["memory_threshold_percent"],
|
||||
@@ -421,8 +497,11 @@ async def handle_stream_crawl_request(
|
||||
)
|
||||
)
|
||||
|
||||
crawler = AsyncWebCrawler(config=browser_config)
|
||||
await crawler.start()
|
||||
from crawler_pool import get_crawler
|
||||
crawler = await get_crawler(browser_config)
|
||||
|
||||
# crawler = AsyncWebCrawler(config=browser_config)
|
||||
# await crawler.start()
|
||||
|
||||
results_gen = await crawler.arun_many(
|
||||
urls=urls,
|
||||
@@ -433,10 +512,60 @@ async def handle_stream_crawl_request(
|
||||
return crawler, results_gen
|
||||
|
||||
except Exception as e:
|
||||
if 'crawler' in locals():
|
||||
await crawler.close()
|
||||
# Make sure to close crawler if started during an error here
|
||||
if 'crawler' in locals() and crawler.ready:
|
||||
# try:
|
||||
# await crawler.close()
|
||||
# except Exception as close_e:
|
||||
# logger.error(f"Error closing crawler during stream setup exception: {close_e}")
|
||||
logger.error(f"Error closing crawler during stream setup exception: {close_e}")
|
||||
logger.error(f"Stream crawl error: {str(e)}", exc_info=True)
|
||||
# Raising HTTPException here will prevent streaming response
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail=str(e)
|
||||
)
|
||||
)
|
||||
|
||||
async def handle_crawl_job(
|
||||
redis,
|
||||
background_tasks: BackgroundTasks,
|
||||
urls: List[str],
|
||||
browser_config: Dict,
|
||||
crawler_config: Dict,
|
||||
config: Dict,
|
||||
) -> Dict:
|
||||
"""
|
||||
Fire-and-forget version of handle_crawl_request.
|
||||
Creates a task in Redis, runs the heavy work in a background task,
|
||||
lets /crawl/job/{task_id} polling fetch the result.
|
||||
"""
|
||||
task_id = f"crawl_{uuid4().hex[:8]}"
|
||||
await redis.hset(f"task:{task_id}", mapping={
|
||||
"status": TaskStatus.PROCESSING, # <-- keep enum values consistent
|
||||
"created_at": datetime.utcnow().isoformat(),
|
||||
"url": json.dumps(urls), # store list as JSON string
|
||||
"result": "",
|
||||
"error": "",
|
||||
})
|
||||
|
||||
async def _runner():
|
||||
try:
|
||||
result = await handle_crawl_request(
|
||||
urls=urls,
|
||||
browser_config=browser_config,
|
||||
crawler_config=crawler_config,
|
||||
config=config,
|
||||
)
|
||||
await redis.hset(f"task:{task_id}", mapping={
|
||||
"status": TaskStatus.COMPLETED,
|
||||
"result": json.dumps(result),
|
||||
})
|
||||
await asyncio.sleep(5) # Give Redis time to process the update
|
||||
except Exception as exc:
|
||||
await redis.hset(f"task:{task_id}", mapping={
|
||||
"status": TaskStatus.FAILED,
|
||||
"error": str(exc),
|
||||
})
|
||||
|
||||
background_tasks.add_task(_runner)
|
||||
return {"task_id": task_id}
|
||||
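A hedged client-side sketch of the fire-and-forget flow above, using the /crawl/job endpoints added later in this diff (host, port, status strings, and the exact response shape are assumptions; the poll response comes from create_task_response):

import httpx, time

base = "http://localhost:11234"   # port per the config.yml change below
payload = {"urls": ["https://example.com"], "browser_config": {}, "crawler_config": {}}
task_id = httpx.post(f"{base}/crawl/job", json=payload).json()["task_id"]

while True:
    task = httpx.get(f"{base}/crawl/job/{task_id}").json()
    if str(task.get("status", "")).upper() in ("COMPLETED", "FAILED"):
        break
    time.sleep(2)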
@@ -10,7 +10,7 @@ from pydantic.main import BaseModel
import base64

instance = JWT()
security = HTTPBearer()
security = HTTPBearer(auto_error=False)
SECRET_KEY = os.environ.get("SECRET_KEY", "mysecret")
ACCESS_TOKEN_EXPIRE_MINUTES = 60

@@ -30,6 +30,9 @@ def create_access_token(data: dict, expires_delta: Optional[timedelta] = None) -

def verify_token(credentials: HTTPAuthorizationCredentials = Depends(security)) -> Dict:
    """Verify the JWT token from the Authorization header."""

    if credentials is None:
        return None
    token = credentials.credentials
    verifying_key = get_jwk_from_secret(SECRET_KEY)
    try:
@@ -38,9 +41,15 @@ def verify_token(credentials: HTTPAuthorizationCredentials = Depends(security))
    except Exception:
        raise HTTPException(status_code=401, detail="Invalid or expired token")


def get_token_dependency(config: Dict):
    """Return the token dependency if JWT is enabled, else None."""
    return verify_token if config.get("security", {}).get("jwt_enabled", False) else None
    """Return the token dependency if JWT is enabled, else a function that returns None."""

    if config.get("security", {}).get("jwt_enabled", False):
        return verify_token
    else:
        return lambda: None


class TokenRequest(BaseModel):
    email: EmailStr
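A sketch of how the optional-auth dependency above is typically wired into a route (the endpoint and config literal are illustrative):

from fastapi import FastAPI, Depends

app = FastAPI()
config = {"security": {"jwt_enabled": False}}   # with jwt_enabled false the dependency resolves to None
token_dep = get_token_dependency(config)

@app.get("/md")                                  # illustrative endpoint
async def md(_td=Depends(token_dep)):
    return {"ok": True}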
11631
deploy/docker/c4ai-code-context.md
Normal file
File diff suppressed because it is too large
8899
deploy/docker/c4ai-doc-context.md
Normal file
File diff suppressed because it is too large
@@ -3,8 +3,9 @@ app:
  title: "Crawl4AI API"
  version: "1.0.0"
  host: "0.0.0.0"
  port: 8000
  reload: True
  port: 11234
  reload: False
  workers: 1
  timeout_keep_alive: 300

# Default LLM Configuration
@@ -38,8 +39,8 @@ rate_limiting:

# Security Configuration
security:
  enabled: true
  jwt_enabled: true
  enabled: false
  jwt_enabled: false
  https_redirect: false
  trusted_hosts: ["*"]
  headers:
@@ -50,12 +51,31 @@ security:

# Crawler Configuration
crawler:
  base_config:
    simulate_user: true
  memory_threshold_percent: 95.0
  rate_limiter:
    enabled: true
    base_delay: [1.0, 2.0]
  timeouts:
    stream_init: 30.0       # Timeout for stream initialization
    batch_process: 300.0    # Timeout for batch processing
  pool:
    max_pages: 40           # ← GLOBAL_SEM permits
    idle_ttl_sec: 1800      # ← 30 min janitor cutoff
  browser:
    kwargs:
      headless: true
      text_mode: true
    extra_args:
      # - "--single-process"
      - "--no-sandbox"
      - "--disable-dev-shm-usage"
      - "--disable-gpu"
      - "--disable-software-rasterizer"
      - "--disable-web-security"
      - "--allow-insecure-localhost"
      - "--ignore-certificate-errors"

# Logging Configuration
logging:
60
deploy/docker/crawler_pool.py
Normal file
@@ -0,0 +1,60 @@
# crawler_pool.py (new file)
import asyncio, json, hashlib, time, psutil
from contextlib import suppress
from typing import Dict
from crawl4ai import AsyncWebCrawler, BrowserConfig
from typing import Dict
from utils import load_config

CONFIG = load_config()

POOL: Dict[str, AsyncWebCrawler] = {}
LAST_USED: Dict[str, float] = {}
LOCK = asyncio.Lock()

MEM_LIMIT = CONFIG.get("crawler", {}).get("memory_threshold_percent", 95.0)  # % RAM – refuse new browsers above this
IDLE_TTL = CONFIG.get("crawler", {}).get("pool", {}).get("idle_ttl_sec", 1800)  # close if unused for 30 min

def _sig(cfg: BrowserConfig) -> str:
    payload = json.dumps(cfg.to_dict(), sort_keys=True, separators=(",", ":"))
    return hashlib.sha1(payload.encode()).hexdigest()

async def get_crawler(cfg: BrowserConfig) -> AsyncWebCrawler:
    try:
        sig = _sig(cfg)
        async with LOCK:
            if sig in POOL:
                LAST_USED[sig] = time.time();
                return POOL[sig]
            if psutil.virtual_memory().percent >= MEM_LIMIT:
                raise MemoryError("RAM pressure – new browser denied")
            crawler = AsyncWebCrawler(config=cfg, thread_safe=False)
            await crawler.start()
            POOL[sig] = crawler; LAST_USED[sig] = time.time()
            return crawler
    except MemoryError as e:
        raise MemoryError(f"RAM pressure – new browser denied: {e}")
    except Exception as e:
        raise RuntimeError(f"Failed to start browser: {e}")
    finally:
        if sig in POOL:
            LAST_USED[sig] = time.time()
        else:
            # If we failed to start the browser, we should remove it from the pool
            POOL.pop(sig, None)
            LAST_USED.pop(sig, None)
            # If we failed to start the browser, we should remove it from the pool

async def close_all():
    async with LOCK:
        await asyncio.gather(*(c.close() for c in POOL.values()), return_exceptions=True)
        POOL.clear(); LAST_USED.clear()

async def janitor():
    while True:
        await asyncio.sleep(60)
        now = time.time()
        async with LOCK:
            for sig, crawler in list(POOL.items()):
                if now - LAST_USED[sig] > IDLE_TTL:
                    with suppress(Exception): await crawler.close()
                    POOL.pop(sig, None); LAST_USED.pop(sig, None)
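A sketch of the intended lifecycle around the pool above (the startup/shutdown wiring and the BrowserConfig arguments are assumptions):

# startup (e.g. FastAPI lifespan): launch the janitor once
janitor_task = asyncio.create_task(janitor())

# per request: identical BrowserConfig payloads hash to the same signature,
# so repeated requests reuse a running browser instead of starting a new one
crawler = await get_crawler(BrowserConfig(headless=True, text_mode=True))
result = await crawler.arun("https://example.com")

# shutdown
janitor_task.cancel()
await close_all()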
99
deploy/docker/job.py
Normal file
@@ -0,0 +1,99 @@
"""
Job endpoints (enqueue + poll) for long-running LLM extraction and raw crawl.
Relies on the existing Redis task helpers in api.py
"""

from typing import Dict, Optional, Callable
from fastapi import APIRouter, BackgroundTasks, Depends, Request
from pydantic import BaseModel, HttpUrl

from api import (
    handle_llm_request,
    handle_crawl_job,
    handle_task_status,
)

# ------------- dependency placeholders -------------
_redis = None  # will be injected from server.py
_config = None
_token_dep: Callable = lambda: None  # dummy until injected

# public router
router = APIRouter()


# === init hook called by server.py =========================================
def init_job_router(redis, config, token_dep) -> APIRouter:
    """Inject shared singletons and return the router for mounting."""
    global _redis, _config, _token_dep
    _redis, _config, _token_dep = redis, config, token_dep
    return router


# ---------- payload models --------------------------------------------------
class LlmJobPayload(BaseModel):
    url: HttpUrl
    q: str
    schema: Optional[str] = None
    cache: bool = False


class CrawlJobPayload(BaseModel):
    urls: list[HttpUrl]
    browser_config: Dict = {}
    crawler_config: Dict = {}


# ---------- LLM job ---------------------------------------------------------
@router.post("/llm/job", status_code=202)
async def llm_job_enqueue(
    payload: LlmJobPayload,
    background_tasks: BackgroundTasks,
    request: Request,
    _td: Dict = Depends(lambda: _token_dep()),  # late-bound dep
):
    return await handle_llm_request(
        _redis,
        background_tasks,
        request,
        str(payload.url),
        query=payload.q,
        schema=payload.schema,
        cache=payload.cache,
        config=_config,
    )


@router.get("/llm/job/{task_id}")
async def llm_job_status(
    request: Request,
    task_id: str,
    _td: Dict = Depends(lambda: _token_dep())
):
    return await handle_task_status(_redis, task_id)


# ---------- CRAWL job -------------------------------------------------------
@router.post("/crawl/job", status_code=202)
async def crawl_job_enqueue(
    payload: CrawlJobPayload,
    background_tasks: BackgroundTasks,
    _td: Dict = Depends(lambda: _token_dep()),
):
    return await handle_crawl_job(
        _redis,
        background_tasks,
        [str(u) for u in payload.urls],
        payload.browser_config,
        payload.crawler_config,
        config=_config,
    )


@router.get("/crawl/job/{task_id}")
async def crawl_job_status(
    request: Request,
    task_id: str,
    _td: Dict = Depends(lambda: _token_dep())
):
    return await handle_task_status(_redis, task_id, base_url=str(request.base_url))
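A hedged client-side sketch of the enqueue-then-poll flow these endpoints implement. The host, port, and the exact shape of the task-status payload are assumptions here; the real field names come from the Redis task helpers in api.py:

```python
# Hypothetical client for the job endpoints: POST to enqueue, then poll the task id.
import time
import httpx

BASE = "http://localhost:11235"  # assumed server address

resp = httpx.post(f"{BASE}/crawl/job", json={
    "urls": ["https://example.com"],
    "browser_config": {},
    "crawler_config": {},
})
task_id = resp.json()["task_id"]  # field name assumed from the task helpers in api.py

while True:
    status = httpx.get(f"{BASE}/crawl/job/{task_id}").json()
    if status.get("status") in ("COMPLETED", "FAILED"):  # status values assumed
        break
    time.sleep(1)
print(status)
```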
deploy/docker/mcp_bridge.py (new file, 252 lines)
@@ -0,0 +1,252 @@
# deploy/docker/mcp_bridge.py

from __future__ import annotations
import inspect, json, re, anyio
from contextlib import suppress
from typing import Any, Callable, Dict, List, Tuple
import httpx

from fastapi import FastAPI, WebSocket, WebSocketDisconnect, HTTPException
from fastapi.responses import JSONResponse
from fastapi import Request
from sse_starlette.sse import EventSourceResponse
from pydantic import BaseModel
from mcp.server.sse import SseServerTransport

import mcp.types as t
from mcp.server.lowlevel.server import Server, NotificationOptions
from mcp.server.models import InitializationOptions

# ── opt‑in decorators ───────────────────────────────────────────
def mcp_resource(name: str | None = None):
    def deco(fn):
        fn.__mcp_kind__, fn.__mcp_name__ = "resource", name
        return fn
    return deco

def mcp_template(name: str | None = None):
    def deco(fn):
        fn.__mcp_kind__, fn.__mcp_name__ = "template", name
        return fn
    return deco

def mcp_tool(name: str | None = None):
    def deco(fn):
        fn.__mcp_kind__, fn.__mcp_name__ = "tool", name
        return fn
    return deco

# ── HTTP‑proxy helper for FastAPI endpoints ─────────────────────
def _make_http_proxy(base_url: str, route):
    method = list(route.methods - {"HEAD", "OPTIONS"})[0]
    async def proxy(**kwargs):
        # replace `/items/{id}` style params first
        path = route.path
        for k, v in list(kwargs.items()):
            placeholder = "{" + k + "}"
            if placeholder in path:
                path = path.replace(placeholder, str(v))
                kwargs.pop(k)
        url = base_url.rstrip("/") + path

        async with httpx.AsyncClient() as client:
            try:
                r = (
                    await client.get(url, params=kwargs)
                    if method == "GET"
                    else await client.request(method, url, json=kwargs)
                )
                r.raise_for_status()
                return r.text if method == "GET" else r.json()
            except httpx.HTTPStatusError as e:
                # surface FastAPI error details instead of plain 500
                raise HTTPException(e.response.status_code, e.response.text)
    return proxy

# ── main entry point ────────────────────────────────────────────
def attach_mcp(
    app: FastAPI,
    *,  # keyword‑only
    base: str = "/mcp",
    name: str | None = None,
    base_url: str,  # eg. "http://127.0.0.1:8020"
) -> None:
    """Call once after all routes are declared to expose WS+SSE MCP endpoints."""
    server_name = name or app.title or "FastAPI-MCP"
    mcp = Server(server_name)

    # tools: Dict[str, Callable] = {}
    tools: Dict[str, Tuple[Callable, Callable]] = {}
    resources: Dict[str, Callable] = {}
    templates: Dict[str, Callable] = {}

    # register decorated FastAPI routes
    for route in app.routes:
        fn = getattr(route, "endpoint", None)
        kind = getattr(fn, "__mcp_kind__", None)
        if not kind:
            continue

        key = fn.__mcp_name__ or re.sub(r"[/{}}]", "_", route.path).strip("_")

        # if kind == "tool":
        #     tools[key] = _make_http_proxy(base_url, route)
        if kind == "tool":
            proxy = _make_http_proxy(base_url, route)
            tools[key] = (proxy, fn)
            continue
        if kind == "resource":
            resources[key] = fn
        if kind == "template":
            templates[key] = fn

    # helpers for JSON‑Schema
    def _schema(model: type[BaseModel] | None) -> dict:
        return {"type": "object"} if model is None else model.model_json_schema()

    def _body_model(fn: Callable) -> type[BaseModel] | None:
        for p in inspect.signature(fn).parameters.values():
            a = p.annotation
            if inspect.isclass(a) and issubclass(a, BaseModel):
                return a
        return None

    # MCP handlers
    @mcp.list_tools()
    async def _list_tools() -> List[t.Tool]:
        out = []
        for k, (proxy, orig_fn) in tools.items():
            desc = getattr(orig_fn, "__mcp_description__", None) or inspect.getdoc(orig_fn) or ""
            schema = getattr(orig_fn, "__mcp_schema__", None) or _schema(_body_model(orig_fn))
            out.append(
                t.Tool(name=k, description=desc, inputSchema=schema)
            )
        return out


    @mcp.call_tool()
    async def _call_tool(name: str, arguments: Dict | None) -> List[t.TextContent]:
        if name not in tools:
            raise HTTPException(404, "tool not found")

        proxy, _ = tools[name]
        try:
            res = await proxy(**(arguments or {}))
        except HTTPException as exc:
            # map server‑side errors into MCP "text/error" payloads
            err = {"error": exc.status_code, "detail": exc.detail}
            return [t.TextContent(type="text", text=json.dumps(err))]
        return [t.TextContent(type="text", text=json.dumps(res, default=str))]

    @mcp.list_resources()
    async def _list_resources() -> List[t.Resource]:
        return [
            t.Resource(name=k, description=inspect.getdoc(f) or "", mime_type="application/json")
            for k, f in resources.items()
        ]

    @mcp.read_resource()
    async def _read_resource(name: str) -> List[t.TextContent]:
        if name not in resources:
            raise HTTPException(404, "resource not found")
        res = resources[name]()
        return [t.TextContent(type="text", text=json.dumps(res, default=str))]

    @mcp.list_resource_templates()
    async def _list_templates() -> List[t.ResourceTemplate]:
        return [
            t.ResourceTemplate(
                name=k,
                description=inspect.getdoc(f) or "",
                parameters={
                    p: {"type": "string"} for p in _path_params(app, f)
                },
            )
            for k, f in templates.items()
        ]

    init_opts = InitializationOptions(
        server_name=server_name,
        server_version="0.1.0",
        capabilities=mcp.get_capabilities(
            notification_options=NotificationOptions(),
            experimental_capabilities={},
        ),
    )

    # ── WebSocket transport ────────────────────────────────────
    @app.websocket_route(f"{base}/ws")
    async def _ws(ws: WebSocket):
        await ws.accept()
        c2s_send, c2s_recv = anyio.create_memory_object_stream(100)
        s2c_send, s2c_recv = anyio.create_memory_object_stream(100)

        from pydantic import TypeAdapter
        from mcp.types import JSONRPCMessage
        adapter = TypeAdapter(JSONRPCMessage)

        init_done = anyio.Event()

        async def srv_to_ws():
            first = True
            try:
                async for msg in s2c_recv:
                    await ws.send_json(msg.model_dump())
                    if first:
                        init_done.set()
                        first = False
            finally:
                # make sure cleanup survives TaskGroup cancellation
                with anyio.CancelScope(shield=True):
                    with suppress(RuntimeError):  # idempotent close
                        await ws.close()

        async def ws_to_srv():
            try:
                # 1st frame is always "initialize"
                first = adapter.validate_python(await ws.receive_json())
                await c2s_send.send(first)
                await init_done.wait()  # block until server ready
                while True:
                    data = await ws.receive_json()
                    await c2s_send.send(adapter.validate_python(data))
            except WebSocketDisconnect:
                await c2s_send.aclose()

        async with anyio.create_task_group() as tg:
            tg.start_soon(mcp.run, c2s_recv, s2c_send, init_opts)
            tg.start_soon(ws_to_srv)
            tg.start_soon(srv_to_ws)

    # ── SSE transport (official) ─────────────────────────────
    sse = SseServerTransport(f"{base}/messages/")

    @app.get(f"{base}/sse")
    async def _mcp_sse(request: Request):
        async with sse.connect_sse(
            request.scope, request.receive, request._send  # starlette ASGI primitives
        ) as (read_stream, write_stream):
            await mcp.run(read_stream, write_stream, init_opts)

    # client → server frames are POSTed here
    app.mount(f"{base}/messages", app=sse.handle_post_message)

    # ── schema endpoint ───────────────────────────────────────
    @app.get(f"{base}/schema")
    async def _schema_endpoint():
        return JSONResponse({
            "tools": [x.model_dump() for x in await _list_tools()],
            "resources": [x.model_dump() for x in await _list_resources()],
            "resource_templates": [x.model_dump() for x in await _list_templates()],
        })


# ── helpers ────────────────────────────────────────────────────
def _route_name(path: str) -> str:
    return re.sub(r"[/{}}]", "_", path).strip("_")

def _path_params(app: FastAPI, fn: Callable) -> List[str]:
    for r in app.routes:
        if r.endpoint is fn:
            return list(r.param_convertors.keys())
    return []
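A minimal sketch of how the bridge above is meant to be wired up, mirroring what server.py does further down; the port, route path, and endpoint body are illustrative only:

```python
# Hypothetical wiring: decorate a route as an MCP tool, then attach the bridge once.
from fastapi import FastAPI
from pydantic import BaseModel
from mcp_bridge import attach_mcp, mcp_tool

app = FastAPI(title="demo")

class EchoBody(BaseModel):
    text: str

@app.post("/echo")
@mcp_tool("echo")                      # exposed to MCP clients as the "echo" tool
async def echo(body: EchoBody):
    return {"echo": body.text}

# Call once, after all routes are declared; adds /mcp/ws, /mcp/sse and /mcp/schema.
attach_mcp(app, base_url="http://127.0.0.1:8020")
```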
@@ -1,10 +1,16 @@
crawl4ai
fastapi
uvicorn
fastapi>=0.115.12
uvicorn>=0.34.2
gunicorn>=23.0.0
slowapi>=0.1.9
prometheus-fastapi-instrumentator>=7.0.2
slowapi==0.1.9
prometheus-fastapi-instrumentator>=7.1.0
redis>=5.2.1
jwt>=1.3.1
dnspython>=2.7.0
email-validator>=2.2.0
email-validator==2.2.0
sse-starlette==2.2.1
pydantic>=2.11
rank-bm25==0.2.2
anyio==4.9.0
PyJWT==2.10.1
mcp>=1.6.0
websockets>=15.0.1
deploy/docker/schemas.py (new file, 42 lines)
@@ -0,0 +1,42 @@
from typing import List, Optional, Dict
from enum import Enum
from pydantic import BaseModel, Field
from utils import FilterType


class CrawlRequest(BaseModel):
    urls: List[str] = Field(min_length=1, max_length=100)
    browser_config: Optional[Dict] = Field(default_factory=dict)
    crawler_config: Optional[Dict] = Field(default_factory=dict)

class MarkdownRequest(BaseModel):
    """Request body for the /md endpoint."""
    url: str = Field(..., description="Absolute http/https URL to fetch")
    f: FilterType = Field(FilterType.FIT,
                          description="Content‑filter strategy: FIT, RAW, BM25, or LLM")
    q: Optional[str] = Field(None, description="Query string used by BM25/LLM filters")
    c: Optional[str] = Field("0", description="Cache‑bust / revision counter")


class RawCode(BaseModel):
    code: str

class HTMLRequest(BaseModel):
    url: str

class ScreenshotRequest(BaseModel):
    url: str
    screenshot_wait_for: Optional[float] = 2
    output_path: Optional[str] = None

class PDFRequest(BaseModel):
    url: str
    output_path: Optional[str] = None


class JSEndpointRequest(BaseModel):
    url: str
    scripts: List[str] = Field(
        ...,
        description="List of separated JavaScript snippets to execute"
    )
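For reference, a hedged example of payloads that validate against the models above; the values are illustrative only:

```python
# Hypothetical payloads matching the Pydantic models above.
from schemas import CrawlRequest, MarkdownRequest
from utils import FilterType

crawl_req = CrawlRequest(
    urls=["https://example.com"],
    browser_config={"headless": True},
    crawler_config={"stream": False},
)

md_req = MarkdownRequest(
    url="https://example.com",
    f=FilterType.BM25,            # BM25 keyword filtering
    q="installation instructions",
    c="0",                        # cache-bust counter
)
print(crawl_req.model_dump(), md_req.model_dump())
```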
@@ -1,150 +1,477 @@
|
||||
# ───────────────────────── server.py ─────────────────────────
|
||||
"""
|
||||
Crawl4AI FastAPI entry‑point
|
||||
• Browser pool + global page cap
|
||||
• Rate‑limiting, security, metrics
|
||||
• /crawl, /crawl/stream, /md, /llm endpoints
|
||||
"""
|
||||
|
||||
# ── stdlib & 3rd‑party imports ───────────────────────────────
|
||||
from crawler_pool import get_crawler, close_all, janitor
|
||||
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig
|
||||
from auth import create_access_token, get_token_dependency, TokenRequest
|
||||
from pydantic import BaseModel
|
||||
from typing import Optional, List, Dict
|
||||
from fastapi import Request, Depends
|
||||
from fastapi.responses import FileResponse
|
||||
import base64
|
||||
import re
|
||||
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig
|
||||
from api import (
|
||||
handle_markdown_request, handle_llm_qa,
|
||||
handle_stream_crawl_request, handle_crawl_request,
|
||||
stream_results
|
||||
)
|
||||
from schemas import (
|
||||
CrawlRequest,
|
||||
MarkdownRequest,
|
||||
RawCode,
|
||||
HTMLRequest,
|
||||
ScreenshotRequest,
|
||||
PDFRequest,
|
||||
JSEndpointRequest,
|
||||
)
|
||||
|
||||
from utils import (
|
||||
FilterType,
|
||||
load_config,
|
||||
setup_logging,
|
||||
verify_email_domain,
|
||||
get_base_url,
|
||||
)
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
from typing import List, Optional, Dict
|
||||
from fastapi import FastAPI, HTTPException, Request, Query, Path, Depends
|
||||
from fastapi.responses import StreamingResponse, RedirectResponse, PlainTextResponse, JSONResponse
|
||||
import asyncio
|
||||
from typing import List
|
||||
from contextlib import asynccontextmanager
|
||||
import pathlib
|
||||
|
||||
from fastapi import (
|
||||
FastAPI, HTTPException, Request, Path, Query, Depends
|
||||
)
|
||||
from rank_bm25 import BM25Okapi
|
||||
from fastapi.responses import (
|
||||
StreamingResponse,
|
||||
RedirectResponse,
|
||||
PlainTextResponse,
|
||||
JSONResponse,
|
||||
HTMLResponse,
|
||||
)
|
||||
from fastapi.middleware.httpsredirect import HTTPSRedirectMiddleware
|
||||
from fastapi.middleware.trustedhost import TrustedHostMiddleware
|
||||
from fastapi.staticfiles import StaticFiles
|
||||
from job import init_job_router
|
||||
|
||||
from mcp_bridge import attach_mcp, mcp_resource, mcp_template, mcp_tool
|
||||
|
||||
import ast
|
||||
import crawl4ai as _c4
|
||||
from pydantic import BaseModel, Field
|
||||
from slowapi import Limiter
|
||||
from slowapi.util import get_remote_address
|
||||
from prometheus_fastapi_instrumentator import Instrumentator
|
||||
from redis import asyncio as aioredis
|
||||
|
||||
# ── internal imports (after sys.path append) ─────────────────
|
||||
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
|
||||
from utils import FilterType, load_config, setup_logging, verify_email_domain
|
||||
from api import (
|
||||
handle_markdown_request,
|
||||
handle_llm_qa,
|
||||
handle_stream_crawl_request,
|
||||
handle_crawl_request,
|
||||
stream_results
|
||||
)
|
||||
from auth import create_access_token, get_token_dependency, TokenRequest # Import from auth.py
|
||||
|
||||
__version__ = "0.2.6"
|
||||
|
||||
class CrawlRequest(BaseModel):
|
||||
urls: List[str] = Field(min_length=1, max_length=100)
|
||||
browser_config: Optional[Dict] = Field(default_factory=dict)
|
||||
crawler_config: Optional[Dict] = Field(default_factory=dict)
|
||||
|
||||
# Load configuration and setup
|
||||
# ────────────────── configuration / logging ──────────────────
|
||||
config = load_config()
|
||||
setup_logging(config)
|
||||
|
||||
# Initialize Redis
|
||||
__version__ = "0.5.1-d1"
|
||||
|
||||
# ── global page semaphore (hard cap) ─────────────────────────
|
||||
MAX_PAGES = config["crawler"]["pool"].get("max_pages", 30)
|
||||
GLOBAL_SEM = asyncio.Semaphore(MAX_PAGES)
|
||||
|
||||
# import logging
|
||||
# page_log = logging.getLogger("page_cap")
|
||||
# orig_arun = AsyncWebCrawler.arun
|
||||
# async def capped_arun(self, *a, **kw):
|
||||
# await GLOBAL_SEM.acquire() # ← take slot
|
||||
# try:
|
||||
# in_flight = MAX_PAGES - GLOBAL_SEM._value # used permits
|
||||
# page_log.info("🕸️ pages_in_flight=%s / %s", in_flight, MAX_PAGES)
|
||||
# return await orig_arun(self, *a, **kw)
|
||||
# finally:
|
||||
# GLOBAL_SEM.release() # ← free slot
|
||||
|
||||
orig_arun = AsyncWebCrawler.arun
|
||||
|
||||
|
||||
async def capped_arun(self, *a, **kw):
|
||||
async with GLOBAL_SEM:
|
||||
return await orig_arun(self, *a, **kw)
|
||||
AsyncWebCrawler.arun = capped_arun
|
||||
|
||||
# ───────────────────── FastAPI lifespan ──────────────────────
|
||||
|
||||
|
||||
@asynccontextmanager
|
||||
async def lifespan(_: FastAPI):
|
||||
await get_crawler(BrowserConfig(
|
||||
extra_args=config["crawler"]["browser"].get("extra_args", []),
|
||||
**config["crawler"]["browser"].get("kwargs", {}),
|
||||
)) # warm‑up
|
||||
app.state.janitor = asyncio.create_task(janitor()) # idle GC
|
||||
yield
|
||||
app.state.janitor.cancel()
|
||||
await close_all()
|
||||
|
||||
# ───────────────────── FastAPI instance ──────────────────────
|
||||
app = FastAPI(
|
||||
title=config["app"]["title"],
|
||||
version=config["app"]["version"],
|
||||
lifespan=lifespan,
|
||||
)
|
||||
|
||||
# ── static playground ──────────────────────────────────────
|
||||
STATIC_DIR = pathlib.Path(__file__).parent / "static" / "playground"
|
||||
if not STATIC_DIR.exists():
|
||||
raise RuntimeError(f"Playground assets not found at {STATIC_DIR}")
|
||||
app.mount(
|
||||
"/playground",
|
||||
StaticFiles(directory=STATIC_DIR, html=True),
|
||||
name="play",
|
||||
)
|
||||
|
||||
# Serve noVNC static files if available
|
||||
VNC_DIR = pathlib.Path("/opt/novnc")
|
||||
if VNC_DIR.exists():
|
||||
app.mount("/novnc", StaticFiles(directory=VNC_DIR, html=True), name="novnc")
|
||||
|
||||
|
||||
@app.get("/")
|
||||
async def root():
|
||||
return RedirectResponse("/playground")
|
||||
|
||||
|
||||
@app.get("/vnc")
|
||||
async def vnc_page(request: Request):
|
||||
"""Return a simple page embedding the noVNC client."""
|
||||
url = f"{get_base_url(request)}/novnc/vnc.html?autoconnect=true&resize=scale"
|
||||
html = f"<iframe src='{url}' width='1024' height='768' style='border:none'></iframe>"
|
||||
return HTMLResponse(f"<html><body>{html}</body></html>")
|
||||
|
||||
|
||||
@app.get("/vnc/url")
|
||||
async def vnc_url(request: Request):
|
||||
"""Return the direct URL to the noVNC client."""
|
||||
url = f"{get_base_url(request)}/novnc/vnc.html?autoconnect=true&resize=scale"
|
||||
return {"url": url}
|
||||
|
||||
# ─────────────────── infra / middleware ─────────────────────
|
||||
redis = aioredis.from_url(config["redis"].get("uri", "redis://localhost"))
|
||||
|
||||
# Initialize rate limiter
|
||||
limiter = Limiter(
|
||||
key_func=get_remote_address,
|
||||
default_limits=[config["rate_limiting"]["default_limit"]],
|
||||
storage_uri=config["rate_limiting"]["storage_uri"]
|
||||
storage_uri=config["rate_limiting"]["storage_uri"],
|
||||
)
|
||||
|
||||
app = FastAPI(
|
||||
title=config["app"]["title"],
|
||||
version=config["app"]["version"]
|
||||
)
|
||||
|
||||
# Configure middleware
|
||||
def setup_security_middleware(app, config):
|
||||
sec_config = config.get("security", {})
|
||||
if sec_config.get("enabled", False):
|
||||
if sec_config.get("https_redirect", False):
|
||||
app.add_middleware(HTTPSRedirectMiddleware)
|
||||
if sec_config.get("trusted_hosts", []) != ["*"]:
|
||||
app.add_middleware(TrustedHostMiddleware, allowed_hosts=sec_config["trusted_hosts"])
|
||||
def _setup_security(app_: FastAPI):
|
||||
sec = config["security"]
|
||||
if not sec["enabled"]:
|
||||
return
|
||||
if sec.get("https_redirect"):
|
||||
app_.add_middleware(HTTPSRedirectMiddleware)
|
||||
if sec.get("trusted_hosts", []) != ["*"]:
|
||||
app_.add_middleware(
|
||||
TrustedHostMiddleware, allowed_hosts=sec["trusted_hosts"]
|
||||
)
|
||||
|
||||
setup_security_middleware(app, config)
|
||||
|
||||
# Prometheus instrumentation
|
||||
_setup_security(app)
|
||||
|
||||
if config["observability"]["prometheus"]["enabled"]:
|
||||
Instrumentator().instrument(app).expose(app)
|
||||
|
||||
# Get token dependency based on config
|
||||
token_dependency = get_token_dependency(config)
|
||||
token_dep = get_token_dependency(config)
|
||||
|
||||
|
||||
# Middleware for security headers
|
||||
@app.middleware("http")
|
||||
async def add_security_headers(request: Request, call_next):
|
||||
response = await call_next(request)
|
||||
resp = await call_next(request)
|
||||
if config["security"]["enabled"]:
|
||||
response.headers.update(config["security"]["headers"])
|
||||
return response
|
||||
resp.headers.update(config["security"]["headers"])
|
||||
return resp
|
||||
|
||||
# Token endpoint (always available, but usage depends on config)
|
||||
# ───────────────── safe config‑dump helper ─────────────────
|
||||
ALLOWED_TYPES = {
|
||||
"CrawlerRunConfig": CrawlerRunConfig,
|
||||
"BrowserConfig": BrowserConfig,
|
||||
}
|
||||
|
||||
|
||||
def _safe_eval_config(expr: str) -> dict:
|
||||
"""
|
||||
Accept exactly one top‑level call to CrawlerRunConfig(...) or BrowserConfig(...).
|
||||
Whatever is inside the parentheses is fine *except* further function calls
|
||||
(so no __import__('os') stuff). All public names from crawl4ai are available
|
||||
when we eval.
|
||||
"""
|
||||
tree = ast.parse(expr, mode="eval")
|
||||
|
||||
# must be a single call
|
||||
if not isinstance(tree.body, ast.Call):
|
||||
raise ValueError("Expression must be a single constructor call")
|
||||
|
||||
call = tree.body
|
||||
if not (isinstance(call.func, ast.Name) and call.func.id in {"CrawlerRunConfig", "BrowserConfig"}):
|
||||
raise ValueError(
|
||||
"Only CrawlerRunConfig(...) or BrowserConfig(...) are allowed")
|
||||
|
||||
# forbid nested calls to keep the surface tiny
|
||||
for node in ast.walk(call):
|
||||
if isinstance(node, ast.Call) and node is not call:
|
||||
raise ValueError("Nested function calls are not permitted")
|
||||
|
||||
# expose everything that crawl4ai exports, nothing else
|
||||
safe_env = {name: getattr(_c4, name)
|
||||
for name in dir(_c4) if not name.startswith("_")}
|
||||
obj = eval(compile(tree, "<config>", "eval"),
|
||||
{"__builtins__": {}}, safe_env)
|
||||
return obj.dump()
|
||||
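A hedged illustration of what `_safe_eval_config` accepts and rejects, based only on the checks above:

```python
# Accepted: a single top-level constructor call with literal arguments.
print(_safe_eval_config('CrawlerRunConfig(stream=True, screenshot=False)'))

# Rejected: not a constructor call at all.
# _safe_eval_config('1 + 1')  -> ValueError("Expression must be a single constructor call")

# Rejected: nested calls are forbidden, which also blocks __import__-style escapes.
# _safe_eval_config('BrowserConfig(extra_args=list("x"))')  -> ValueError("Nested function calls are not permitted")
```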
|
||||
|
||||
# ── job router ──────────────────────────────────────────────
|
||||
app.include_router(init_job_router(redis, config, token_dep))
|
||||
|
||||
# ──────────────────────── Endpoints ──────────────────────────
|
||||
@app.post("/token")
|
||||
async def get_token(request_data: TokenRequest):
|
||||
if not verify_email_domain(request_data.email):
|
||||
raise HTTPException(status_code=400, detail="Invalid email domain")
|
||||
token = create_access_token({"sub": request_data.email})
|
||||
return {"email": request_data.email, "access_token": token, "token_type": "bearer"}
|
||||
async def get_token(req: TokenRequest):
|
||||
if not verify_email_domain(req.email):
|
||||
raise HTTPException(400, "Invalid email domain")
|
||||
token = create_access_token({"sub": req.email})
|
||||
return {"email": req.email, "access_token": token, "token_type": "bearer"}
|
||||
|
||||
# Endpoints with conditional auth
|
||||
@app.get("/md/{url:path}")
|
||||
|
||||
@app.post("/config/dump")
|
||||
async def config_dump(raw: RawCode):
|
||||
try:
|
||||
return JSONResponse(_safe_eval_config(raw.code.strip()))
|
||||
except Exception as e:
|
||||
raise HTTPException(400, str(e))
|
||||
|
||||
|
||||
@app.post("/md")
|
||||
@limiter.limit(config["rate_limiting"]["default_limit"])
|
||||
@mcp_tool("md")
|
||||
async def get_markdown(
|
||||
request: Request,
|
||||
url: str,
|
||||
f: FilterType = FilterType.FIT,
|
||||
q: Optional[str] = None,
|
||||
c: Optional[str] = "0",
|
||||
token_data: Optional[Dict] = Depends(token_dependency)
|
||||
body: MarkdownRequest,
|
||||
_td: Dict = Depends(token_dep),
|
||||
):
|
||||
result = await handle_markdown_request(url, f, q, c, config)
|
||||
return PlainTextResponse(result)
|
||||
if not body.url.startswith(("http://", "https://")):
|
||||
raise HTTPException(
|
||||
400, "URL must be absolute and start with http/https")
|
||||
markdown = await handle_markdown_request(
|
||||
body.url, body.f, body.q, body.c, config
|
||||
)
|
||||
return JSONResponse({
|
||||
"url": body.url,
|
||||
"filter": body.f,
|
||||
"query": body.q,
|
||||
"cache": body.c,
|
||||
"markdown": markdown,
|
||||
"success": True
|
||||
})
|
||||
|
||||
@app.get("/llm/{url:path}", description="URL should be without http/https prefix")
|
||||
|
||||
@app.post("/html")
|
||||
@limiter.limit(config["rate_limiting"]["default_limit"])
|
||||
@mcp_tool("html")
|
||||
async def generate_html(
|
||||
request: Request,
|
||||
body: HTMLRequest,
|
||||
_td: Dict = Depends(token_dep),
|
||||
):
|
||||
"""
|
||||
Crawls the URL, preprocesses the raw HTML for schema extraction, and returns the processed HTML.
|
||||
Use when you need sanitized HTML structures for building schemas or further processing.
|
||||
"""
|
||||
cfg = CrawlerRunConfig()
|
||||
async with AsyncWebCrawler(config=BrowserConfig()) as crawler:
|
||||
results = await crawler.arun(url=body.url, config=cfg)
|
||||
raw_html = results[0].html
|
||||
from crawl4ai.utils import preprocess_html_for_schema
|
||||
processed_html = preprocess_html_for_schema(raw_html)
|
||||
return JSONResponse({"html": processed_html, "url": body.url, "success": True})
|
||||
|
||||
# Screenshot endpoint
|
||||
|
||||
|
||||
@app.post("/screenshot")
|
||||
@limiter.limit(config["rate_limiting"]["default_limit"])
|
||||
@mcp_tool("screenshot")
|
||||
async def generate_screenshot(
|
||||
request: Request,
|
||||
body: ScreenshotRequest,
|
||||
_td: Dict = Depends(token_dep),
|
||||
):
|
||||
"""
|
||||
Capture a full-page PNG screenshot of the specified URL, waiting an optional delay before capture.
Use when you need an image snapshot of the rendered page. It is recommended to provide an output path to save the screenshot;
the result will then contain the path to the saved file instead of the screenshot data.
|
||||
"""
|
||||
cfg = CrawlerRunConfig(
|
||||
screenshot=True, screenshot_wait_for=body.screenshot_wait_for)
|
||||
async with AsyncWebCrawler(config=BrowserConfig()) as crawler:
|
||||
results = await crawler.arun(url=body.url, config=cfg)
|
||||
screenshot_data = results[0].screenshot
|
||||
if body.output_path:
|
||||
abs_path = os.path.abspath(body.output_path)
|
||||
os.makedirs(os.path.dirname(abs_path), exist_ok=True)
|
||||
with open(abs_path, "wb") as f:
|
||||
f.write(base64.b64decode(screenshot_data))
|
||||
return {"success": True, "path": abs_path}
|
||||
return {"success": True, "screenshot": screenshot_data}
|
||||
|
||||
# PDF endpoint
|
||||
|
||||
|
||||
@app.post("/pdf")
|
||||
@limiter.limit(config["rate_limiting"]["default_limit"])
|
||||
@mcp_tool("pdf")
|
||||
async def generate_pdf(
|
||||
request: Request,
|
||||
body: PDFRequest,
|
||||
_td: Dict = Depends(token_dep),
|
||||
):
|
||||
"""
|
||||
Generate a PDF document of the specified URL.
Use when you need a printable or archivable snapshot of the page. It is recommended to provide an output path to save the PDF;
the result will then contain the path to the saved file instead of the PDF data.
|
||||
"""
|
||||
cfg = CrawlerRunConfig(pdf=True)
|
||||
async with AsyncWebCrawler(config=BrowserConfig()) as crawler:
|
||||
results = await crawler.arun(url=body.url, config=cfg)
|
||||
pdf_data = results[0].pdf
|
||||
if body.output_path:
|
||||
abs_path = os.path.abspath(body.output_path)
|
||||
os.makedirs(os.path.dirname(abs_path), exist_ok=True)
|
||||
with open(abs_path, "wb") as f:
|
||||
f.write(pdf_data)
|
||||
return {"success": True, "path": abs_path}
|
||||
return {"success": True, "pdf": base64.b64encode(pdf_data).decode()}
|
||||
|
||||
|
||||
@app.post("/execute_js")
|
||||
@limiter.limit(config["rate_limiting"]["default_limit"])
|
||||
@mcp_tool("execute_js")
|
||||
async def execute_js(
|
||||
request: Request,
|
||||
body: JSEndpointRequest,
|
||||
_td: Dict = Depends(token_dep),
|
||||
):
|
||||
"""
|
||||
Execute a sequence of JavaScript snippets on the specified URL.
|
||||
Return the full CrawlResult JSON (first result).
|
||||
Use this when you need to interact with dynamic pages using JS.
|
||||
REMEMBER: `scripts` is a list of separate JS snippets; they are executed in order.
IMPORTANT: Each snippet should be an expression that returns a value, such as an IIFE or a sync/async function.
Each snippet replaces '{script}' and runs in the browser context, so provide an IIFE or a sync/async function that returns a value.
|
||||
Return Format:
|
||||
- The return result is an instance of CrawlResult, so you have access to markdown, links, and other stuff. If this is enough, you don't need to call again for other endpoints.
|
||||
|
||||
```python
|
||||
class CrawlResult(BaseModel):
|
||||
url: str
|
||||
html: str
|
||||
success: bool
|
||||
cleaned_html: Optional[str] = None
|
||||
media: Dict[str, List[Dict]] = {}
|
||||
links: Dict[str, List[Dict]] = {}
|
||||
downloaded_files: Optional[List[str]] = None
|
||||
js_execution_result: Optional[Dict[str, Any]] = None
|
||||
screenshot: Optional[str] = None
|
||||
pdf: Optional[bytes] = None
|
||||
mhtml: Optional[str] = None
|
||||
_markdown: Optional[MarkdownGenerationResult] = PrivateAttr(default=None)
|
||||
extracted_content: Optional[str] = None
|
||||
metadata: Optional[dict] = None
|
||||
error_message: Optional[str] = None
|
||||
session_id: Optional[str] = None
|
||||
response_headers: Optional[dict] = None
|
||||
status_code: Optional[int] = None
|
||||
ssl_certificate: Optional[SSLCertificate] = None
|
||||
dispatch_result: Optional[DispatchResult] = None
|
||||
redirected_url: Optional[str] = None
|
||||
network_requests: Optional[List[Dict[str, Any]]] = None
|
||||
console_messages: Optional[List[Dict[str, Any]]] = None
|
||||
|
||||
class MarkdownGenerationResult(BaseModel):
|
||||
raw_markdown: str
|
||||
markdown_with_citations: str
|
||||
references_markdown: str
|
||||
fit_markdown: Optional[str] = None
|
||||
fit_html: Optional[str] = None
|
||||
```
|
||||
|
||||
"""
|
||||
cfg = CrawlerRunConfig(js_code=body.scripts)
|
||||
async with AsyncWebCrawler(config=BrowserConfig()) as crawler:
|
||||
results = await crawler.arun(url=body.url, config=cfg)
|
||||
# Return JSON-serializable dict of the first CrawlResult
|
||||
data = results[0].model_dump()
|
||||
return JSONResponse(data)
|
||||
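A hedged example request for the /execute_js endpoint above; the URL, port, and snippets are illustrative, and each snippet is an expression returning a value, as the docstring requires:

```python
# Hypothetical client call: run two JS snippets in order and inspect the CrawlResult.
import httpx

payload = {
    "url": "https://example.com",
    "scripts": [
        "(() => document.title)()",
        "(async () => { window.scrollTo(0, document.body.scrollHeight); return true; })()",
    ],
}
r = httpx.post("http://localhost:11235/execute_js", json=payload)  # server address assumed
print(r.json().get("js_execution_result"))
```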
|
||||
|
||||
@app.get("/llm/{url:path}")
|
||||
async def llm_endpoint(
|
||||
request: Request,
|
||||
url: str = Path(...),
|
||||
q: Optional[str] = Query(None),
|
||||
token_data: Optional[Dict] = Depends(token_dependency)
|
||||
q: str = Query(...),
|
||||
_td: Dict = Depends(token_dep),
|
||||
):
|
||||
if not q:
|
||||
raise HTTPException(status_code=400, detail="Query parameter 'q' is required")
|
||||
if not url.startswith(('http://', 'https://')):
|
||||
url = 'https://' + url
|
||||
try:
|
||||
answer = await handle_llm_qa(url, q, config)
|
||||
return JSONResponse({"answer": answer})
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
raise HTTPException(400, "Query parameter 'q' is required")
|
||||
if not url.startswith(("http://", "https://")):
|
||||
url = "https://" + url
|
||||
answer = await handle_llm_qa(url, q, config)
|
||||
return JSONResponse({"answer": answer})
|
||||
|
||||
|
||||
@app.get("/schema")
|
||||
async def get_schema():
|
||||
from crawl4ai import BrowserConfig, CrawlerRunConfig
|
||||
return {"browser": BrowserConfig().dump(), "crawler": CrawlerRunConfig().dump()}
|
||||
return {"browser": BrowserConfig().dump(),
|
||||
"crawler": CrawlerRunConfig().dump()}
|
||||
|
||||
|
||||
@app.get(config["observability"]["health_check"]["endpoint"])
|
||||
async def health():
|
||||
return {"status": "ok", "timestamp": time.time(), "version": __version__}
|
||||
|
||||
|
||||
@app.get(config["observability"]["prometheus"]["endpoint"])
|
||||
async def metrics():
|
||||
return RedirectResponse(url=config["observability"]["prometheus"]["endpoint"])
|
||||
return RedirectResponse(config["observability"]["prometheus"]["endpoint"])
|
||||
|
||||
|
||||
@app.post("/crawl")
|
||||
@limiter.limit(config["rate_limiting"]["default_limit"])
|
||||
@mcp_tool("crawl")
|
||||
async def crawl(
|
||||
request: Request,
|
||||
crawl_request: CrawlRequest,
|
||||
token_data: Optional[Dict] = Depends(token_dependency)
|
||||
_td: Dict = Depends(token_dep),
|
||||
):
|
||||
"""
|
||||
Crawl a list of URLs and return the results as JSON.
|
||||
"""
|
||||
if not crawl_request.urls:
|
||||
raise HTTPException(status_code=400, detail="At least one URL required")
|
||||
|
||||
results = await handle_crawl_request(
|
||||
raise HTTPException(400, "At least one URL required")
|
||||
res = await handle_crawl_request(
|
||||
urls=crawl_request.urls,
|
||||
browser_config=crawl_request.browser_config,
|
||||
crawler_config=crawl_request.crawler_config,
|
||||
config=config
|
||||
config=config,
|
||||
)
|
||||
|
||||
return JSONResponse(results)
|
||||
return JSONResponse(res)
|
||||
|
||||
|
||||
@app.post("/crawl/stream")
|
||||
@@ -152,24 +479,161 @@ async def crawl(
|
||||
async def crawl_stream(
|
||||
request: Request,
|
||||
crawl_request: CrawlRequest,
|
||||
token_data: Optional[Dict] = Depends(token_dependency)
|
||||
_td: Dict = Depends(token_dep),
|
||||
):
|
||||
if not crawl_request.urls:
|
||||
raise HTTPException(status_code=400, detail="At least one URL required")
|
||||
|
||||
crawler, results_gen = await handle_stream_crawl_request(
|
||||
raise HTTPException(400, "At least one URL required")
|
||||
crawler, gen = await handle_stream_crawl_request(
|
||||
urls=crawl_request.urls,
|
||||
browser_config=crawl_request.browser_config,
|
||||
crawler_config=crawl_request.crawler_config,
|
||||
config=config
|
||||
config=config,
|
||||
)
|
||||
|
||||
return StreamingResponse(
|
||||
stream_results(crawler, results_gen),
|
||||
media_type='application/x-ndjson',
|
||||
headers={'Cache-Control': 'no-cache', 'Connection': 'keep-alive', 'X-Stream-Status': 'active'}
|
||||
stream_results(crawler, gen),
|
||||
media_type="application/x-ndjson",
|
||||
headers={
|
||||
"Cache-Control": "no-cache",
|
||||
"Connection": "keep-alive",
|
||||
"X-Stream-Status": "active",
|
||||
},
|
||||
)
|
||||
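A hedged sketch of consuming the NDJSON stream returned above; the server address is assumed, and each emitted line is one JSON-encoded crawl result:

```python
# Hypothetical streaming client for /crawl/stream: one JSON object per line.
import json
import httpx

payload = {"urls": ["https://example.com", "https://example.org"]}
with httpx.stream("POST", "http://localhost:11235/crawl/stream", json=payload, timeout=None) as r:
    for line in r.iter_lines():
        if line:
            result = json.loads(line)
            print(result.get("url"), result.get("success"))
```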
|
||||
|
||||
def chunk_code_functions(code_md: str) -> List[str]:
|
||||
"""Extract each function/class from markdown code blocks per file."""
|
||||
pattern = re.compile(
|
||||
# match "## File: <path>" then a ```py fence, then capture until the closing ```
|
||||
r'##\s*File:\s*(?P<path>.+?)\s*?\r?\n' # file header
|
||||
r'```py\s*?\r?\n' # opening fence
|
||||
r'(?P<code>.*?)(?=\r?\n```)', # code block
|
||||
re.DOTALL
|
||||
)
|
||||
chunks: List[str] = []
|
||||
for m in pattern.finditer(code_md):
|
||||
file_path = m.group("path").strip()
|
||||
code_blk = m.group("code")
|
||||
tree = ast.parse(code_blk)
|
||||
lines = code_blk.splitlines()
|
||||
for node in tree.body:
|
||||
if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
|
||||
start = node.lineno - 1
|
||||
end = getattr(node, "end_lineno", start + 1)
|
||||
snippet = "\n".join(lines[start:end])
|
||||
chunks.append(f"# File: {file_path}\n{snippet}")
|
||||
return chunks
|
||||
|
||||
|
||||
def chunk_doc_sections(doc: str) -> List[str]:
|
||||
lines = doc.splitlines(keepends=True)
|
||||
sections = []
|
||||
current: List[str] = []
|
||||
for line in lines:
|
||||
if re.match(r"^#{1,6}\s", line):
|
||||
if current:
|
||||
sections.append("".join(current))
|
||||
current = [line]
|
||||
else:
|
||||
current.append(line)
|
||||
if current:
|
||||
sections.append("".join(current))
|
||||
return sections
|
||||
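A small illustration of how `chunk_doc_sections` (defined above) splits markdown on heading lines; the input is made up:

```python
doc = "# Intro\ntext\n## Usage\nmore text\n"
print(chunk_doc_sections(doc))
# -> ['# Intro\ntext\n', '## Usage\nmore text\n']
```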
|
||||
|
||||
@app.get("/ask")
|
||||
@limiter.limit(config["rate_limiting"]["default_limit"])
|
||||
@mcp_tool("ask")
|
||||
async def get_context(
|
||||
request: Request,
|
||||
_td: Dict = Depends(token_dep),
|
||||
context_type: str = Query("all", regex="^(code|doc|all)$"),
|
||||
query: Optional[str] = Query(
|
||||
None, description="search query to filter chunks"),
|
||||
score_ratio: float = Query(
|
||||
0.5, ge=0.0, le=1.0, description="min score as fraction of max_score"),
|
||||
max_results: int = Query(
|
||||
20, ge=1, description="absolute cap on returned chunks"),
|
||||
):
|
||||
"""
|
||||
This endpoint is designed for any questions about the Crawl4ai library. It returns plain-text markdown with extensive information about Crawl4ai.
You can use it as context for any AI assistant; use this endpoint when an assistant needs library context for decision making or code generation tasks.
It is BEST practice to always provide a query to filter the context; otherwise the response will be very long.
|
||||
|
||||
Parameters:
|
||||
- context_type: Specify "code" for code context, "doc" for documentation context, or "all" for both.
|
||||
- query: RECOMMENDED search query to filter paragraphs using BM25. You can leave this empty to get all the context.
|
||||
- score_ratio: Minimum score as a fraction of the maximum score for filtering results.
|
||||
- max_results: Maximum number of results to return. Default is 20.
|
||||
|
||||
Returns:
|
||||
- JSON response with the requested context.
|
||||
- If "code" is specified, returns the code context.
|
||||
- If "doc" is specified, returns the documentation context.
|
||||
- If "all" is specified, returns both code and documentation contexts.
|
||||
"""
|
||||
# load contexts
|
||||
base = os.path.dirname(__file__)
|
||||
code_path = os.path.join(base, "c4ai-code-context.md")
|
||||
doc_path = os.path.join(base, "c4ai-doc-context.md")
|
||||
if not os.path.exists(code_path) or not os.path.exists(doc_path):
|
||||
raise HTTPException(404, "Context files not found")
|
||||
|
||||
with open(code_path, "r") as f:
|
||||
code_content = f.read()
|
||||
with open(doc_path, "r") as f:
|
||||
doc_content = f.read()
|
||||
|
||||
# if no query, just return raw contexts
|
||||
if not query:
|
||||
if context_type == "code":
|
||||
return JSONResponse({"code_context": code_content})
|
||||
if context_type == "doc":
|
||||
return JSONResponse({"doc_context": doc_content})
|
||||
return JSONResponse({
|
||||
"code_context": code_content,
|
||||
"doc_context": doc_content,
|
||||
})
|
||||
|
||||
tokens = query.split()
|
||||
results: Dict[str, List[Dict[str, float]]] = {}
|
||||
|
||||
# code BM25 over functions/classes
|
||||
if context_type in ("code", "all"):
|
||||
code_chunks = chunk_code_functions(code_content)
|
||||
bm25 = BM25Okapi([c.split() for c in code_chunks])
|
||||
scores = bm25.get_scores(tokens)
|
||||
max_sc = float(scores.max()) if scores.size > 0 else 0.0
|
||||
cutoff = max_sc * score_ratio
|
||||
picked = [(c, s) for c, s in zip(code_chunks, scores) if s >= cutoff]
|
||||
picked = sorted(picked, key=lambda x: x[1], reverse=True)[:max_results]
|
||||
results["code_results"] = [{"text": c, "score": s} for c, s in picked]
|
||||
|
||||
# doc BM25 over markdown sections
|
||||
if context_type in ("doc", "all"):
|
||||
sections = chunk_doc_sections(doc_content)
|
||||
bm25d = BM25Okapi([sec.split() for sec in sections])
|
||||
scores_d = bm25d.get_scores(tokens)
|
||||
max_sd = float(scores_d.max()) if scores_d.size > 0 else 0.0
|
||||
cutoff_d = max_sd * score_ratio
|
||||
idxs = [i for i, s in enumerate(scores_d) if s >= cutoff_d]
|
||||
neighbors = set(i for idx in idxs for i in (idx-1, idx, idx+1))
|
||||
valid = [i for i in sorted(neighbors) if 0 <= i < len(sections)]
|
||||
valid = valid[:max_results]
|
||||
results["doc_results"] = [
|
||||
{"text": sections[i], "score": scores_d[i]} for i in valid
|
||||
]
|
||||
|
||||
return JSONResponse(results)
|
||||
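A hedged example query against the /ask endpoint described above; the server address is assumed, while the `doc_results` / `text` / `score` fields come from the code above:

```python
import httpx

params = {
    "context_type": "doc",
    "query": "deep crawling strategy",
    "score_ratio": 0.5,
    "max_results": 5,
}
r = httpx.get("http://localhost:11235/ask", params=params)  # server address assumed
for hit in r.json().get("doc_results", []):
    print(round(hit["score"], 2), hit["text"][:80])
```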
|
||||
|
||||
# attach MCP layer (adds /mcp/ws, /mcp/sse, /mcp/schema)
|
||||
print(f"MCP server running on {config['app']['host']}:{config['app']['port']}")
|
||||
attach_mcp(
|
||||
app,
|
||||
base_url=f"http://{config['app']['host']}:{config['app']['port']}"
|
||||
)
|
||||
|
||||
# ────────────────────────── cli ──────────────────────────────
|
||||
if __name__ == "__main__":
|
||||
import uvicorn
|
||||
uvicorn.run(
|
||||
@@ -177,5 +641,6 @@ if __name__ == "__main__":
|
||||
host=config["app"]["host"],
|
||||
port=config["app"]["port"],
|
||||
reload=config["app"]["reload"],
|
||||
timeout_keep_alive=config["app"]["timeout_keep_alive"]
|
||||
)
|
||||
timeout_keep_alive=config["app"]["timeout_keep_alive"],
|
||||
)
|
||||
# ─────────────────────────────────────────────────────────────
|
||||
|
||||
deploy/docker/static/playground/index.html (new file, 955 lines)
@@ -0,0 +1,955 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>Crawl4AI Playground</title>
|
||||
<script src="https://cdn.tailwindcss.com"></script>
|
||||
<script>
|
||||
tailwind.config = {
|
||||
theme: {
|
||||
extend: {
|
||||
colors: {
|
||||
primary: '#4EFFFF',
|
||||
primarydim: '#09b5a5',
|
||||
accent: '#F380F5',
|
||||
dark: '#070708',
|
||||
light: '#E8E9ED',
|
||||
secondary: '#D5CEBF',
|
||||
codebg: '#1E1E1E',
|
||||
surface: '#202020',
|
||||
border: '#3F3F44',
|
||||
},
|
||||
fontFamily: {
|
||||
mono: ['Fira Code', 'monospace'],
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
</script>
|
||||
<link href="https://fonts.googleapis.com/css2?family=Fira+Code:wght@400;500&display=swap" rel="stylesheet">
|
||||
<!-- Highlight.js -->
|
||||
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.9.0/styles/github-dark.min.css">
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.9.0/highlight.min.js"></script>
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/clipboard.js/2.0.11/clipboard.min.js"></script>
|
||||
<!-- CodeMirror (python mode) -->
|
||||
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.65.16/codemirror.min.css">
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.65.16/codemirror.min.js"></script>
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.65.16/mode/python/python.min.js"></script>
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.65.16/addon/edit/matchbrackets.min.js"></script>
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.65.16/addon/selection/active-line.min.js"></script>
|
||||
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.65.16/theme/darcula.min.css">
|
||||
<!-- <script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.9.0/languages/python.min.js"></script>
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.9.0/languages/bash.min.js"></script>
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.9.0/languages/json.min.js"></script> -->
|
||||
<style>
|
||||
/* Custom CodeMirror styling to match theme */
|
||||
.CodeMirror {
|
||||
background-color: #1E1E1E !important;
|
||||
color: #E8E9ED !important;
|
||||
border-radius: 4px;
|
||||
font-family: 'Fira Code', monospace;
|
||||
font-size: 0.9rem;
|
||||
}
|
||||
|
||||
.CodeMirror-gutters {
|
||||
background-color: #1E1E1E !important;
|
||||
border-right: 1px solid #3F3F44 !important;
|
||||
}
|
||||
|
||||
.CodeMirror-linenumber {
|
||||
color: #3F3F44 !important;
|
||||
}
|
||||
|
||||
.cm-s-darcula .cm-keyword {
|
||||
color: #4EFFFF !important;
|
||||
}
|
||||
|
||||
.cm-s-darcula .cm-string {
|
||||
color: #F380F5 !important;
|
||||
}
|
||||
|
||||
.cm-s-darcula .cm-number {
|
||||
color: #D5CEBF !important;
|
||||
}
|
||||
|
||||
/* Add to your <style> section or Tailwind config */
|
||||
.hljs {
|
||||
background: #1E1E1E !important;
|
||||
border-radius: 4px;
|
||||
padding: 1rem !important;
|
||||
}
|
||||
|
||||
pre code.hljs {
|
||||
display: block;
|
||||
overflow-x: auto;
|
||||
}
|
||||
|
||||
/* Language-specific colors */
|
||||
.hljs-attr {
|
||||
color: #4EFFFF;
|
||||
}
|
||||
|
||||
/* JSON keys */
|
||||
.hljs-string {
|
||||
color: #F380F5;
|
||||
}
|
||||
|
||||
/* Strings */
|
||||
.hljs-number {
|
||||
color: #D5CEBF;
|
||||
}
|
||||
|
||||
/* Numbers */
|
||||
.hljs-keyword {
|
||||
color: #4EFFFF;
|
||||
}
|
||||
|
||||
pre code {
|
||||
white-space: pre-wrap;
|
||||
word-break: break-word;
|
||||
}
|
||||
|
||||
.copy-btn {
|
||||
transition: all 0.2s ease;
|
||||
opacity: 0.7;
|
||||
}
|
||||
|
||||
.copy-btn:hover {
|
||||
opacity: 1;
|
||||
}
|
||||
|
||||
.tab-content:hover .copy-btn {
|
||||
opacity: 0.7;
|
||||
}
|
||||
|
||||
.tab-content:hover .copy-btn:hover {
|
||||
opacity: 1;
|
||||
}
|
||||
|
||||
/* copied text highlighted */
|
||||
.highlighted {
|
||||
background-color: rgba(78, 255, 255, 0.2) !important;
|
||||
transition: background-color 0.5s ease;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
|
||||
<body class="bg-dark text-light font-mono min-h-screen flex flex-col" style="font-feature-settings: 'calt' 0;">
|
||||
<!-- Header -->
|
||||
<header class="border-b border-border px-4 py-2 flex items-center">
|
||||
<h1 class="text-lg font-medium flex items-center space-x-4">
|
||||
<span>🚀🤖 <span class="text-primary">Crawl4AI</span> Playground</span>
|
||||
|
||||
<!-- GitHub badges -->
|
||||
<a href="https://github.com/unclecode/crawl4ai" target="_blank" class="flex space-x-1">
|
||||
<img src="https://img.shields.io/github/stars/unclecode/crawl4ai?style=social"
|
||||
alt="GitHub stars" class="h-5">
|
||||
<img src="https://img.shields.io/github/forks/unclecode/crawl4ai?style=social"
|
||||
alt="GitHub forks" class="h-5">
|
||||
</a>
|
||||
|
||||
<!-- Docs -->
|
||||
<a href="https://docs.crawl4ai.com" target="_blank"
|
||||
class="text-xs text-secondary hover:text-primary underline flex items-center">
|
||||
Docs
|
||||
</a>
|
||||
|
||||
<!-- X (Twitter) follow -->
|
||||
<a href="https://x.com/unclecode" target="_blank"
|
||||
class="hover:text-primary flex items-center" title="Follow @unclecode on X">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"
|
||||
class="w-4 h-4 fill-current mr-1">
|
||||
<path d="M22.46 6c-.77.35-1.6.58-2.46.69a4.27 4.27 0 001.88-2.35 8.53 8.53 0 01-2.71 1.04 4.24 4.24 0 00-7.23 3.87A12.05 12.05 0 013 4.62a4.24 4.24 0 001.31 5.65 4.2 4.2 0 01-1.92-.53v.05a4.24 4.24 0 003.4 4.16 4.31 4.31 0 01-1.91.07 4.25 4.25 0 003.96 2.95A8.5 8.5 0 012 19.55a12.04 12.04 0 006.53 1.92c7.84 0 12.13-6.49 12.13-12.13 0-.18-.01-.36-.02-.54A8.63 8.63 0 0024 5.1a8.45 8.45 0 01-2.54.7z"/>
|
||||
</svg>
|
||||
<span class="text-xs">@unclecode</span>
|
||||
</a>
|
||||
</h1>
|
||||
|
||||
<div class="ml-auto flex space-x-2">
|
||||
<button id="play-tab"
|
||||
class="px-3 py-1 rounded-t bg-surface border border-b-0 border-border text-primary">Playground</button>
|
||||
<button id="stress-tab" class="px-3 py-1 rounded-t border border-border hover:bg-surface">Stress
|
||||
Test</button>
|
||||
</div>
|
||||
</header>
|
||||
|
||||
<!-- Main Playground -->
|
||||
<main id="playground" class="flex-1 flex flex-col p-4 space-y-4 max-w-5xl w-full mx-auto">
|
||||
<!-- Request Builder -->
|
||||
<section class="bg-surface rounded-lg border border-border overflow-hidden">
|
||||
<div class="px-4 py-2 border-b border-border flex items-center">
|
||||
<h2 class="font-medium">Request Builder</h2>
|
||||
<select id="endpoint" class="ml-auto bg-dark border border-border rounded px-2 py-1 text-sm">
|
||||
<option value="crawl">/crawl (batch)</option>
|
||||
<option value="crawl_stream">/crawl/stream</option>
|
||||
<option value="md">/md</option>
|
||||
<option value="llm">/llm</option>
|
||||
</select>
|
||||
</div>
|
||||
<div class="p-4">
|
||||
<label class="block mb-2 text-sm">URL(s) - one per line</label>
|
||||
<textarea id="urls" class="w-full bg-dark border border-border rounded p-2 h-32 text-sm mb-4"
|
||||
spellcheck="false">https://example.com</textarea>
|
||||
|
||||
<!-- Specific options for /md endpoint -->
|
||||
<details id="md-options" class="mb-4 hidden">
|
||||
<summary class="text-sm text-secondary cursor-pointer">/md Options</summary>
|
||||
<div class="mt-2 space-y-3 p-2 border border-border rounded">
|
||||
<div>
|
||||
<label for="md-filter" class="block text-xs text-secondary mb-1">Filter Type</label>
|
||||
<select id="md-filter" class="bg-dark border border-border rounded px-2 py-1 text-sm w-full">
|
||||
<option value="fit">fit - Adaptive content filtering</option>
|
||||
<option value="raw">raw - No filtering</option>
|
||||
<option value="bm25">bm25 - BM25 keyword relevance</option>
|
||||
<option value="llm">llm - LLM-based filtering</option>
|
||||
</select>
|
||||
</div>
|
||||
<div>
|
||||
<label for="md-query" class="block text-xs text-secondary mb-1">Query (for BM25/LLM filters)</label>
|
||||
<input id="md-query" type="text" placeholder="Enter search terms or instructions"
|
||||
class="bg-dark border border-border rounded px-2 py-1 text-sm w-full">
|
||||
</div>
|
||||
<div>
|
||||
<label for="md-cache" class="block text-xs text-secondary mb-1">Cache Mode</label>
|
||||
<select id="md-cache" class="bg-dark border border-border rounded px-2 py-1 text-sm w-full">
|
||||
<option value="0">Write-Only (0)</option>
|
||||
<option value="1">Enabled (1)</option>
|
||||
</select>
|
||||
</div>
|
||||
</div>
|
||||
</details>
|
||||
|
||||
<!-- Specific options for /llm endpoint -->
|
||||
<details id="llm-options" class="mb-4 hidden">
|
||||
<summary class="text-sm text-secondary cursor-pointer">/llm Options</summary>
|
||||
<div class="mt-2 space-y-3 p-2 border border-border rounded">
|
||||
<div>
|
||||
<label for="llm-question" class="block text-xs text-secondary mb-1">Question</label>
|
||||
<input id="llm-question" type="text" value="What is this page about?"
|
||||
class="bg-dark border border-border rounded px-2 py-1 text-sm w-full">
|
||||
</div>
|
||||
</div>
|
||||
</details>
|
||||
|
||||
<!-- Advanced config for /crawl endpoints -->
|
||||
<details id="adv-config" class="mb-4">
|
||||
<summary class="text-sm text-secondary cursor-pointer">Advanced Config <span
|
||||
class="text-xs text-primary">(Python → auto‑JSON)</span></summary>
|
||||
|
||||
<!-- Toolbar -->
|
||||
<div class="flex items-center justify-end space-x-3 mt-2">
|
||||
<label for="cfg-type" class="text-xs text-secondary">Type:</label>
|
||||
<select id="cfg-type"
|
||||
class="bg-dark border border-border rounded px-1 py-0.5 text-xs">
|
||||
<option value="CrawlerRunConfig">CrawlerRunConfig</option>
|
||||
<option value="BrowserConfig">BrowserConfig</option>
|
||||
</select>
|
||||
|
||||
<!-- help link -->
|
||||
<a href="https://docs.crawl4ai.com/api/parameters/"
|
||||
target="_blank"
|
||||
class="text-xs text-primary hover:underline flex items-center space-x-1"
|
||||
title="Open parameter reference in new tab">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"
|
||||
class="w-4 h-4 fill-current">
|
||||
<path d="M13 3h8v8h-2V6.41l-9.29 9.3-1.42-1.42 9.3-9.29H13V3z"/>
|
||||
<path d="M5 5h4V3H3v6h2V5zm0 14v-4H3v6h6v-2H5z"/>
|
||||
</svg>
|
||||
<span>Docs</span>
|
||||
</a>
|
||||
|
||||
<span id="cfg-status" class="text-xs text-secondary ml-2"></span>
|
||||
</div>
|
||||
|
||||
<!-- CodeMirror host -->
|
||||
<div id="adv-editor" class="mt-2 border border-border rounded overflow-hidden h-40"></div>
|
||||
</details>
|
||||
|
||||
<div class="flex space-x-2">
|
||||
<button id="run-btn" class="bg-primary text-dark px-4 py-2 rounded hover:bg-primarydim font-medium">
|
||||
Run (⌘/Ctrl+Enter)
|
||||
</button>
|
||||
<button id="export-btn" class="border border-border px-4 py-2 rounded hover:bg-surface hidden">
|
||||
Export Python Code
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</section>
|
||||
|
||||
<!-- Execution Status -->
|
||||
<section id="execution-status" class="hidden bg-surface rounded-lg border border-border p-3 text-sm">
|
||||
<div class="flex space-x-4">
|
||||
<div id="status-badge" class="flex items-center">
|
||||
<span class="w-3 h-3 rounded-full mr-2"></span>
|
||||
<span>Ready</span>
|
||||
</div>
|
||||
<div>
|
||||
<span class="text-secondary">Time:</span>
|
||||
<span id="exec-time" class="text-light">-</span>
|
||||
</div>
|
||||
<div>
|
||||
<span class="text-secondary">Memory:</span>
|
||||
<span id="exec-mem" class="text-light">-</span>
|
||||
</div>
|
||||
</div>
|
||||
</section>
|
||||
|
||||
<!-- Response Viewer -->
|
||||
<!-- Update the Response Viewer section -->
|
||||
<section class="bg-surface rounded-lg border border-border overflow-hidden flex-1 flex flex-col">
|
||||
<div class="border-b border-border flex">
|
||||
<button data-tab="response" class="tab-btn active px-4 py-2 border-r border-border">Response</button>
|
||||
<button data-tab="python" class="tab-btn px-4 py-2 border-r border-border">Python</button>
|
||||
<button data-tab="curl" class="tab-btn px-4 py-2">cURL</button>
|
||||
</div>
|
||||
<div class="flex-1 overflow-auto relative">
|
||||
<!-- Response Tab -->
|
||||
<div class="tab-content active h-full">
|
||||
<div class="absolute right-2 top-2">
|
||||
<button class="copy-btn bg-surface border border-border rounded px-2 py-1 text-xs hover:bg-dark"
|
||||
data-target="#response-content code">
|
||||
Copy
|
||||
</button>
|
||||
</div>
|
||||
<pre id="response-content" class="p-4 text-sm h-full"><code class="json hljs">{}</code></pre>
|
||||
</div>
|
||||
|
||||
<!-- Python Tab -->
|
||||
<div class="tab-content hidden h-full">
|
||||
<div class="absolute right-2 top-2">
|
||||
<button class="copy-btn bg-surface border border-border rounded px-2 py-1 text-xs hover:bg-dark"
|
||||
data-target="#python-content code">
|
||||
Copy
|
||||
</button>
|
||||
</div>
|
||||
<pre id="python-content" class="p-4 text-sm h-full"><code class="python hljs"></code></pre>
|
||||
</div>
|
||||
|
||||
<!-- cURL Tab -->
|
||||
<div class="tab-content hidden h-full">
|
||||
<div class="absolute right-2 top-2">
|
||||
<button class="copy-btn bg-surface border border-border rounded px-2 py-1 text-xs hover:bg-dark"
|
||||
data-target="#curl-content code">
|
||||
Copy
|
||||
</button>
|
||||
</div>
|
||||
<pre id="curl-content" class="p-4 text-sm h-full"><code class="bash hljs"></code></pre>
|
||||
</div>
|
||||
</div>
|
||||
</section>
|
||||
</main>
|
||||
|
||||
<!-- Stress Test Modal -->
|
||||
<div id="stress-modal"
|
||||
class="hidden fixed inset-0 bg-black bg-opacity-70 z-50 flex items-center justify-center p-4">
|
||||
<div class="bg-surface rounded-lg border border-accent w-full max-w-3xl max-h-[90vh] flex flex-col">
|
||||
<div class="px-4 py-2 border-b border-border flex items-center">
|
||||
<h2 class="font-medium text-accent">🔥 Stress Test</h2>
|
||||
<button id="close-stress" class="ml-auto text-secondary hover:text-light">×</button>
|
||||
</div>
|
||||
|
||||
<div class="p-4 space-y-4 flex-1 overflow-auto">
|
||||
<div class="grid grid-cols-3 gap-4">
|
||||
<div>
|
||||
<label class="block text-sm mb-1">Total URLs</label>
|
||||
<input id="st-total" type="number" value="20"
|
||||
class="w-full bg-dark border border-border rounded px-3 py-1">
|
||||
</div>
|
||||
<div>
|
||||
<label class="block text-sm mb-1">Chunk Size</label>
|
||||
<input id="st-chunk" type="number" value="5"
|
||||
class="w-full bg-dark border border-border rounded px-3 py-1">
|
||||
</div>
|
||||
<div>
|
||||
<label class="block text-sm mb-1">Concurrency</label>
|
||||
<input id="st-conc" type="number" value="2"
|
||||
class="w-full bg-dark border border-border rounded px-3 py-1">
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="flex items-center">
|
||||
<input id="st-stream" type="checkbox" class="mr-2">
|
||||
<label for="st-stream" class="text-sm">Use /crawl/stream</label>
|
||||
<button id="st-run"
|
||||
class="ml-auto bg-accent text-dark px-4 py-2 rounded hover:bg-opacity-90 font-medium">
|
||||
Run Stress Test
|
||||
</button>
|
||||
</div>
|
||||
|
||||
<div class="mt-4">
|
||||
<div class="bg-dark rounded border border-border p-3 h-64 overflow-auto text-sm whitespace-break-spaces"
|
||||
id="stress-log"></div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="px-4 py-2 border-t border-border text-sm text-secondary">
|
||||
<div class="flex justify-between">
|
||||
<span>Completed: <span id="stress-completed">0</span>/<span id="stress-total">0</span></span>
|
||||
<span>Avg. Time: <span id="stress-avg-time">0</span>ms</span>
|
||||
<span>Peak Memory: <span id="stress-peak-mem">0</span>MB</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<script>
|
||||
// Tab switching
|
||||
document.querySelectorAll('.tab-btn').forEach(btn => {
|
||||
btn.addEventListener('click', () => {
|
||||
document.querySelectorAll('.tab-btn').forEach(b => b.classList.remove('active'));
|
||||
document.querySelectorAll('.tab-content').forEach(c => c.classList.add('hidden'));
|
||||
|
||||
btn.classList.add('active');
|
||||
const tabName = btn.dataset.tab;
|
||||
document.querySelector(`#${tabName}-content`).parentElement.classList.remove('hidden');
|
||||
|
||||
// Re-highlight content when switching tabs
|
||||
const activeCode = document.querySelector(`#${tabName}-content code`);
|
||||
if (activeCode) {
|
||||
forceHighlightElement(activeCode);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
// View switching
|
||||
document.getElementById('play-tab').addEventListener('click', () => {
|
||||
document.getElementById('playground').classList.remove('hidden');
|
||||
document.getElementById('stress-modal').classList.add('hidden');
|
||||
document.getElementById('play-tab').classList.add('bg-surface', 'border-b-0');
|
||||
document.getElementById('stress-tab').classList.remove('bg-surface', 'border-b-0');
|
||||
});
|
||||
|
||||
document.getElementById('stress-tab').addEventListener('click', () => {
|
||||
document.getElementById('stress-modal').classList.remove('hidden');
|
||||
document.getElementById('stress-tab').classList.add('bg-surface', 'border-b-0');
|
||||
document.getElementById('play-tab').classList.remove('bg-surface', 'border-b-0');
|
||||
});
|
||||
|
||||
document.getElementById('close-stress').addEventListener('click', () => {
|
||||
document.getElementById('stress-modal').classList.add('hidden');
|
||||
document.getElementById('play-tab').classList.add('bg-surface', 'border-b-0');
|
||||
document.getElementById('stress-tab').classList.remove('bg-surface', 'border-b-0');
|
||||
});
|
||||
|
||||
// Initialize clipboard and highlight.js
|
||||
new ClipboardJS('#export-btn');
|
||||
hljs.highlightAll();
|
||||
|
||||
// Keyboard shortcut
|
||||
window.addEventListener('keydown', e => {
|
||||
if ((e.ctrlKey || e.metaKey) && e.key === 'Enter') {
|
||||
document.getElementById('run-btn').click();
|
||||
}
|
||||
});
|
||||
|
||||
// ================ ADVANCED CONFIG EDITOR ================
|
||||
const cm = CodeMirror(document.getElementById('adv-editor'), {
|
||||
value: `CrawlerRunConfig(
|
||||
stream=True,
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
)`,
|
||||
mode: 'python',
|
||||
lineNumbers: true,
|
||||
theme: 'darcula',
|
||||
tabSize: 4,
|
||||
styleActiveLine: true,
|
||||
matchBrackets: true,
|
||||
gutters: ["CodeMirror-linenumbers"],
|
||||
lineWrapping: true,
|
||||
});
|
||||
|
||||
const TEMPLATES = {
|
||||
CrawlerRunConfig: `CrawlerRunConfig(
|
||||
stream=True,
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
)`,
|
||||
BrowserConfig: `BrowserConfig(
|
||||
headless=True,
|
||||
extra_args=[
|
||||
"--no-sandbox",
|
||||
"--disable-gpu",
|
||||
],
|
||||
)`,
|
||||
};
|
||||
|
||||
document.getElementById('cfg-type').addEventListener('change', (e) => {
|
||||
cm.setValue(TEMPLATES[e.target.value]);
|
||||
document.getElementById('cfg-status').textContent = '';
|
||||
});
|
||||
|
||||
// Handle endpoint selection change to show appropriate options
|
||||
document.getElementById('endpoint').addEventListener('change', function(e) {
|
||||
const endpoint = e.target.value;
|
||||
const mdOptions = document.getElementById('md-options');
|
||||
const llmOptions = document.getElementById('llm-options');
|
||||
const advConfig = document.getElementById('adv-config');
|
||||
|
||||
// Hide all option sections first
|
||||
mdOptions.classList.add('hidden');
|
||||
llmOptions.classList.add('hidden');
|
||||
advConfig.classList.add('hidden');
|
||||
|
||||
// Show the appropriate section based on endpoint
|
||||
if (endpoint === 'md') {
|
||||
mdOptions.classList.remove('hidden');
|
||||
// Auto-open the /md options
|
||||
mdOptions.setAttribute('open', '');
|
||||
} else if (endpoint === 'llm') {
|
||||
llmOptions.classList.remove('hidden');
|
||||
// Auto-open the /llm options
|
||||
llmOptions.setAttribute('open', '');
|
||||
} else {
|
||||
// For /crawl endpoints, show the advanced config
|
||||
advConfig.classList.remove('hidden');
|
||||
}
|
||||
});
|
||||
|
||||
async function pyConfigToJson() {
|
||||
const code = cm.getValue().trim();
|
||||
if (!code) return {};
|
||||
|
||||
const res = await fetch('/config/dump', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ code }),
|
||||
});
|
||||
|
||||
const statusEl = document.getElementById('cfg-status');
|
||||
if (!res.ok) {
|
||||
const msg = await res.text();
|
||||
statusEl.textContent = '✖ config error';
|
||||
statusEl.className = 'text-xs text-red-400';
|
||||
throw new Error(msg || 'Invalid config');
|
||||
}
|
||||
|
||||
statusEl.textContent = '✓ parsed';
|
||||
statusEl.className = 'text-xs text-green-400';
|
||||
|
||||
return await res.json();
|
||||
}
|
||||
|
||||
// ================ SERVER COMMUNICATION ================
|
||||
|
||||
// Update status UI
|
||||
function updateStatus(status, time, memory, peakMemory) {
|
||||
const statusEl = document.getElementById('execution-status');
|
||||
const badgeEl = document.querySelector('#status-badge span:first-child');
|
||||
const textEl = document.querySelector('#status-badge span:last-child');
|
||||
|
||||
statusEl.classList.remove('hidden');
|
||||
badgeEl.className = 'w-3 h-3 rounded-full mr-2';
|
||||
|
||||
if (status === 'success') {
|
||||
badgeEl.classList.add('bg-green-500');
|
||||
textEl.textContent = 'Success';
|
||||
} else if (status === 'error') {
|
||||
badgeEl.classList.add('bg-red-500');
|
||||
textEl.textContent = 'Error';
|
||||
} else {
|
||||
badgeEl.classList.add('bg-yellow-500');
|
||||
textEl.textContent = 'Processing...';
|
||||
}
|
||||
|
||||
if (time) {
|
||||
document.getElementById('exec-time').textContent = `${time}ms`;
|
||||
}
|
||||
|
||||
if (memory !== undefined && peakMemory !== undefined) {
|
||||
document.getElementById('exec-mem').textContent = `Δ${memory >= 0 ? '+' : ''}${memory}MB (Peak: ${peakMemory}MB)`;
|
||||
}
|
||||
}
|
||||
|
||||
// Generate code snippets
|
||||
function generateSnippets(api, payload, method = 'POST') {
|
||||
// Python snippet
|
||||
const pyCodeEl = document.querySelector('#python-content code');
|
||||
let pySnippet;
|
||||
|
||||
if (method === 'GET') {
|
||||
// GET request (for /llm endpoint)
|
||||
pySnippet = `import httpx\n\nasync def crawl():\n async with httpx.AsyncClient() as client:\n response = await client.get(\n "${window.location.origin}${api}"\n )\n return response.json()`;
|
||||
} else {
|
||||
// POST request (for /crawl and /md endpoints)
|
||||
pySnippet = `import httpx\n\nasync def crawl():\n async with httpx.AsyncClient() as client:\n response = await client.post(\n "${window.location.origin}${api}",\n json=${JSON.stringify(payload, null, 4).replace(/\n/g, '\n ')}\n )\n return response.json()`;
|
||||
}
|
||||
|
||||
pyCodeEl.textContent = pySnippet;
|
||||
pyCodeEl.className = 'python hljs'; // Reset classes
|
||||
forceHighlightElement(pyCodeEl);
|
||||
|
||||
// cURL snippet
|
||||
const curlCodeEl = document.querySelector('#curl-content code');
|
||||
let curlSnippet;
|
||||
|
||||
if (method === 'GET') {
|
||||
// GET request (for /llm endpoint)
|
||||
curlSnippet = `curl -X GET "${window.location.origin}${api}"`;
|
||||
} else {
|
||||
// POST request (for /crawl and /md endpoints)
|
||||
curlSnippet = `curl -X POST ${window.location.origin}${api} \\\n -H "Content-Type: application/json" \\\n -d '${JSON.stringify(payload)}'`;
|
||||
}
|
||||
|
||||
curlCodeEl.textContent = curlSnippet;
|
||||
curlCodeEl.className = 'bash hljs'; // Reset classes
|
||||
forceHighlightElement(curlCodeEl);
|
||||
}
|
||||
|
||||
// Main run function
|
||||
async function runCrawl() {
|
||||
const endpoint = document.getElementById('endpoint').value;
|
||||
const urls = document.getElementById('urls').value.trim().split(/\n/).filter(u => u);
|
||||
// 1) grab python from CodeMirror, validate via /config/dump
|
||||
let advConfig = {};
|
||||
try {
|
||||
const cfgJson = await pyConfigToJson(); // may throw
|
||||
if (Object.keys(cfgJson).length) {
|
||||
const cfgType = document.getElementById('cfg-type').value;
|
||||
advConfig = cfgType === 'CrawlerRunConfig'
|
||||
? { crawler_config: cfgJson }
|
||||
: { browser_config: cfgJson };
|
||||
}
|
||||
} catch (err) {
|
||||
updateStatus('error');
|
||||
document.querySelector('#response-content code').textContent =
|
||||
JSON.stringify({ error: err.message }, null, 2);
|
||||
forceHighlightElement(document.querySelector('#response-content code'));
|
||||
return; // stop run
|
||||
}
|
||||
|
||||
const endpointMap = {
|
||||
crawl: '/crawl',
|
||||
// crawl_stream: '/crawl/stream',
|
||||
md: '/md',
|
||||
llm: '/llm'
|
||||
};
|
||||
|
||||
const api = endpointMap[endpoint];
|
||||
let payload;
|
||||
|
||||
// Create appropriate payload based on endpoint type
|
||||
if (endpoint === 'md') {
|
||||
// Get values from the /md specific inputs
|
||||
const filterType = document.getElementById('md-filter').value;
|
||||
const query = document.getElementById('md-query').value.trim();
|
||||
const cache = document.getElementById('md-cache').value;
|
||||
|
||||
// MD endpoint expects: { url, f, q, c }
|
||||
payload = {
|
||||
url: urls[0], // Take first URL
|
||||
f: filterType, // Lowercase filter type as required by server
|
||||
q: query || null, // Use the query if provided, otherwise null
|
||||
c: cache
|
||||
};
|
||||
} else if (endpoint === 'llm') {
|
||||
// LLM endpoint has a different URL pattern and uses query params
|
||||
// This will be handled directly in the fetch below
|
||||
payload = null;
|
||||
} else {
|
||||
// Default payload for /crawl and /crawl/stream
|
||||
payload = {
|
||||
urls,
|
||||
...advConfig
|
||||
};
|
||||
}
|
||||
|
||||
updateStatus('processing');
|
||||
|
||||
try {
|
||||
const startTime = performance.now();
|
||||
let response, responseData;
|
||||
|
||||
if (endpoint === 'llm') {
|
||||
// Special handling for LLM endpoint which uses URL pattern: /llm/{encoded_url}?q={query}
|
||||
const url = urls[0];
|
||||
const encodedUrl = encodeURIComponent(url);
|
||||
// Get the question from the LLM-specific input
|
||||
const question = document.getElementById('llm-question').value.trim() || "What is this page about?";
|
||||
|
||||
response = await fetch(`${api}/${encodedUrl}?q=${encodeURIComponent(question)}`, {
|
||||
method: 'GET',
|
||||
headers: { 'Accept': 'application/json' }
|
||||
});
|
||||
} else if (endpoint === 'crawl_stream') {
|
||||
// Stream processing
|
||||
response = await fetch(api, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify(payload)
|
||||
});
|
||||
|
||||
const reader = response.body.getReader();
|
||||
let text = '';
|
||||
let maxMemory = 0;
|
||||
|
||||
while (true) {
|
||||
const { value, done } = await reader.read();
|
||||
if (done) break;
|
||||
|
||||
const chunk = new TextDecoder().decode(value);
|
||||
text += chunk;
|
||||
|
||||
// Process each line for memory updates
|
||||
chunk.trim().split('\n').forEach(line => {
|
||||
if (!line) return;
|
||||
try {
|
||||
const obj = JSON.parse(line);
|
||||
if (obj.server_memory_mb) {
|
||||
maxMemory = Math.max(maxMemory, obj.server_memory_mb);
|
||||
}
|
||||
} catch (e) {
|
||||
console.error('Error parsing stream line:', e);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
responseData = { stream: text };
|
||||
const time = Math.round(performance.now() - startTime);
|
||||
updateStatus('success', time, null, maxMemory);
|
||||
document.querySelector('#response-content code').textContent = text;
|
||||
document.querySelector('#response-content code').className = 'json hljs'; // Reset classes
|
||||
forceHighlightElement(document.querySelector('#response-content code'));
|
||||
} else {
|
||||
// Regular request (handles /crawl and /md)
|
||||
response = await fetch(api, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify(payload)
|
||||
});
|
||||
|
||||
responseData = await response.json();
|
||||
const time = Math.round(performance.now() - startTime);
|
||||
|
||||
if (!response.ok) {
|
||||
updateStatus('error', time);
|
||||
throw new Error(responseData.error || 'Request failed');
|
||||
}
|
||||
|
||||
updateStatus(
|
||||
'success',
|
||||
time,
|
||||
responseData.server_memory_delta_mb,
|
||||
responseData.server_peak_memory_mb
|
||||
);
|
||||
|
||||
document.querySelector('#response-content code').textContent = JSON.stringify(responseData, null, 2);
|
||||
document.querySelector('#response-content code').className = 'json hljs'; // Ensure class is set
|
||||
forceHighlightElement(document.querySelector('#response-content code'));
|
||||
}
|
||||
|
||||
forceHighlightElement(document.querySelector('#response-content code'));
|
||||
|
||||
// For generateSnippets, handle the LLM case specially
|
||||
if (endpoint === 'llm') {
|
||||
const url = urls[0];
|
||||
const encodedUrl = encodeURIComponent(url);
|
||||
const question = document.getElementById('llm-question').value.trim() || "What is this page about?";
|
||||
generateSnippets(`${api}/${encodedUrl}?q=${encodeURIComponent(question)}`, null, 'GET');
|
||||
} else {
|
||||
generateSnippets(api, payload);
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('Error:', error);
|
||||
updateStatus('error');
|
||||
document.querySelector('#response-content code').textContent = JSON.stringify(
|
||||
{ error: error.message },
|
||||
null,
|
||||
2
|
||||
);
|
||||
forceHighlightElement(document.querySelector('#response-content code'));
|
||||
}
|
||||
}
|
||||
|
||||
// Stress test function
|
||||
async function runStressTest() {
|
||||
const total = parseInt(document.getElementById('st-total').value);
|
||||
const chunkSize = parseInt(document.getElementById('st-chunk').value);
|
||||
const concurrency = parseInt(document.getElementById('st-conc').value);
|
||||
const useStream = document.getElementById('st-stream').checked;
|
||||
|
||||
const logEl = document.getElementById('stress-log');
|
||||
logEl.textContent = '';
|
||||
|
||||
document.getElementById('stress-completed').textContent = '0';
|
||||
document.getElementById('stress-total').textContent = total;
|
||||
document.getElementById('stress-avg-time').textContent = '0';
|
||||
document.getElementById('stress-peak-mem').textContent = '0';
|
||||
|
||||
const api = useStream ? '/crawl/stream' : '/crawl';
|
||||
const urls = Array.from({ length: total }, (_, i) => `https://httpbin.org/anything/stress-${i}-${Date.now()}`);
|
||||
const chunks = [];
|
||||
|
||||
for (let i = 0; i < urls.length; i += chunkSize) {
|
||||
chunks.push(urls.slice(i, i + chunkSize));
|
||||
}
|
||||
|
||||
let completed = 0;
|
||||
let totalTime = 0;
|
||||
let peakMemory = 0;
|
||||
|
||||
const processBatch = async (batch, index) => {
|
||||
const payload = {
|
||||
urls: batch,
|
||||
browser_config: {},
|
||||
crawler_config: { cache_mode: 'BYPASS', stream: useStream }
|
||||
};
|
||||
|
||||
const start = performance.now();
|
||||
let time, memory;
|
||||
|
||||
try {
|
||||
if (useStream) {
|
||||
const response = await fetch(api, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify(payload)
|
||||
});
|
||||
|
||||
const reader = response.body.getReader();
|
||||
let maxMem = 0;
|
||||
while (true) {
|
||||
const { value, done } = await reader.read();
|
||||
if (done) break;
|
||||
const text = new TextDecoder().decode(value);
|
||||
text.split('\n').forEach(line => {
|
||||
try {
|
||||
const obj = JSON.parse(line);
|
||||
if (obj.server_memory_mb) {
|
||||
maxMem = Math.max(maxMem, obj.server_memory_mb);
|
||||
}
|
||||
} catch { }
|
||||
});
|
||||
}
|
||||
|
||||
memory = maxMem;
|
||||
} else {
|
||||
const response = await fetch(api, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify(payload)
|
||||
});
|
||||
|
||||
const data = await response.json();
|
||||
memory = data.server_peak_memory_mb;
|
||||
}
|
||||
|
||||
time = Math.round(performance.now() - start);
|
||||
peakMemory = Math.max(peakMemory, memory || 0);
|
||||
totalTime += time;
|
||||
|
||||
logEl.textContent += `[${index + 1}/${chunks.length}] ✔ ${time}ms | Peak ${memory}MB\n`;
|
||||
} catch (error) {
|
||||
time = Math.round(performance.now() - start);
|
||||
logEl.textContent += `[${index + 1}/${chunks.length}] ✖ ${time}ms | ${error.message}\n`;
|
||||
}
|
||||
|
||||
completed += batch.length;
|
||||
document.getElementById('stress-completed').textContent = completed;
|
||||
document.getElementById('stress-peak-mem').textContent = peakMemory;
|
||||
document.getElementById('stress-avg-time').textContent = Math.round(totalTime / (index + 1));
|
||||
|
||||
logEl.scrollTop = logEl.scrollHeight;
|
||||
};
|
||||
|
||||
// Run with concurrency control
|
||||
let active = 0;
|
||||
let index = 0;
|
||||
|
||||
return new Promise(resolve => {
|
||||
const runNext = () => {
|
||||
while (active < concurrency && index < chunks.length) {
|
||||
processBatch(chunks[index], index)
|
||||
.finally(() => {
|
||||
active--;
|
||||
runNext();
|
||||
});
|
||||
active++;
|
||||
index++;
|
||||
}
|
||||
|
||||
if (active === 0 && index >= chunks.length) {
|
||||
logEl.textContent += '\n✅ Stress test completed\n';
|
||||
resolve();
|
||||
}
|
||||
};
|
||||
|
||||
runNext();
|
||||
});
|
||||
}
|
||||
|
||||
// Event listeners
|
||||
document.getElementById('run-btn').addEventListener('click', runCrawl);
|
||||
document.getElementById('st-run').addEventListener('click', runStressTest);
|
||||
|
||||
function forceHighlightElement(element) {
|
||||
if (!element) return;
|
||||
|
||||
// Save current scroll position (important for large code blocks)
|
||||
const scrollTop = element.parentElement.scrollTop;
|
||||
|
||||
// Reset the element
|
||||
const text = element.textContent;
|
||||
element.innerHTML = text;
|
||||
element.removeAttribute('data-highlighted');
|
||||
|
||||
// Reapply highlighting
|
||||
hljs.highlightElement(element);
|
||||
|
||||
// Restore scroll position
|
||||
element.parentElement.scrollTop = scrollTop;
|
||||
}
|
||||
|
||||
// Initialize clipboard for all copy buttons
|
||||
function initCopyButtons() {
|
||||
document.querySelectorAll('.copy-btn').forEach(btn => {
|
||||
new ClipboardJS(btn, {
|
||||
text: () => {
|
||||
const target = document.querySelector(btn.dataset.target);
|
||||
return target ? target.textContent : '';
|
||||
}
|
||||
}).on('success', e => {
|
||||
e.clearSelection();
|
||||
// make button text "copied" for 1 second
|
||||
const originalText = e.trigger.textContent;
|
||||
e.trigger.textContent = 'Copied!';
|
||||
setTimeout(() => {
|
||||
e.trigger.textContent = originalText;
|
||||
}, 1000);
|
||||
// Highlight the copied code
|
||||
const target = document.querySelector(btn.dataset.target);
|
||||
if (target) {
|
||||
target.classList.add('highlighted');
|
||||
setTimeout(() => {
|
||||
target.classList.remove('highlighted');
|
||||
}, 1000);
|
||||
}
|
||||
|
||||
}).on('error', e => {
|
||||
console.error('Error copying:', e);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
// Function to initialize UI based on selected endpoint
|
||||
function initUI() {
|
||||
// Trigger the endpoint change handler to set initial UI state
|
||||
const endpointSelect = document.getElementById('endpoint');
|
||||
const event = new Event('change');
|
||||
endpointSelect.dispatchEvent(event);
|
||||
|
||||
// Initialize copy buttons
|
||||
initCopyButtons();
|
||||
}
|
||||
|
||||
// Initialize on page load
|
||||
document.addEventListener('DOMContentLoaded', initUI);
|
||||
// Also call it immediately in case the script runs after DOM is already loaded
|
||||
if (document.readyState !== 'loading') {
|
||||
initUI();
|
||||
}
|
||||
|
||||
</script>
|
||||
</body>
|
||||
|
||||
</html>
|
||||
@@ -1,12 +1,72 @@
|
||||
[supervisord]
|
||||
nodaemon=true
|
||||
nodaemon=true ; Run supervisord in the foreground
|
||||
logfile=/dev/null ; Log supervisord output to stdout/stderr
|
||||
logfile_maxbytes=0
|
||||
|
||||
[program:redis]
|
||||
command=redis-server
|
||||
command=/usr/bin/redis-server --loglevel notice ; Path to redis-server on Alpine
|
||||
user=appuser ; Run redis as our non-root user
|
||||
autorestart=true
|
||||
priority=10
|
||||
stdout_logfile=/dev/stdout ; Redirect redis stdout to container stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/dev/stderr ; Redirect redis stderr to container stderr
|
||||
stderr_logfile_maxbytes=0
|
||||
|
||||
[program:gunicorn]
|
||||
command=gunicorn --bind 0.0.0.0:8000 --workers 4 --threads 2 --timeout 300 --graceful-timeout 60 --keep-alive 65 --log-level debug --worker-class uvicorn.workers.UvicornWorker --max-requests 1000 --max-requests-jitter 50 server:app
|
||||
command=/usr/local/bin/gunicorn --bind 0.0.0.0:11235 --workers 1 --threads 4 --timeout 1800 --graceful-timeout 30 --keep-alive 300 --log-level info --worker-class uvicorn.workers.UvicornWorker server:app
|
||||
directory=/app ; Working directory for the app
|
||||
user=appuser ; Run gunicorn as our non-root user
|
||||
autorestart=true
|
||||
priority=20
|
||||
priority=20
|
||||
environment=PYTHONUNBUFFERED=1 ; Ensure Python output is sent straight to logs
|
||||
environment=DISPLAY=:99
|
||||
stdout_logfile=/dev/stdout ; Redirect gunicorn stdout to container stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/dev/stderr ; Redirect gunicorn stderr to container stderr
|
||||
stderr_logfile_maxbytes=0
|
||||
|
||||
[program:xvfb]
|
||||
command=/usr/bin/Xvfb :99 -screen 0 1280x720x24
|
||||
user=appuser
|
||||
autorestart=true
|
||||
priority=5
|
||||
stdout_logfile=/dev/stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/dev/stderr
|
||||
stderr_logfile_maxbytes=0
|
||||
|
||||
[program:fluxbox]
|
||||
command=/usr/bin/fluxbox
|
||||
user=appuser
|
||||
autorestart=true
|
||||
priority=6
|
||||
environment=DISPLAY=:99
|
||||
stdout_logfile=/dev/stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/dev/stderr
|
||||
stderr_logfile_maxbytes=0
|
||||
|
||||
[program:x11vnc]
|
||||
command=/usr/bin/x11vnc -display :99 -nopw -forever -shared -rfbport 5900 -quiet
|
||||
user=appuser
|
||||
autorestart=true
|
||||
priority=7
|
||||
environment=DISPLAY=:99
|
||||
stdout_logfile=/dev/stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/dev/stderr
|
||||
stderr_logfile_maxbytes=0
|
||||
|
||||
[program:websockify]
|
||||
command=/usr/bin/websockify 6080 localhost:5900 --web /opt/novnc
|
||||
user=appuser
|
||||
autorestart=true
|
||||
priority=8
|
||||
environment=DISPLAY=:99
|
||||
stdout_logfile=/dev/stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/dev/stderr
|
||||
stderr_logfile_maxbytes=0
|
||||
|
||||
# Optional: Add filebeat or other logging agents here if needed
|
||||
@@ -45,10 +45,10 @@ def datetime_handler(obj: any) -> Optional[str]:
|
||||
return obj.isoformat()
|
||||
raise TypeError(f"Object of type {type(obj)} is not JSON serializable")
|
||||
|
||||
def should_cleanup_task(created_at: str) -> bool:
|
||||
def should_cleanup_task(created_at: str, ttl_seconds: int = 3600) -> bool:
|
||||
"""Check if task should be cleaned up based on creation time."""
|
||||
created = datetime.fromisoformat(created_at)
|
||||
return (datetime.now() - created).total_seconds() > 3600
|
||||
return (datetime.now() - created).total_seconds() > ttl_seconds
|
||||
|
||||
def decode_redis_hash(hash_data: Dict[bytes, bytes]) -> Dict[str, str]:
|
||||
"""Decode Redis hash data from bytes to strings."""
|
||||
|
||||
@@ -1,16 +1,21 @@
|
||||
# Base configuration (not a service, just a reusable config block)
|
||||
version: '3.8'
|
||||
|
||||
# Shared configuration for all environments
|
||||
x-base-config: &base-config
|
||||
ports:
|
||||
- "11235:11235"
|
||||
- "8000:8000"
|
||||
- "9222:9222"
|
||||
- "8080:8080"
|
||||
- "11235:11235" # Gunicorn port
|
||||
env_file:
|
||||
- .llm.env # API keys (create from .llm.env.example)
|
||||
environment:
|
||||
- CRAWL4AI_API_TOKEN=${CRAWL4AI_API_TOKEN:-}
|
||||
- OPENAI_API_KEY=${OPENAI_API_KEY:-}
|
||||
- CLAUDE_API_KEY=${CLAUDE_API_KEY:-}
|
||||
- DEEPSEEK_API_KEY=${DEEPSEEK_API_KEY:-}
|
||||
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
|
||||
- GROQ_API_KEY=${GROQ_API_KEY:-}
|
||||
- TOGETHER_API_KEY=${TOGETHER_API_KEY:-}
|
||||
- MISTRAL_API_KEY=${MISTRAL_API_KEY:-}
|
||||
- GEMINI_API_TOKEN=${GEMINI_API_TOKEN:-}
|
||||
volumes:
|
||||
- /dev/shm:/dev/shm
|
||||
- /dev/shm:/dev/shm # Chromium performance
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
@@ -24,42 +29,21 @@ x-base-config: &base-config
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
start_period: 40s
|
||||
user: "appuser"
|
||||
|
||||
services:
|
||||
# Local build services for different platforms
|
||||
crawl4ai-amd64:
|
||||
crawl4ai:
|
||||
# 1. Default: Pull multi-platform test image from Docker Hub
|
||||
# 2. Override with local image via: IMAGE=local-test docker compose up
|
||||
image: ${IMAGE:-unclecode/crawl4ai:${TAG:-latest}}
|
||||
|
||||
# Local build config (used with --build)
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile
|
||||
args:
|
||||
PYTHON_VERSION: "3.10"
|
||||
INSTALL_TYPE: ${INSTALL_TYPE:-basic}
|
||||
ENABLE_GPU: false
|
||||
platforms:
|
||||
- linux/amd64
|
||||
profiles: ["local-amd64"]
|
||||
<<: *base-config  # include the shared config directly instead of using extends
|
||||
|
||||
crawl4ai-arm64:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile
|
||||
args:
|
||||
PYTHON_VERSION: "3.10"
|
||||
INSTALL_TYPE: ${INSTALL_TYPE:-basic}
|
||||
ENABLE_GPU: false
|
||||
platforms:
|
||||
- linux/arm64
|
||||
profiles: ["local-arm64"]
|
||||
<<: *base-config
|
||||
|
||||
# Hub services for different platforms and versions
|
||||
crawl4ai-hub-amd64:
|
||||
image: unclecode/crawl4ai:${VERSION:-basic}-amd64
|
||||
profiles: ["hub-amd64"]
|
||||
<<: *base-config
|
||||
|
||||
crawl4ai-hub-arm64:
|
||||
image: unclecode/crawl4ai:${VERSION:-basic}-arm64
|
||||
profiles: ["hub-arm64"]
|
||||
INSTALL_TYPE: ${INSTALL_TYPE:-default}
|
||||
ENABLE_GPU: ${ENABLE_GPU:-false}
|
||||
|
||||
# Inherit shared config
|
||||
<<: *base-config
|
||||
127
docs/apps/linkdin/README.md
Normal file
@@ -0,0 +1,127 @@
|
||||
# Crawl4AI Prospect‑Wizard – step‑by‑step guide
|
||||
|
||||
A three‑stage demo that goes from **LinkedIn scraping** ➜ **LLM reasoning** ➜ **graph visualisation**.
|
||||
|
||||
```
|
||||
prospect‑wizard/
|
||||
├─ c4ai_discover.py # Stage 1 – scrape companies + people
|
||||
├─ c4ai_insights.py # Stage 2 – embeddings, org‑charts, scores
|
||||
├─ graph_view_template.html # Stage 3 – graph viewer (static HTML)
|
||||
└─ data/ # output lands here (*.jsonl / *.json)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 1 Install & boot a LinkedIn profile (one‑time)
|
||||
|
||||
### 1.1 Install dependencies
|
||||
```bash
|
||||
pip install crawl4ai litellm sentence-transformers pandas rich
|
||||
```
|
||||
|
||||
### 1.2 Create / warm a LinkedIn browser profile
|
||||
```bash
|
||||
crwl profiles
|
||||
```
|
||||
1. The interactive shell shows **New profile** – hit **enter**.
|
||||
2. Choose a name, e.g. `profile_linkedin_uc`.
|
||||
3. A Chromium window opens – log in to LinkedIn, solve whatever CAPTCHA, then close.
|
||||
|
||||
> Remember the **profile name**. All future runs take `--profile-name <your_name>`.
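Under the hood, both stages resolve that name to a Chromium user-data directory and reuse the logged-in session; a minimal sketch of what `c4ai_discover.py` does with it (the profile name is whatever you chose above):

```python
# mirrors the browser setup in c4ai_discover.py
from crawl4ai import BrowserConfig, BrowserProfiler

profiler = BrowserProfiler()
profile_path = profiler.get_profile_path("profile_linkedin_uc")  # name created via `crwl profiles`

browser_cfg = BrowserConfig(
    headless=False,            # keep a visible window so the LinkedIn session stays "human"
    user_data_dir=profile_path,
    use_managed_browser=True,  # reuse cookies/login from the warmed profile
)
```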
|
||||
|
||||
---
|
||||
|
||||
## 2 Discovery – scrape companies & people
|
||||
|
||||
```bash
|
||||
python c4ai_discover.py full \
    --query "health insurance management" \
    --geo 102713980 \
    --title-filters "" \
    --max-companies 10 \
    --max-people 20 \
    --profile-name profile_linkedin_uc \
    --outdir ./data \
    --concurrency 2 \
    --log-level debug
# --geo 102713980        → Malaysia geoUrn (see cheatsheet below)
# --title-filters ""     → leave empty, or e.g. "Product,Engineering"
# --max-companies / --max-people kept small by default for workshops
|
||||
```
|
||||
**Outputs** in `./data/`:
|
||||
* `companies.jsonl` – one JSON per company
|
||||
* `people.jsonl` – one JSON per employee
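For a quick sanity check of those files, a small reader is enough; the sketch below only assumes the default `./data` output dir and the field names shown in the schema examples inside `c4ai_discover.py`:

```python
# peek_stage1.py – eyeball Stage-1 output
import json
from pathlib import Path

def read_jsonl(path: Path) -> list[dict]:
    return [json.loads(line) for line in path.read_text(encoding="utf-8").splitlines() if line.strip()]

companies = read_jsonl(Path("data/companies.jsonl"))
people = read_jsonl(Path("data/people.jsonl"))

print(f"{len(companies)} companies, {len(people)} people")
for c in companies[:3]:
    print(f"- {c['name']} ({c.get('followers')} followers) -> {c['people_url']}")
```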
|
||||
|
||||
🛠️ **Dry‑run:** `C4AI_DEMO_DEBUG=1 python c4ai_discover.py full --query coffee` uses bundled HTML snippets, no network.
|
||||
|
||||
### Handy geoUrn cheatsheet
|
||||
| Location | geoUrn |
|
||||
|----------|--------|
|
||||
| Singapore | **103644278** |
|
||||
| Malaysia | **102713980** |
|
||||
| United States | **103644922** |
|
||||
| United Kingdom | **102221843** |
|
||||
| Australia | **101452733** |
|
||||
_See more: <https://www.linkedin.com/search/results/companies/?geoUrn=XXX> – the number after `geoUrn=` is what you need._
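If you would rather pull the number out of a URL you already have open, a tiny helper works; the regex below is an assumption about the URL shape (plain or URL-encoded `geoUrn=`), not something the scripts ship with:

```python
import re
from urllib.parse import unquote

def geo_urn(url: str) -> str | None:
    # return the first geoUrn id found in a LinkedIn search URL, if any
    m = re.search(r"geoUrn=\D*(\d+)", unquote(url))
    return m.group(1) if m else None

print(geo_urn("https://www.linkedin.com/search/results/companies/?geoUrn=102713980"))  # 102713980
```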
|
||||
|
||||
---
|
||||
|
||||
## 3 Insights – embeddings, org‑charts, decision makers
|
||||
|
||||
```bash
|
||||
python c4ai_insights.py \
|
||||
--in ./data \
|
||||
--out ./data \
|
||||
--embed-model all-MiniLM-L6-v2 \
|
||||
--llm-provider gemini/gemini-2.0-flash \
|
||||
--llm-api-key "" \
|
||||
--top-k 10 \
|
||||
--max-llm-tokens 8024 \
|
||||
--llm-temperature 1.0 \
|
||||
--workers 4
|
||||
```
|
||||
Emits next to the Stage‑1 files:
|
||||
* `company_graph.json` – inter‑company similarity graph
|
||||
* `org_chart_<handle>.json` – one per company
|
||||
* `decision_makers.csv` – hand‑picked ‘who to pitch’ list
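To turn that CSV into an actual call list, a couple of pandas lines are enough; the column names below are exactly the ones written by `export_decision_makers()` in `c4ai_insights.py`:

```python
import pandas as pd

df = pd.read_csv("data/decision_makers.csv")  # columns: company, person, title, decision_score, profile_url
top = df.sort_values("decision_score", ascending=False).head(10)
print(top[["company", "person", "title", "decision_score"]].to_string(index=False))
```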
|
||||
|
||||
Flags reference (straight from `build_arg_parser()`):
|
||||
| Flag | Default | Purpose |
|
||||
|------|---------|---------|
|
||||
| `--in` | `.` | Stage‑1 output dir |
|
||||
| `--out` | `.` | Destination dir |
|
||||
| `--embed_model` | `all-MiniLM-L6-v2` | Sentence‑Transformer model |
|
||||
| `--top_k` | `10` | Neighbours per company in graph |
|
||||
| `--openai_model` | `gpt-4.1` | LLM for scoring decision makers |
|
||||
| `--max_llm_tokens` | `8024` | Token budget per LLM call |
|
||||
| `--llm_temperature` | `1.0` | Creativity knob |
|
||||
| `--stub` | off | Skip OpenAI and fabricate tiny charts |
|
||||
| `--workers` | `4` | Parallel LLM workers |
|
||||
|
||||
---
|
||||
|
||||
## 4 Visualise – interactive graph
|
||||
|
||||
After Stage 2 completes, simply open the HTML viewer from the project root:
|
||||
```bash
|
||||
open graph_view_template.html   # or serve it with Live Server / python -m http.server
|
||||
```
|
||||
The page fetches `data/company_graph.json` and the `org_chart_*.json` files automatically; keep the `data/` folder beside the HTML file.
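If your browser refuses to `fetch()` local JSON over `file://`, serve the folder instead; a minimal sketch (the port is an arbitrary choice):

```python
# serve.py – run from the prospect-wizard root so ./data sits next to the HTML
from http.server import HTTPServer, SimpleHTTPRequestHandler

HTTPServer(("127.0.0.1", 8000), SimpleHTTPRequestHandler).serve_forever()
# then browse to http://127.0.0.1:8000/graph_view_template.html
```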
|
||||
|
||||
* Left pane → list of companies (clans).
|
||||
* Click a node to load its org‑chart on the right.
|
||||
* Chat drawer lets you ask follow‑up questions; context is pulled from `people.jsonl`.
|
||||
|
||||
---
|
||||
|
||||
## 5 Common snags
|
||||
|
||||
| Symptom | Fix |
|
||||
|---------|-----|
|
||||
| Infinite CAPTCHA | Use a residential proxy: `--proxy http://user:pass@ip:port` |
|
||||
| 429 Too Many Requests | Lower `--concurrency`, rotate profile, add delay |
|
||||
| Blank graph | Check JSON paths, clear `localStorage` in browser |
|
||||
|
||||
---
|
||||
|
||||
### TL;DR
|
||||
`crwl profiles` → `c4ai_discover.py` → `c4ai_insights.py` → open `graph_view_template.html`.
|
||||
Live long and `import crawl4ai`.
|
||||
|
||||
437
docs/apps/linkdin/c4ai_discover.py
Normal file
@@ -0,0 +1,437 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
c4ai-discover — Stage‑1 Discovery CLI
|
||||
|
||||
Scrapes LinkedIn company search + their people pages and dumps two newline‑delimited
|
||||
JSON files: companies.jsonl and people.jsonl.
|
||||
|
||||
Key design rules
|
||||
----------------
|
||||
* No BeautifulSoup — Crawl4AI only for network + HTML fetch.
|
||||
* JsonCssExtractionStrategy for structured scraping; schema auto‑generated once
|
||||
from sample HTML provided by user and then cached under ./schemas/.
|
||||
* Defaults are embedded so the file runs inside VS Code debugger without CLI args.
|
||||
* If executed as a console script (argv > 1), CLI flags win.
|
||||
* Lightweight deps: argparse + Crawl4AI stack.
|
||||
|
||||
Author: Tom @ Kidocode 2025‑04‑26
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import warnings, re
|
||||
warnings.filterwarnings(
|
||||
"ignore",
|
||||
message=r"The pseudo class ':contains' is deprecated, ':-soup-contains' should be used.*",
|
||||
category=FutureWarning,
|
||||
module=r"soupsieve"
|
||||
)
|
||||
|
||||
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
# Imports
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
import argparse
|
||||
import random
|
||||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import pathlib
|
||||
import sys
|
||||
# 3rd-party rich for pretty logging
|
||||
from rich.console import Console
|
||||
from rich.logging import RichHandler
|
||||
|
||||
from datetime import datetime, UTC
|
||||
from textwrap import dedent
|
||||
from types import SimpleNamespace
|
||||
from typing import Dict, List, Optional
|
||||
from urllib.parse import quote
|
||||
from pathlib import Path
|
||||
from glob import glob
|
||||
|
||||
from crawl4ai import (
|
||||
AsyncWebCrawler,
|
||||
BrowserConfig,
|
||||
CacheMode,
|
||||
CrawlerRunConfig,
|
||||
JsonCssExtractionStrategy,
|
||||
BrowserProfiler,
|
||||
LLMConfig,
|
||||
)
|
||||
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
# Constants / paths
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
BASE_DIR = pathlib.Path(__file__).resolve().parent
|
||||
SCHEMA_DIR = BASE_DIR / "schemas"
|
||||
SCHEMA_DIR.mkdir(parents=True, exist_ok=True)
|
||||
COMPANY_SCHEMA_PATH = SCHEMA_DIR / "company_card.json"
|
||||
PEOPLE_SCHEMA_PATH = SCHEMA_DIR / "people_card.json"
|
||||
|
||||
# ---------- deterministic target JSON examples ----------
|
||||
_COMPANY_SCHEMA_EXAMPLE = {
|
||||
"handle": "/company/posify/",
|
||||
"profile_image": "https://media.licdn.com/dms/image/v2/.../logo.jpg",
|
||||
"name": "Management Research Services, Inc. (MRS, Inc)",
|
||||
"descriptor": "Insurance • Milwaukee, Wisconsin",
|
||||
"about": "Insurance • Milwaukee, Wisconsin",
|
||||
"followers": 1000
|
||||
}
|
||||
|
||||
_PEOPLE_SCHEMA_EXAMPLE = {
|
||||
"profile_url": "https://www.linkedin.com/in/lily-ng/",
|
||||
"name": "Lily Ng",
|
||||
"headline": "VP Product @ Posify",
|
||||
"followers": 890,
|
||||
"connection_degree": "2nd",
|
||||
"avatar_url": "https://media.licdn.com/dms/image/v2/.../lily.jpg"
|
||||
}
|
||||
|
||||
# Provided sample HTML snippets (trimmed) — used exactly once to cold‑generate schema.
|
||||
_SAMPLE_COMPANY_HTML = (Path(__file__).resolve().parent / "snippets/company.html").read_text()
|
||||
_SAMPLE_PEOPLE_HTML = (Path(__file__).resolve().parent / "snippets/people.html").read_text()
|
||||
|
||||
# --------- tighter schema prompts ----------
|
||||
_COMPANY_SCHEMA_QUERY = dedent(
|
||||
"""
|
||||
Using the supplied <li> company-card HTML, build a JsonCssExtractionStrategy schema that,
|
||||
for every card, outputs *exactly* the keys shown in the example JSON below.
|
||||
JSON spec:
|
||||
• handle – href of the outermost <a> that wraps the logo/title, e.g. "/company/posify/"
|
||||
• profile_image – absolute URL of the <img> inside that link
|
||||
• name – text of the <a> inside the <span class*='t-16'>
|
||||
• descriptor – text line with industry • location
|
||||
• about – text of the <div class*='t-normal'> below the name (industry + geo)
|
||||
• followers – integer parsed from the <div> containing 'followers'
|
||||
|
||||
IMPORTANT: Do not use base64-looking class names to target elements; they are not reliable.
The main parent <div> that contains these <li> elements is "div.search-results-container"; you can use that.
The parent <ul> has "role" equal to "list". Using these two should be enough to target the <li> elements.
|
||||
"""
|
||||
)
|
||||
|
||||
_PEOPLE_SCHEMA_QUERY = dedent(
|
||||
"""
|
||||
Using the supplied <li> people-card HTML, build a JsonCssExtractionStrategy schema that
|
||||
outputs exactly the keys in the example JSON below.
|
||||
Fields:
|
||||
• profile_url – href of the outermost profile link
|
||||
• name – text inside artdeco-entity-lockup__title
|
||||
• headline – inner text of artdeco-entity-lockup__subtitle
|
||||
• followers – integer parsed from the span inside lt-line-clamp--multi-line
|
||||
• connection_degree – '1st', '2nd', etc. from artdeco-entity-lockup__badge
|
||||
• avatar_url – src of the <img> within artdeco-entity-lockup__image
|
||||
|
||||
IMPORTANT: Do not use base64-looking class names to target elements; they are not reliable.
The main parent <div> that contains these <li> elements has the classes "artdeco-card org-people-profile-card__card-spacing org-people__card-margin-bottom".
|
||||
"""
|
||||
)
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Utility helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _load_or_build_schema(
|
||||
path: pathlib.Path,
|
||||
sample_html: str,
|
||||
query: str,
|
||||
example_json: Dict,
|
||||
force = False
|
||||
) -> Dict:
|
||||
"""Load schema from path, else call generate_schema once and persist."""
|
||||
if path.exists() and not force:
|
||||
return json.loads(path.read_text())
|
||||
|
||||
logging.info("[SCHEMA] Generating schema %s", path.name)
|
||||
schema = JsonCssExtractionStrategy.generate_schema(
|
||||
html=sample_html,
|
||||
llm_config=LLMConfig(
|
||||
provider=os.getenv("C4AI_SCHEMA_PROVIDER", "openai/gpt-4o"),
|
||||
api_token=os.getenv("OPENAI_API_KEY", "env:OPENAI_API_KEY"),
|
||||
),
|
||||
query=query,
|
||||
target_json_example=json.dumps(example_json, indent=2),
|
||||
)
|
||||
path.write_text(json.dumps(schema, indent=2))
|
||||
return schema
|
||||
|
||||
|
||||
def _openai_friendly_number(text: str) -> Optional[int]:
|
||||
"""Extract first int from text like '1K followers' (returns 1000)."""
|
||||
import re
|
||||
|
||||
m = re.search(r"(\d[\d,]*)", text.replace(",", ""))
|
||||
if not m:
|
||||
return None
|
||||
val = int(m.group(1))
|
||||
if "k" in text.lower():
|
||||
val *= 1000
|
||||
if "m" in text.lower():
|
||||
val *= 1_000_000
|
||||
return val
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Core async workers
|
||||
# ---------------------------------------------------------------------------
|
||||
async def crawl_company_search(crawler: AsyncWebCrawler, url: str, schema: Dict, limit: int) -> List[Dict]:
|
||||
"""Paginate 10-item company search pages until `limit` reached."""
|
||||
extraction = JsonCssExtractionStrategy(schema)
|
||||
cfg = CrawlerRunConfig(
|
||||
extraction_strategy=extraction,
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
wait_for = ".search-marvel-srp",
|
||||
session_id="company_search",
|
||||
delay_before_return_html=1,
|
||||
magic = True,
|
||||
verbose= False,
|
||||
)
|
||||
companies, page = [], 1
|
||||
while len(companies) < max(limit, 10):
|
||||
paged_url = f"{url}&page={page}"
|
||||
res = await crawler.arun(paged_url, config=cfg)
|
||||
batch = json.loads(res[0].extracted_content)
|
||||
if not batch:
|
||||
break
|
||||
for item in batch:
|
||||
name = item.get("name", "").strip()
|
||||
handle = item.get("handle", "").strip()
|
||||
if not handle or not name:
|
||||
continue
|
||||
descriptor = item.get("descriptor")
|
||||
about = item.get("about")
|
||||
followers = _openai_friendly_number(str(item.get("followers", "")))
|
||||
companies.append(
|
||||
{
|
||||
"handle": handle,
|
||||
"name": name,
|
||||
"descriptor": descriptor,
|
||||
"about": about,
|
||||
"followers": followers,
|
||||
"people_url": f"{handle}people/",
|
||||
"captured_at": datetime.now(UTC).isoformat(timespec="seconds") + "Z",
|
||||
}
|
||||
)
|
||||
page += 1
|
||||
logging.info(
|
||||
f"[dim]Page {page}[/] — running total: {len(companies)}/{limit} companies"
|
||||
)
|
||||
|
||||
return companies[:max(limit, 10)]
|
||||
|
||||
|
||||
async def crawl_people_page(
|
||||
crawler: AsyncWebCrawler,
|
||||
people_url: str,
|
||||
schema: Dict,
|
||||
limit: int,
|
||||
title_kw: str,
|
||||
) -> List[Dict]:
|
||||
people_u = f"{people_url}?keywords={quote(title_kw)}"
|
||||
extraction = JsonCssExtractionStrategy(schema)
|
||||
cfg = CrawlerRunConfig(
|
||||
extraction_strategy=extraction,
|
||||
# scan_full_page=True,
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
magic=True,
|
||||
wait_for=".org-people-profile-card__card-spacing",
|
||||
delay_before_return_html=1,
|
||||
session_id="people_search",
|
||||
)
|
||||
res = await crawler.arun(people_u, config=cfg)
|
||||
if not res[0].success:
|
||||
return []
|
||||
raw = json.loads(res[0].extracted_content)
|
||||
people = []
|
||||
for p in raw[:limit]:
|
||||
followers = _openai_friendly_number(str(p.get("followers", "")))
|
||||
people.append(
|
||||
{
|
||||
"profile_url": p.get("profile_url"),
|
||||
"name": p.get("name"),
|
||||
"headline": p.get("headline"),
|
||||
"followers": followers,
|
||||
"connection_degree": p.get("connection_degree"),
|
||||
"avatar_url": p.get("avatar_url"),
|
||||
}
|
||||
)
|
||||
return people
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# CLI + main
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def build_arg_parser() -> argparse.ArgumentParser:
|
||||
ap = argparse.ArgumentParser("c4ai-discover — Crawl4AI LinkedIn discovery")
|
||||
sub = ap.add_subparsers(dest="cmd", required=False, help="run scope")
|
||||
|
||||
def add_flags(parser: argparse.ArgumentParser):
|
||||
parser.add_argument("--query", required=False, help="query keyword(s)")
|
||||
parser.add_argument("--geo", required=False, type=int, help="LinkedIn geoUrn")
|
||||
parser.add_argument("--title-filters", default="Product,Engineering", help="comma list of job keywords")
|
||||
parser.add_argument("--max-companies", type=int, default=1000)
|
||||
parser.add_argument("--max-people", type=int, default=500)
|
||||
parser.add_argument("--profile-name", default=str(pathlib.Path.home() / ".crawl4ai/profiles/profile_linkedin_uc"))
|
||||
parser.add_argument("--outdir", default="./output")
|
||||
parser.add_argument("--concurrency", type=int, default=4)
|
||||
parser.add_argument("--log-level", default="info", choices=["debug", "info", "warn", "error"])
|
||||
|
||||
add_flags(sub.add_parser("full"))
|
||||
add_flags(sub.add_parser("companies"))
|
||||
add_flags(sub.add_parser("people"))
|
||||
|
||||
# global flags
|
||||
ap.add_argument(
|
||||
"--debug",
|
||||
action="store_true",
|
||||
help="Use built-in demo defaults (same as C4AI_DEMO_DEBUG=1)",
|
||||
)
|
||||
return ap
|
||||
|
||||
|
||||
def detect_debug_defaults(force = False) -> SimpleNamespace:
|
||||
if not force and sys.gettrace() is None and not os.getenv("C4AI_DEMO_DEBUG"):
|
||||
return SimpleNamespace()
|
||||
# ----- debug‑friendly defaults -----
|
||||
return SimpleNamespace(
|
||||
cmd="full",
|
||||
query="health insurance management",
|
||||
geo=102713980,
|
||||
# title_filters="Product,Engineering",
|
||||
title_filters="",
|
||||
max_companies=10,
|
||||
max_people=5,
|
||||
profile_name="profile_linkedin_uc",
|
||||
outdir="./debug_out",
|
||||
concurrency=2,
|
||||
log_level="debug",
|
||||
)
|
||||
|
||||
|
||||
async def async_main(opts):
|
||||
# ─────────── logging setup ───────────
|
||||
console = Console()
|
||||
logging.basicConfig(
|
||||
level=opts.log_level.upper(),
|
||||
format="%(message)s",
|
||||
handlers=[RichHandler(console=console, markup=True, rich_tracebacks=True)],
|
||||
)
|
||||
|
||||
# -------------------------------------------------------------------
|
||||
# Load or build schemas (one‑time LLM call each)
|
||||
# -------------------------------------------------------------------
|
||||
company_schema = _load_or_build_schema(
|
||||
COMPANY_SCHEMA_PATH,
|
||||
_SAMPLE_COMPANY_HTML,
|
||||
_COMPANY_SCHEMA_QUERY,
|
||||
_COMPANY_SCHEMA_EXAMPLE,
|
||||
# True
|
||||
)
|
||||
people_schema = _load_or_build_schema(
|
||||
PEOPLE_SCHEMA_PATH,
|
||||
_SAMPLE_PEOPLE_HTML,
|
||||
_PEOPLE_SCHEMA_QUERY,
|
||||
_PEOPLE_SCHEMA_EXAMPLE,
|
||||
# True
|
||||
)
|
||||
|
||||
outdir = BASE_DIR / pathlib.Path(opts.outdir)
|
||||
outdir.mkdir(parents=True, exist_ok=True)
|
||||
f_companies = (BASE_DIR / outdir / "companies.jsonl").open("a", encoding="utf-8")
|
||||
f_people = (BASE_DIR / outdir / "people.jsonl").open("a", encoding="utf-8")
|
||||
|
||||
# -------------------------------------------------------------------
|
||||
# Prepare crawler with cookie pool rotation
|
||||
# -------------------------------------------------------------------
|
||||
profiler = BrowserProfiler()
|
||||
path = profiler.get_profile_path(opts.profile_name)
|
||||
bc = BrowserConfig(
|
||||
headless=False,
|
||||
verbose=False,
|
||||
user_data_dir=path,
|
||||
use_managed_browser=True,
|
||||
user_agent_mode = "random",
|
||||
user_agent_generator_config= {
|
||||
"platforms": "mobile",
|
||||
"os": "Android"
|
||||
}
|
||||
)
|
||||
crawler = AsyncWebCrawler(config=bc)
|
||||
|
||||
await crawler.start()
|
||||
|
||||
# Single worker for simplicity; concurrency can be scaled by arun_many if needed.
|
||||
# crawler = await next_crawler().start()
|
||||
try:
|
||||
# Build LinkedIn search URL
|
||||
search_url = f'https://www.linkedin.com/search/results/companies/?keywords={quote(opts.query)}&companyHqGeo="{opts.geo}"'
|
||||
logging.info("Seed URL => %s", search_url)
|
||||
|
||||
companies: List[Dict] = []
|
||||
if opts.cmd in ("companies", "full"):
|
||||
companies = await crawl_company_search(
|
||||
crawler, search_url, company_schema, opts.max_companies
|
||||
)
|
||||
for c in companies:
|
||||
f_companies.write(json.dumps(c, ensure_ascii=False) + "\n")
|
||||
logging.info(f"[bold green]✓[/] Companies scraped so far: {len(companies)}")
|
||||
|
||||
if opts.cmd in ("people", "full"):
|
||||
if not companies:
|
||||
# load from previous run
|
||||
src = outdir / "companies.jsonl"
|
||||
if not src.exists():
|
||||
logging.error("companies.jsonl missing — run companies/full first")
|
||||
return 10
|
||||
companies = [json.loads(l) for l in src.read_text().splitlines()]
|
||||
total_people = 0
|
||||
title_kw = " ".join([t.strip() for t in opts.title_filters.split(",") if t.strip()]) if opts.title_filters else ""
|
||||
for comp in companies:
|
||||
people = await crawl_people_page(
|
||||
crawler,
|
||||
comp["people_url"],
|
||||
people_schema,
|
||||
opts.max_people,
|
||||
title_kw,
|
||||
)
|
||||
for p in people:
|
||||
rec = p | {
|
||||
"company_handle": comp["handle"],
|
||||
# "captured_at": datetime.now(UTC).isoformat(timespec="seconds") + "Z",
|
||||
"captured_at": datetime.now(UTC).isoformat(timespec="seconds") + "Z",
|
||||
}
|
||||
f_people.write(json.dumps(rec, ensure_ascii=False) + "\n")
|
||||
total_people += len(people)
|
||||
logging.info(
|
||||
f"{comp['name']} — [cyan]{len(people)}[/] people extracted"
|
||||
)
|
||||
await asyncio.sleep(random.uniform(0.5, 1))
|
||||
logging.info("Total people scraped: %d", total_people)
|
||||
finally:
|
||||
await crawler.close()
|
||||
f_companies.close()
|
||||
f_people.close()
|
||||
|
||||
return 0
|
||||
|
||||
|
||||
def main():
|
||||
parser = build_arg_parser()
|
||||
cli_opts = parser.parse_args()
|
||||
|
||||
# decide on debug defaults
|
||||
if cli_opts.debug:
|
||||
opts = detect_debug_defaults(force=True)
|
||||
else:
|
||||
env_defaults = detect_debug_defaults()
|
||||
opts = env_defaults if vars(env_defaults) else cli_opts  # empty namespace means no debug defaults, fall back to CLI args
|
||||
|
||||
if not getattr(opts, "cmd", None):
|
||||
opts.cmd = "full"
|
||||
|
||||
exit_code = asyncio.run(async_main(opts))
|
||||
sys.exit(exit_code)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
377
docs/apps/linkdin/c4ai_insights.py
Normal file
@@ -0,0 +1,377 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Stage-2 Insights builder
|
||||
------------------------
|
||||
Reads companies.jsonl & people.jsonl (Stage-1 output) and produces:
|
||||
• company_graph.json
|
||||
• org_chart_<handle>.json (one per company)
|
||||
• decision_makers.csv
|
||||
• graph_view.html (interactive visualisation)
|
||||
|
||||
Run:
|
||||
python c4ai_insights.py --in ./stage1_out --out ./stage2_out
|
||||
|
||||
Author : Tom @ Kidocode, 2025-04-28
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
# Imports & Third-party
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
|
||||
import argparse, asyncio, json, pathlib, random
|
||||
from datetime import datetime, UTC
|
||||
from types import SimpleNamespace
|
||||
from pathlib import Path
|
||||
from typing import List, Dict, Any
|
||||
# Pretty CLI UX
|
||||
from rich.console import Console
|
||||
from rich.logging import RichHandler
|
||||
from rich.progress import Progress, SpinnerColumn, BarColumn, TextColumn, TimeElapsedColumn
|
||||
import logging
|
||||
|
||||
|
||||
BASE_DIR = pathlib.Path(__file__).resolve().parent
|
||||
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
# 3rd-party deps
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
import numpy as np
|
||||
# from sentence_transformers import SentenceTransformer
|
||||
# from sklearn.metrics.pairwise import cosine_similarity
|
||||
import pandas as pd
|
||||
import hashlib
|
||||
|
||||
from litellm import completion #Support any LLM Provider
|
||||
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
# Utils
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
def load_jsonl(path: Path) -> List[Dict[str, Any]]:
|
||||
with open(path, "r", encoding="utf-8") as f:
|
||||
return [json.loads(l) for l in f]
|
||||
|
||||
def dump_json(obj, path: Path):
|
||||
with open(path, "w", encoding="utf-8") as f:
|
||||
json.dump(obj, f, ensure_ascii=False, indent=2)
|
||||
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
# Constants
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
BASE_DIR = pathlib.Path(__file__).resolve().parent
|
||||
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
# Debug defaults (mirrors Stage-1 trick)
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
def dev_defaults() -> SimpleNamespace:
|
||||
return SimpleNamespace(
|
||||
in_dir="./debug_out",
|
||||
out_dir="./insights_debug",
|
||||
embed_model="all-MiniLM-L6-v2",
|
||||
top_k=10,
|
||||
llm_provider="openai/gpt-4.1",
|
||||
llm_api_key=None,
|
||||
max_llm_tokens=8000,
|
||||
llm_temperature=1.0,
|
||||
workers=4
|
||||
)
|
||||
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
# Graph builders
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
def embed_descriptions(companies, model_name:str, opts) -> np.ndarray:
|
||||
from sentence_transformers import SentenceTransformer
|
||||
|
||||
logging.debug(f"Using embedding model: {model_name}")
|
||||
cache_path = BASE_DIR / Path(opts.out_dir) / "embeds_cache.json"
|
||||
cache = {}
|
||||
if cache_path.exists():
|
||||
with open(cache_path) as f:
|
||||
cache = json.load(f)
|
||||
# flush cache if model differs
|
||||
if cache.get("_model") != model_name:
|
||||
cache = {}
|
||||
|
||||
model = SentenceTransformer(model_name)
|
||||
new_texts, new_indices = [], []
|
||||
vectors = np.zeros((len(companies), 384), dtype=np.float32)
|
||||
|
||||
for idx, comp in enumerate(companies):
|
||||
text = comp.get("about") or comp.get("descriptor","")
|
||||
h = hashlib.sha1(text.encode("utf-8")).hexdigest()
|
||||
cached = cache.get(comp["handle"])
|
||||
if cached and cached["hash"] == h:
|
||||
vectors[idx] = np.array(cached["vector"], dtype=np.float32)
|
||||
else:
|
||||
new_texts.append(text)
|
||||
new_indices.append((idx, comp["handle"], h))
|
||||
|
||||
if new_texts:
|
||||
embeds = model.encode(new_texts, show_progress_bar=False, convert_to_numpy=True)
|
||||
for vec, (idx, handle, h) in zip(embeds, new_indices):
|
||||
vectors[idx] = vec
|
||||
cache[handle] = {"hash": h, "vector": vec.tolist()}
|
||||
cache["_model"] = model_name
|
||||
with open(cache_path, "w") as f:
|
||||
json.dump(cache, f)
|
||||
|
||||
return vectors
|
||||
|
||||
def build_company_graph(companies, embeds:np.ndarray, top_k:int) -> Dict[str,Any]:
|
||||
from sklearn.metrics.pairwise import cosine_similarity
|
||||
sims = cosine_similarity(embeds)
|
||||
nodes, edges = [], []
|
||||
idx_of = {c["handle"]: i for i,c in enumerate(companies)}
|
||||
for i,c in enumerate(companies):
|
||||
node = dict(
|
||||
id=c["handle"].strip("/"),
|
||||
name=c["name"],
|
||||
handle=c["handle"],
|
||||
about=c.get("about",""),
|
||||
people_url=c.get("people_url",""),
|
||||
industry=c.get("descriptor","").split("•")[0].strip(),
|
||||
geoUrn=c.get("geoUrn"),
|
||||
followers=c.get("followers",0),
|
||||
# desc_embed=embeds[i].tolist(),
|
||||
desc_embed=[],
|
||||
)
|
||||
nodes.append(node)
|
||||
# pick top-k most similar except itself
|
||||
top_idx = np.argsort(sims[i])[::-1][1:top_k+1]
|
||||
for j in top_idx:
|
||||
tgt = companies[j]
|
||||
weight = float(sims[i,j])
|
||||
if node["industry"] == tgt.get("descriptor","").split("•")[0].strip():
|
||||
weight += 0.10
|
||||
if node["geoUrn"] == tgt.get("geoUrn"):
|
||||
weight += 0.05
|
||||
tgt['followers'] = tgt.get("followers", None) or 1
|
||||
node["followers"] = node.get("followers", None) or 1
|
||||
follower_ratio = min(node["followers"], tgt.get("followers",1)) / max(node["followers"] or 1, tgt.get("followers",1))
|
||||
weight += 0.05 * follower_ratio
|
||||
edges.append(dict(
|
||||
source=node["id"],
|
||||
target=tgt["handle"].strip("/"),
|
||||
weight=round(weight,4),
|
||||
drivers=dict(
|
||||
embed_sim=round(float(sims[i,j]),4),
|
||||
industry_match=0.10 if node["industry"] == tgt.get("descriptor","").split("•")[0].strip() else 0,
|
||||
geo_overlap=0.05 if node["geoUrn"] == tgt.get("geoUrn") else 0,
|
||||
)
|
||||
))
|
||||
# return {"nodes":nodes,"edges":edges,"meta":{"generated_at":datetime.now(UTC).isoformat()}}
|
||||
return {"nodes":nodes,"edges":edges,"meta":{"generated_at":datetime.now(UTC).isoformat()}}
|
||||
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
# Org-chart via LLM
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
async def infer_org_chart_llm(company, people, llm_provider:str, api_key:str, max_tokens:int, temperature:float, stub:bool=False, base_url:str=None):
|
||||
if stub:
|
||||
# Tiny fake org-chart when debugging offline
|
||||
chief = random.choice(people)
|
||||
nodes = [{
|
||||
"id": chief["profile_url"],
|
||||
"name": chief["name"],
|
||||
"title": chief["headline"],
|
||||
"dept": chief["headline"].split()[:1][0],
|
||||
"yoe_total": 8,
|
||||
"yoe_current": 2,
|
||||
"seniority_score": 0.8,
|
||||
"decision_score": 0.9,
|
||||
"avatar_url": chief.get("avatar_url")
|
||||
}]
|
||||
return {"nodes":nodes,"edges":[],"meta":{"debug_stub":True,"generated_at":datetime.now(UTC).isoformat()}}
|
||||
|
||||
prompt = [
|
||||
{"role":"system","content":"You are an expert B2B org-chart reasoner."},
|
||||
{"role":"user","content":f"""Here is the company description:
|
||||
|
||||
<company>
|
||||
{json.dumps(company, ensure_ascii=False)}
|
||||
</company>
|
||||
|
||||
Here is a JSON list of employees:
|
||||
<employees>
|
||||
{json.dumps(people, ensure_ascii=False)}
|
||||
</employees>
|
||||
|
||||
1) Build a reporting tree (manager -> direct reports)
|
||||
2) For each person output a decision_score 0-1 for buying new software
|
||||
|
||||
Return JSON: {{ "nodes":[{{id,name,title,dept,yoe_total,yoe_current,seniority_score,decision_score,avatar_url,profile_url}}], "edges":[{{source,target,type,confidence}}] }}
|
||||
"""}
|
||||
]
|
||||
resp = completion(
|
||||
model=llm_provider,
|
||||
messages=prompt,
|
||||
max_tokens=max_tokens,
|
||||
temperature=temperature,
|
||||
response_format={"type":"json_object"},
|
||||
api_key=api_key,
|
||||
base_url=base_url
|
||||
)
|
||||
chart = json.loads(resp.choices[0].message.content)
|
||||
chart["meta"] = dict(
|
||||
model=llm_provider,
|
||||
generated_at=datetime.now(UTC).isoformat()
|
||||
)
|
||||
return chart
|
||||
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
# CSV flatten
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
def export_decision_makers(charts_dir:Path, csv_path:Path, threshold:float=0.5):
|
||||
rows=[]
|
||||
for p in charts_dir.glob("org_chart_*.json"):
|
||||
data=json.loads(p.read_text())
|
||||
comp = p.stem.split("org_chart_")[1]
|
||||
for n in data.get("nodes",[]):
|
||||
if n.get("decision_score",0)>=threshold:
|
||||
rows.append(dict(
|
||||
company=comp,
|
||||
person=n["name"],
|
||||
title=n["title"],
|
||||
decision_score=n["decision_score"],
|
||||
profile_url=n["id"]
|
||||
))
|
||||
pd.DataFrame(rows).to_csv(csv_path,index=False)
|
||||
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
# HTML rendering
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
def render_html(out:Path, template_dir:Path):
|
||||
# From template folder cp graph_view.html and ai.js in out folder
|
||||
import shutil
|
||||
shutil.copy(template_dir/"graph_view_template.html", out / "graph_view.html")
|
||||
shutil.copy(template_dir/"ai.js", out)
|
||||
|
||||
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
# Main async pipeline
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
async def run(opts):
|
||||
# ── silence SDK noise ──────────────────────────────────────────────────────
|
||||
for noisy in ("openai", "httpx", "httpcore"):
|
||||
lg = logging.getLogger(noisy)
|
||||
lg.setLevel(logging.WARNING) # or ERROR if you want total silence
|
||||
lg.propagate = False # optional: stop them reaching root
|
||||
|
||||
# ────────────── logging bootstrap ──────────────
|
||||
console = Console()
|
||||
logging.basicConfig(
|
||||
level="INFO",
|
||||
format="%(message)s",
|
||||
handlers=[RichHandler(console=console, markup=True, rich_tracebacks=True)],
|
||||
)
|
||||
|
||||
in_dir = BASE_DIR / Path(opts.in_dir)
|
||||
out_dir = BASE_DIR / Path(opts.out_dir)
|
||||
out_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
companies = load_jsonl(in_dir/"companies.jsonl")
|
||||
people = load_jsonl(in_dir/"people.jsonl")
|
||||
|
||||
logging.info(f"[bold cyan]Loaded[/] {len(companies)} companies, {len(people)} people")
|
||||
|
||||
logging.info("[bold]⇢[/] Embedding company descriptions…")
|
||||
embeds = embed_descriptions(companies, opts.embed_model, opts)
|
||||
|
||||
logging.info("[bold]⇢[/] Building similarity graph")
|
||||
company_graph = build_company_graph(companies, embeds, opts.top_k)
|
||||
dump_json(company_graph, out_dir/"company_graph.json")
|
||||
|
||||
# Filter companies that need processing
|
||||
to_process = []
|
||||
for comp in companies:
|
||||
handle = comp["handle"].strip("/").replace("/","_")
|
||||
out_file = out_dir/f"org_chart_{handle}.json"
|
||||
if out_file.exists():
|
||||
logging.info(f"[green]✓[/] Skipping existing {comp['name']}")
|
||||
continue
|
||||
to_process.append(comp)
|
||||
|
||||
|
||||
if not to_process:
|
||||
logging.info("[yellow]All companies already processed[/]")
|
||||
else:
|
||||
workers = getattr(opts, 'workers', 1)
|
||||
parallel = workers > 1
|
||||
|
||||
logging.info(f"[bold]⇢[/] Inferring org-charts via LLM {f'(parallel={workers} workers)' if parallel else ''}")
|
||||
|
||||
with Progress(
|
||||
SpinnerColumn(),
|
||||
BarColumn(),
|
||||
TextColumn("[progress.description]{task.description}"),
|
||||
TimeElapsedColumn(),
|
||||
console=console,
|
||||
) as progress:
|
||||
task = progress.add_task("Org charts", total=len(to_process))
|
||||
|
||||
async def process_one(comp):
|
||||
handle = comp["handle"].strip("/").replace("/","_")
|
||||
persons = [p for p in people if p["company_handle"].strip("/") == comp["handle"].strip("/")]
|
||||
chart = await infer_org_chart_llm(
|
||||
comp, persons,
|
||||
llm_provider=opts.llm_provider,
|
||||
api_key=opts.llm_api_key or None,
|
||||
max_tokens=opts.max_llm_tokens,
|
||||
temperature=opts.llm_temperature,
|
||||
stub=opts.stub or False,
|
||||
base_url=opts.llm_base_url or None
|
||||
)
|
||||
chart["meta"]["company"] = comp["name"]
|
||||
|
||||
# Save the result immediately
|
||||
dump_json(chart, out_dir/f"org_chart_{handle}.json")
|
||||
|
||||
progress.update(task, advance=1, description=f"{comp['name']} ({len(persons)} ppl)")
|
||||
|
||||
# Create tasks for all companies
|
||||
tasks = [process_one(comp) for comp in to_process]
|
||||
|
||||
# Process in batches based on worker count
|
||||
semaphore = asyncio.Semaphore(workers)
|
||||
|
||||
async def bounded_process(coro):
|
||||
async with semaphore:
|
||||
return await coro
|
||||
|
||||
# Run with concurrency control
|
||||
await asyncio.gather(*(bounded_process(task) for task in tasks))
|
||||
|
||||
logging.info("[bold]⇢[/] Flattening decision-makers CSV")
|
||||
export_decision_makers(out_dir, out_dir/"decision_makers.csv")
|
||||
|
||||
render_html(out_dir, template_dir=BASE_DIR/"templates")
|
||||
logging.success = lambda msg, **k: console.print(f"[bold green]✓[/] {msg}", **k)
|
||||
logging.success(f"Stage-2 artefacts written to {out_dir}")
|
||||
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
# CLI
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
def build_arg_parser():
|
||||
p = argparse.ArgumentParser(description="Build graphs & visualisation from Stage-1 output")
|
||||
p.add_argument("--in", dest="in_dir", required=False, help="Stage-1 output dir", default=".")
|
||||
p.add_argument("--out", dest="out_dir", required=False, help="Destination dir", default=".")
|
||||
p.add_argument("--embed-model", default="all-MiniLM-L6-v2")
|
||||
p.add_argument("--top-k", type=int, default=10, help="Top-k neighbours per company")
|
||||
p.add_argument("--llm-provider", default="openai/gpt-4.1",
|
||||
help="LLM model to use in format 'provider/model_name' (e.g., 'anthropic/claude-3')")
|
||||
p.add_argument("--llm-api-key", help="API key for LLM provider (defaults to env vars)")
|
||||
p.add_argument("--llm-base-url", help="Base URL for LLM API endpoint")
|
||||
p.add_argument("--max-llm-tokens", type=int, default=8024)
|
||||
p.add_argument("--llm-temperature", type=float, default=1.0)
|
||||
p.add_argument("--stub", action="store_true", help="Skip OpenAI call and generate tiny fake org charts")
|
||||
p.add_argument("--workers", type=int, default=4, help="Number of parallel workers for LLM inference")
|
||||
return p
|
||||
|
||||
def main():
|
||||
dbg = dev_defaults()
|
||||
# opts = dbg if True else build_arg_parser().parse_args()
|
||||
opts = build_arg_parser().parse_args()
|
||||
asyncio.run(run(opts))
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
39
docs/apps/linkdin/schemas/company_card.json
Normal file
@@ -0,0 +1,39 @@
|
||||
{
|
||||
"name": "LinkedIn Company Card",
|
||||
"baseSelector": "div.search-results-container ul[role='list'] > li",
|
||||
"fields": [
|
||||
{
|
||||
"name": "handle",
|
||||
"selector": "a[href*='/company/']",
|
||||
"type": "attribute",
|
||||
"attribute": "href"
|
||||
},
|
||||
{
|
||||
"name": "profile_image",
|
||||
"selector": "a[href*='/company/'] img",
|
||||
"type": "attribute",
|
||||
"attribute": "src"
|
||||
},
|
||||
{
|
||||
"name": "name",
|
||||
"selector": "span[class*='t-16'] a",
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"name": "descriptor",
|
||||
"selector": "div[class*='t-black t-normal']",
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"name": "about",
|
||||
"selector": "p[class*='entity-result__summary--2-lines']",
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"name": "followers",
|
||||
"selector": "div:contains('followers')",
|
||||
"type": "regex",
|
||||
"pattern": "(\\d+)\\s*followers"
|
||||
}
|
||||
]
|
||||
}
|
||||
38
docs/apps/linkdin/schemas/people_card.json
Normal file
@@ -0,0 +1,38 @@
|
||||
{
|
||||
"name": "LinkedIn People Card",
|
||||
"baseSelector": "li.org-people-profile-card__profile-card-spacing",
|
||||
"fields": [
|
||||
{
|
||||
"name": "profile_url",
|
||||
"selector": "a.eETATgYTipaVsmrBChiBJJvFsdPhNpulhPZUVLHLo",
|
||||
"type": "attribute",
|
||||
"attribute": "href"
|
||||
},
|
||||
{
|
||||
"name": "name",
|
||||
"selector": ".artdeco-entity-lockup__title .lt-line-clamp--single-line",
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"name": "headline",
|
||||
"selector": ".artdeco-entity-lockup__subtitle .lt-line-clamp--multi-line",
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"name": "followers",
|
||||
"selector": ".lt-line-clamp--multi-line.t-12",
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"name": "connection_degree",
|
||||
"selector": ".artdeco-entity-lockup__badge .artdeco-entity-lockup__degree",
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"name": "avatar_url",
|
||||
"selector": ".artdeco-entity-lockup__image img",
|
||||
"type": "attribute",
|
||||
"attribute": "src"
|
||||
}
|
||||
]
|
||||
}
|
||||
143
docs/apps/linkdin/snippets/company.html
Normal file
@@ -0,0 +1,143 @@
|
||||
<li class="yCLWzruNprmIzaZzFFonVFBtMrbaVYnuDFA">
|
||||
<!----><!---->
|
||||
|
||||
|
||||
|
||||
<div class="IxlEPbRZwQYrRltKPvHAyjBmCdIWTAoYo" data-chameleon-result-urn="urn:li:company:362492"
|
||||
data-view-name="search-entity-result-universal-template">
|
||||
|
||||
|
||||
|
||||
|
||||
<div class="linked-area flex-1
|
||||
cursor-pointer">
|
||||
|
||||
<div class="BAEgVqVuxosMJZodcelsgPoyRcrkiqgVCGHXNQ">
|
||||
<div class="afcvrbGzNuyRlhPPQWrWirJtUdHAAtUlqxwvVA">
|
||||
<div class="display-flex align-items-center">
|
||||
<!---->
|
||||
|
||||
<a class="eETATgYTipaVsmrBChiBJJvFsdPhNpulhPZUVLHLo scale-down " aria-hidden="true"
|
||||
tabindex="-1" href="https://www.linkedin.com/company/managment-research-services-inc./"
|
||||
data-test-app-aware-link="">
|
||||
|
||||
<div class="ivm-image-view-model ">
|
||||
|
||||
<div class="ivm-view-attr__img-wrapper
|
||||
|
||||
">
|
||||
<!---->
|
||||
<!----> <img width="48"
|
||||
src="https://media.licdn.com/dms/image/v2/C560BAQFWpusEOgW-ww/company-logo_100_100/company-logo_100_100/0/1630583697877/managment_research_services_inc_logo?e=1750896000&v=beta&t=Ch9vyEZdfng-1D1m_XqP5kjNpVXUBKkk9cNhMZUhx0E"
|
||||
loading="lazy" height="48" alt="Management Research Services, Inc. (MRS, Inc)"
|
||||
id="ember28"
|
||||
class="ivm-view-attr__img--centered EntityPhoto-square-3 evi-image lazy-image ember-view">
|
||||
</div>
|
||||
|
||||
</div>
|
||||
|
||||
</a>
|
||||
|
||||
|
||||
</div>
|
||||
</div>
|
||||
<div
|
||||
class="wympnVuDByXHvafWrMGJLZuchDmCRqLmWPwg MmzCPRicJimZvjJhvqTzDcDbdHhWPzspERzA pt3 pb3 t-12 t-black--light">
|
||||
<div class="mb1">
|
||||
|
||||
<div class="t-roman t-sans">
|
||||
|
||||
|
||||
|
||||
<div class="display-flex">
|
||||
<span class="TikBXjihYvcNUoIzkslUaEjfIuLmYxfs OoHEyXgsiIqGADjcOtTmfdpoYVXrLKTvkwI ">
|
||||
<span class="CgaWLOzmXNuKbRIRARSErqCJcBPYudEKo
|
||||
t-16">
|
||||
<a class="eETATgYTipaVsmrBChiBJJvFsdPhNpulhPZUVLHLo "
|
||||
href="https://www.linkedin.com/company/managment-research-services-inc./"
|
||||
data-test-app-aware-link="">
|
||||
<!---->Management Research Services, Inc. (MRS, Inc)<!---->
|
||||
<!----> </a>
|
||||
<!----> </span>
|
||||
</span>
|
||||
<!---->
|
||||
</div>
|
||||
|
||||
|
||||
|
||||
</div>
|
||||
|
||||
|
||||
|
||||
<div class="LjmdKCEqKITHihFOiQsBAQylkdnsWhqZii
|
||||
t-14 t-black t-normal">
|
||||
<!---->Insurance • Milwaukee, Wisconsin<!---->
|
||||
</div>
|
||||
|
||||
<div class="cTPhJiHyNLmxdQYFlsEOutjznmqrVHUByZwZ
|
||||
t-14 t-normal">
|
||||
<!---->1K followers<!---->
|
||||
</div>
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
</div>
|
||||
|
||||
<!---->
|
||||
<p class="yWzlqwKNlvCWVNoKqmzoDDEnBMUuyynaLg
|
||||
entity-result__summary--2-lines
|
||||
t-12 t-black--light
|
||||
">
|
||||
<!---->MRS combines 30 years of experience supporting the Life,<span class="white-space-pre">
|
||||
</span><strong><!---->Health<!----></strong><span class="white-space-pre"> </span>and
|
||||
Annuities<span class="white-space-pre"> </span><strong><!---->Insurance<!----></strong><span
|
||||
class="white-space-pre"> </span>Industry with customized<span class="white-space-pre">
|
||||
</span><strong><!---->insurance<!----></strong><span class="white-space-pre">
|
||||
</span>underwriting solutions that efficiently support clients’ workflows. Supported by the
|
||||
Agenium Platform (www.agenium.ai) our innovative underwriting solutions are guaranteed to
|
||||
optimize requirements...<!---->
|
||||
</p>
|
||||
|
||||
<!---->
|
||||
</div>
|
||||
<div class="qXxdnXtzRVFTnTnetmNpssucBwQBsWlUuk MmzCPRicJimZvjJhvqTzDcDbdHhWPzspERzA">
|
||||
<!---->
|
||||
|
||||
|
||||
<div>
|
||||
|
||||
|
||||
|
||||
|
||||
<button aria-label="Follow Management Research Services, Inc. (MRS, Inc)" id="ember61"
|
||||
class="artdeco-button artdeco-button--2 artdeco-button--secondary ember-view"
|
||||
type="button"><!---->
|
||||
<span class="artdeco-button__text">
|
||||
Follow
|
||||
</span></button>
|
||||
|
||||
|
||||
|
||||
<!---->
|
||||
<!---->
|
||||
|
||||
|
||||
</div>
|
||||
|
||||
|
||||
|
||||
</div>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
|
||||
|
||||
|
||||
|
||||
</div>
|
||||
|
||||
|
||||
|
||||
</li>
|
||||
94
docs/apps/linkdin/snippets/people.html
Normal file
@@ -0,0 +1,94 @@
|
||||
<li class="grid grid__col--lg-8 block org-people-profile-card__profile-card-spacing">
|
||||
<div>
|
||||
|
||||
|
||||
<section class="artdeco-card full-width qQdPErXQkSAbwApNgNfuxukTIPPykttCcZGOHk">
|
||||
<!---->
|
||||
|
||||
<img width="210" src="data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7"
|
||||
ariarole="presentation" loading="lazy" height="210" alt="" id="ember96"
|
||||
class="evi-image lazy-image ghost-default ember-view org-people-profile-card__cover-photo org-people-profile-card__cover-photo--people">
|
||||
|
||||
<div class="org-people-profile-card__profile-info">
|
||||
<div id="ember97"
|
||||
class="artdeco-entity-lockup artdeco-entity-lockup--stacked-center artdeco-entity-lockup--size-7 ember-view">
|
||||
<div id="ember98"
|
||||
class="artdeco-entity-lockup__image artdeco-entity-lockup__image--type-circle ember-view"
|
||||
type="circle">
|
||||
|
||||
<a class="eETATgYTipaVsmrBChiBJJvFsdPhNpulhPZUVLHLo "
|
||||
id="org-people-profile-card__profile-image-0"
|
||||
href="https://www.linkedin.com/in/speakerrayna?miniProfileUrn=urn%3Ali%3Afs_miniProfile%3AACoAABsqUBoBr5x071PuGGpNtK3NlvSARiVXPIs"
|
||||
data-test-app-aware-link="">
|
||||
<img width="104"
|
||||
src="https://media.licdn.com/dms/image/v2/D5603AQGs2Vyju4xZ7A/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1681741067031?e=1750896000&v=beta&t=Hvj--IrrmpVIH7pec7-l_PQok8vsS__CGeUqBWOw7co"
|
||||
loading="lazy" height="104" alt="Dr. Rayna S." id="ember99"
|
||||
class="evi-image lazy-image ember-view">
|
||||
</a>
|
||||
|
||||
|
||||
</div>
|
||||
<div id="ember100" class="artdeco-entity-lockup__content ember-view">
|
||||
<div id="ember101" class="artdeco-entity-lockup__title ember-view">
|
||||
<a class="eETATgYTipaVsmrBChiBJJvFsdPhNpulhPZUVLHLo link-without-visited-state"
|
||||
aria-label="View Dr. Rayna S.’s profile"
|
||||
href="https://www.linkedin.com/in/speakerrayna?miniProfileUrn=urn%3Ali%3Afs_miniProfile%3AACoAABsqUBoBr5x071PuGGpNtK3NlvSARiVXPIs"
|
||||
data-test-app-aware-link="">
|
||||
<div id="ember103" class="ember-view lt-line-clamp lt-line-clamp--single-line AGabuksChUpCmjWshSnaZryLKSthOKkwclxY
|
||||
t-black" style="">
|
||||
Dr. Rayna S.
|
||||
|
||||
<!---->
|
||||
</div>
|
||||
|
||||
</a>
|
||||
|
||||
</div>
|
||||
<div id="ember104" class="artdeco-entity-lockup__badge ember-view"> <span class="a11y-text">3rd+
|
||||
degree connection</span>
|
||||
<span class="artdeco-entity-lockup__degree" aria-hidden="true">
|
||||
· 3rd
|
||||
</span>
|
||||
<!----><!---->
|
||||
</div>
|
||||
<div id="ember105" class="artdeco-entity-lockup__subtitle ember-view">
|
||||
<div class="t-14 t-black--light t-normal">
|
||||
<div id="ember107" class="ember-view lt-line-clamp lt-line-clamp--multi-line"
|
||||
style="-webkit-line-clamp: 2">
|
||||
Leadership and Talent Development Consultant and Professional Speaker
|
||||
|
||||
<!---->
|
||||
</div>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
<div id="ember108" class="artdeco-entity-lockup__caption ember-view"></div>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
<span class="text-align-center">
|
||||
<span id="ember110"
|
||||
class="ember-view lt-line-clamp lt-line-clamp--multi-line t-12 t-black--light mt2"
|
||||
style="-webkit-line-clamp: 3">
|
||||
727 followers
|
||||
|
||||
<!----> </span>
|
||||
|
||||
</span>
|
||||
</div>
|
||||
|
||||
<footer class="ph3 pb3">
|
||||
<button aria-label="Follow Dr. Rayna S." id="ember111"
|
||||
class="artdeco-button artdeco-button--2 artdeco-button--secondary ember-view full-width"
|
||||
type="button"><!---->
|
||||
<span class="artdeco-button__text">
|
||||
Follow
|
||||
</span></button>
|
||||
</footer>
|
||||
|
||||
</section>
|
||||
|
||||
|
||||
</div>
|
||||
|
||||
</li>
|
||||
50
docs/apps/linkdin/templates/ai.js
Normal file
@@ -0,0 +1,50 @@
|
||||
// ==== File: ai.js ====
|
||||
|
||||
class ApiHandler {
|
||||
constructor(apiKey = null) {
|
||||
this.apiKey = apiKey || localStorage.getItem("openai_api_key") || "";
|
||||
console.log("ApiHandler ready");
|
||||
}
|
||||
|
||||
setApiKey(k) {
|
||||
this.apiKey = k.trim();
|
||||
if (this.apiKey) localStorage.setItem("openai_api_key", this.apiKey);
|
||||
}
|
||||
|
||||
async *chatStream(messages, {model = "gpt-4o", temperature = 0.7} = {}) {
|
||||
if (!this.apiKey) throw new Error("OpenAI API key missing");
|
||||
const payload = {model, messages, stream: true, max_tokens: 1024};
|
||||
const controller = new AbortController();
|
||||
|
||||
const res = await fetch("https://api.openai.com/v1/chat/completions", {
|
||||
method: "POST",
|
||||
headers: {
|
||||
"Content-Type": "application/json",
|
||||
Authorization: `Bearer ${this.apiKey}`,
|
||||
},
|
||||
body: JSON.stringify(payload),
|
||||
signal: controller.signal,
|
||||
});
|
||||
if (!res.ok) throw new Error(`OpenAI: ${res.statusText}`);
|
||||
const reader = res.body.getReader();
|
||||
const dec = new TextDecoder();
|
||||
|
||||
let buf = "";
|
||||
while (true) {
|
||||
const {done, value} = await reader.read();
|
||||
if (done) break;
|
||||
buf += dec.decode(value, {stream: true});
// Split off complete SSE lines; keep any trailing partial line for the next chunk
// so already-yielded lines are not re-parsed and partial JSON never reaches JSON.parse.
const lines = buf.split("\n");
buf = lines.pop();
for (const line of lines) {
if (!line.startsWith("data: ")) continue;
if (line.includes("[DONE]")) return;
const json = JSON.parse(line.slice(6));
const delta = json.choices?.[0]?.delta?.content;
if (delta) yield delta;
}
}
|
||||
}
|
||||
}
|
||||
|
||||
window.API = new ApiHandler();
|
||||
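// Usage sketch (illustrative only; `render` is a placeholder for your own output handler):
// for await (const chunk of window.API.chatStream([{role: "user", content: "Hi"}])) { render(chunk); }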
|
||||
1171
docs/apps/linkdin/templates/graph_view_template.html
Normal file
File diff suppressed because it is too large
51
docs/codebase/browser.md
Normal file
@@ -0,0 +1,51 @@
|
||||
### browser_manager.py
|
||||
|
||||
| Function | What it does |
|
||||
|---|---|
|
||||
| `ManagedBrowser.build_browser_flags` | Returns baseline Chromium CLI flags, disables GPU and sandbox, plugs locale, timezone, stealth tweaks, and any extras from `BrowserConfig`. |
|
||||
| `ManagedBrowser.__init__` | Stores config and logger, creates temp dir, preps internal state. |
|
||||
| `ManagedBrowser.start` | Spawns or connects to the Chromium process, returns its CDP endpoint plus the `subprocess.Popen` handle. |
|
||||
| `ManagedBrowser._initial_startup_check` | Pings the CDP endpoint once to be sure the browser is alive, raises if not. |
|
||||
| `ManagedBrowser._monitor_browser_process` | Async-loops on the subprocess, logs exits or crashes, restarts if policy allows. |
|
||||
| `ManagedBrowser._get_browser_path_WIP` | Old helper that maps OS + browser type to an executable path. |
|
||||
| `ManagedBrowser._get_browser_path` | Current helper, checks env vars, Playwright cache, and OS defaults for the real executable. |
|
||||
| `ManagedBrowser._get_browser_args` | Builds the final CLI arg list by merging user flags, stealth flags, and defaults. |
|
||||
| `ManagedBrowser.cleanup` | Terminates the browser, stops monitors, deletes the temp dir. |
|
||||
| `ManagedBrowser.create_profile` | Opens a visible browser so a human can log in, then zips the resulting user-data-dir to `~/.crawl4ai/profiles/<name>`. |
|
||||
| `ManagedBrowser.list_profiles` | Thin wrapper, now forwarded to `BrowserProfiler.list_profiles()`. |
|
||||
| `ManagedBrowser.delete_profile` | Thin wrapper, now forwarded to `BrowserProfiler.delete_profile()`. |
|
||||
| `BrowserManager.__init__` | Holds the global Playwright instance, browser handle, config signature cache, session map, and logger. |
|
||||
| `BrowserManager.start` | Boots the underlying `ManagedBrowser`, then spins up the default Playwright browser context with stealth patches. |
|
||||
| `BrowserManager._build_browser_args` | Translates `CrawlerRunConfig` (proxy, UA, timezone, headless flag, etc.) into Playwright `launch_args`. |
|
||||
| `BrowserManager.setup_context` | Applies locale, geolocation, permissions, cookies, and UA overrides on a fresh context. |
|
||||
| `BrowserManager.create_browser_context` | Internal helper that actually calls `browser.new_context(**options)` after running `setup_context`. |
|
||||
| `BrowserManager._make_config_signature` | Hashes the non-ephemeral parts of `CrawlerRunConfig` so contexts can be reused safely. |
|
||||
| `BrowserManager.get_page` | Returns a ready `Page` for a given session id, reusing an existing one or creating a new context/page, injects helper scripts, updates `last_used`. |
|
||||
| `BrowserManager.kill_session` | Force-closes a context/page for a session and removes it from the session map. |
|
||||
| `BrowserManager._cleanup_expired_sessions` | Periodic sweep that drops sessions idle longer than `ttl_seconds`. |
|
||||
| `BrowserManager.close` | Gracefully shuts down all contexts, the browser, Playwright, and background tasks. |
|
||||
|
||||
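A minimal sketch of the idea behind `_make_config_signature` (illustrative only, not the actual implementation): hash the non-ephemeral parts of a run config so two crawls with the same browser-relevant settings can reuse one context. The field names below are assumptions.

```python
import hashlib
import json

def make_config_signature(run_config: dict) -> str:
    # Per-call fields (assumed names) are dropped so the signature only
    # reflects settings that actually affect the browser context.
    ephemeral = {"url", "session_id", "cache_mode"}
    stable = {k: v for k, v in sorted(run_config.items()) if k not in ephemeral}
    return hashlib.sha256(json.dumps(stable, default=str).encode()).hexdigest()

# Two runs that differ only in URL map to the same signature, so the
# manager could hand them the same reusable context.
sig_a = make_config_signature({"user_agent": "UA", "proxy": None, "url": "https://a.com"})
sig_b = make_config_signature({"user_agent": "UA", "proxy": None, "url": "https://b.com"})
assert sig_a == sig_b
```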
---
|
||||
|
||||
### browser_profiler.py
|
||||
|
||||
| Function | What it does |
|
||||
|---|---|
|
||||
| `BrowserProfiler.__init__` | Sets up profile folder paths, async logger, and signal handlers. |
|
||||
| `BrowserProfiler.create_profile` | Launches a visible browser with a new user-data-dir for manual login, on exit compresses and stores it as a named profile. |
|
||||
| `BrowserProfiler.cleanup_handler` | General SIGTERM/SIGINT cleanup wrapper that kills child processes. |
|
||||
| `BrowserProfiler.sigint_handler` | Handles Ctrl-C during an interactive session, makes sure the browser shuts down cleanly. |
|
||||
| `BrowserProfiler.listen_for_quit_command` | Async REPL that exits when the user types `q`. |
|
||||
| `BrowserProfiler.list_profiles` | Enumerates `~/.crawl4ai/profiles`, prints profile name, browser type, size, and last modified. |
|
||||
| `BrowserProfiler.get_profile_path` | Returns the absolute path of a profile given its name, or `None` if missing. |
|
||||
| `BrowserProfiler.delete_profile` | Removes a profile folder or a direct path from disk, with optional confirmation prompt. |
|
||||
| `BrowserProfiler.interactive_manager` | Text UI loop for listing, creating, deleting, or launching profiles. |
|
||||
| `BrowserProfiler.launch_standalone_browser` | Starts a non-headless Chromium with remote debugging enabled and keeps it alive for manual tests. |
|
||||
| `BrowserProfiler.get_cdp_json` | Pulls `/json/version` from a CDP endpoint and returns the parsed JSON. |
|
||||
| `BrowserProfiler.launch_builtin_browser` | Spawns a headless Chromium in the background, saves `{wsEndpoint, pid, started_at}` to `~/.crawl4ai/builtin_browser.json`. |
|
||||
| `BrowserProfiler.get_builtin_browser_info` | Reads that JSON file, verifies the PID, and returns browser status info. |
|
||||
| `BrowserProfiler._is_browser_running` | Cross-platform helper that checks if a PID is still alive. |
|
||||
| `BrowserProfiler.kill_builtin_browser` | Terminates the background builtin browser and removes its status file. |
|
||||
| `BrowserProfiler.get_builtin_browser_status` | Returns `{running: bool, wsEndpoint, pid, started_at}` for quick health checks. |
|
||||
|
||||
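A small sketch of how the builtin-browser bookkeeping described above can be consumed (illustrative only; the real logic lives in `get_builtin_browser_info` / `_is_browser_running`). It assumes the status-file path and keys listed in the table.

```python
import json
import os
from pathlib import Path

STATUS_FILE = Path.home() / ".crawl4ai" / "builtin_browser.json"

def builtin_browser_status() -> dict:
    """Return {"running": bool, ...} based on the saved status file."""
    if not STATUS_FILE.exists():
        return {"running": False}
    info = json.loads(STATUS_FILE.read_text())
    try:
        os.kill(info["pid"], 0)  # signal 0 just probes whether the PID exists
        running = True
    except (KeyError, OSError, TypeError):
        running = False
    return {"running": running, **info}

print(builtin_browser_status())
```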
|
||||
40
docs/codebase/cli.md
Normal file
@@ -0,0 +1,40 @@
|
||||
### `cli.py` command surface
|
||||
|
||||
| Command | Inputs / flags | What it does |
|
||||
|---|---|---|
|
||||
| **profiles** | *(none)* | Opens the interactive profile manager, lets you list, create, delete saved browser profiles that live in `~/.crawl4ai/profiles`. |
|
||||
| **browser status** | – | Prints whether the always-on *builtin* browser is running, shows its CDP URL, PID, start time. |
|
||||
| **browser stop** | – | Kills the builtin browser and deletes its status file. |
|
||||
| **browser view** | `--url, -u` URL *(optional)* | Pops a visible window of the builtin browser, navigates to `URL` or `about:blank`. |
|
||||
| **config list** | – | Dumps every global setting, showing current value, default, and description. |
|
||||
| **config get** | `key` | Prints the value of a single setting, falls back to default if unset. |
|
||||
| **config set** | `key value` | Persists a new value in the global config (stored under `~/.crawl4ai/config.yml`). |
|
||||
| **examples** | – | Just spits out real-world CLI usage samples. |
|
||||
| **crawl** | `url` *(positional)*<br>`--browser-config,-B` path<br>`--crawler-config,-C` path<br>`--filter-config,-f` path<br>`--extraction-config,-e` path<br>`--json-extract,-j` [desc]\*<br>`--schema,-s` path<br>`--browser,-b` k=v list<br>`--crawler,-c` k=v list<br>`--output,-o` all,json,markdown,md,markdown-fit,md-fit *(default all)*<br>`--output-file,-O` path<br>`--bypass-cache,-b` *(flag, default true — note flag reuse)*<br>`--question,-q` str<br>`--verbose,-v` *(flag)*<br>`--profile,-p` profile-name | One-shot crawl + extraction. Builds `BrowserConfig` and `CrawlerRunConfig` from inline flags or separate YAML/JSON files, runs `AsyncWebCrawler.run()`, can route through a named saved profile and pipe the result to stdout or a file. |
|
||||
| **(default)** | Same flags as **crawl**, plus `--example` | Shortcut so you can type just `crwl https://site.com`. When first arg is not a known sub-command, it falls through to *crawl*. |
|
||||
|
||||
\* `--json-extract/-j` with no value turns on LLM-based JSON extraction using an auto schema, supplying a string lets you prompt-engineer the field descriptions.
|
||||
|
||||
> Quick mental model
|
||||
> `profiles` = manage identities,
|
||||
> `browser ...` = control long-running headless Chrome that all crawls can piggy-back on,
|
||||
> `crawl` = do the actual work,
|
||||
> `config` = tweak global defaults,
|
||||
> everything else is sugar.
|
||||
|
||||
### Quick-fire “profile” usage cheatsheet
|
||||
|
||||
| Scenario | Command (copy-paste ready) | Notes |
|
||||
|---|---|---|
|
||||
| **Launch interactive Profile Manager UI** | `crwl profiles` | Opens TUI with options: 1 List, 2 Create, 3 Delete, 4 Use-to-crawl, 5 Exit. |
|
||||
| **Create a fresh profile** | `crwl profiles` → choose **2** → name it → browser opens → log in → press **q** in terminal | Saves to `~/.crawl4ai/profiles/<name>`. |
|
||||
| **List saved profiles** | `crwl profiles` → choose **1** | Shows name, browser type, size, last-modified. |
|
||||
| **Delete a profile** | `crwl profiles` → choose **3** → pick the profile index → confirm | Removes the folder. |
|
||||
| **Crawl with a profile (default alias)** | `crwl https://site.com/dashboard -p my-profile` | Keeps login cookies, sets `use_managed_browser=true` under the hood. |
|
||||
| **Crawl + verbose JSON output** | `crwl https://site.com -p my-profile -o json -v` | Any other `crawl` flags work the same. |
|
||||
| **Crawl with extra browser tweaks** | `crwl https://site.com -p my-profile -b "headless=true,viewport_width=1680"` | CLI overrides go on top of the profile. |
|
||||
| **Same but via explicit sub-command** | `crwl crawl https://site.com -p my-profile` | Identical to default alias. |
|
||||
| **Use profile from inside Profile Manager** | `crwl profiles` → choose **4** → pick profile → enter URL → follow prompts | Handy when demo-ing to non-CLI folks. |
|
||||
| **One-off crawl with a profile folder path (no name lookup)** | `crwl https://site.com -b "user_data_dir=$HOME/.crawl4ai/profiles/my-profile,use_managed_browser=true"` | Bypasses registry, useful for CI scripts. |
|
||||
| **Launch a dev browser on CDP port with the same identity** | `crwl cdp -d $HOME/.crawl4ai/profiles/my-profile -P 9223` | Lets Puppeteer/Playwright attach for debugging. |
|
||||
|
||||
123
docs/examples/README_BUILTIN_BROWSER.md
Normal file
@@ -0,0 +1,123 @@
|
||||
# Builtin Browser in Crawl4AI
|
||||
|
||||
This document explains the builtin browser feature in Crawl4AI and how to use it effectively.
|
||||
|
||||
## What is the Builtin Browser?
|
||||
|
||||
The builtin browser is a persistent Chrome instance that Crawl4AI manages for you. It runs in the background and can be used by multiple crawling operations, eliminating the need to start and stop browsers for each crawl.
|
||||
|
||||
Benefits include:
|
||||
- **Faster startup times** - The browser is already running, so your scripts start faster
|
||||
- **Shared resources** - All your crawling scripts can use the same browser instance
|
||||
- **Simplified management** - No need to worry about CDP URLs or browser processes
|
||||
- **Persistent cookies and sessions** - Browser state persists between script runs
|
||||
- **Less resource usage** - Only one browser instance for multiple scripts
|
||||
|
||||
## Using the Builtin Browser
|
||||
|
||||
### In Python Code
|
||||
|
||||
Using the builtin browser in your code is simple:
|
||||
|
||||
```python
|
||||
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig
|
||||
|
||||
# Create browser config with builtin mode
|
||||
browser_config = BrowserConfig(
|
||||
browser_mode="builtin", # This is the key setting!
|
||||
headless=True # Can be headless or not
|
||||
)
|
||||
|
||||
# Create the crawler
|
||||
crawler = AsyncWebCrawler(config=browser_config)
|
||||
|
||||
# Use it - no need to explicitly start()
|
||||
result = await crawler.arun("https://example.com")
|
||||
```
|
||||
|
||||
Key points:
|
||||
1. Set `browser_mode="builtin"` in your BrowserConfig
|
||||
2. No need for explicit `start()` call - the crawler will automatically connect to the builtin browser
|
||||
3. No need to use a context manager or call `close()` - the browser stays running
|
||||
|
||||
### Via CLI
|
||||
|
||||
The CLI provides commands to manage the builtin browser:
|
||||
|
||||
```bash
|
||||
# Start the builtin browser
|
||||
crwl browser start
|
||||
|
||||
# Check its status
|
||||
crwl browser status
|
||||
|
||||
# Open a visible window to see what the browser is doing
|
||||
crwl browser view --url https://example.com
|
||||
|
||||
# Stop it when no longer needed
|
||||
crwl browser stop
|
||||
|
||||
# Restart with different settings
|
||||
crwl browser restart --no-headless
|
||||
```
|
||||
|
||||
When crawling via CLI, simply add the builtin browser mode:
|
||||
|
||||
```bash
|
||||
crwl https://example.com -b "browser_mode=builtin"
|
||||
```
|
||||
|
||||
## How It Works
|
||||
|
||||
1. When a crawler with `browser_mode="builtin"` is created:
|
||||
- It checks if a builtin browser is already running
|
||||
- If not, it automatically launches one
|
||||
- It connects to the browser via CDP (Chrome DevTools Protocol)
|
||||
|
||||
2. The browser process continues running after your script exits
|
||||
- This means it's ready for the next crawl
|
||||
- You can manage it via the CLI commands
|
||||
|
||||
3. During installation, Crawl4AI attempts to create a builtin browser automatically
|
||||
|
||||
## Example
|
||||
|
||||
See the [builtin_browser_example.py](builtin_browser_example.py) file for a complete example.
|
||||
|
||||
Run it with:
|
||||
|
||||
```bash
|
||||
python builtin_browser_example.py
|
||||
```
|
||||
|
||||
## When to Use
|
||||
|
||||
The builtin browser is ideal for:
|
||||
- Scripts that run frequently
|
||||
- Development and testing workflows
|
||||
- Applications that need to minimize startup time
|
||||
- Systems where you want to manage browser instances centrally
|
||||
|
||||
You might not want to use it when:
|
||||
- Running one-off scripts
|
||||
- When you need different browser configurations for different tasks
|
||||
- In environments where persistent processes are not allowed
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
If you encounter issues:
|
||||
|
||||
1. Check the browser status:
|
||||
```
|
||||
crwl browser status
|
||||
```
|
||||
|
||||
2. Try restarting it:
|
||||
```
|
||||
crwl browser restart
|
||||
```
|
||||
|
||||
3. If problems persist, stop it and let Crawl4AI start a fresh one:
|
||||
```
|
||||
crwl browser stop
|
||||
```
|
||||
79
docs/examples/arun_vs_arun_many.py
Normal file
@@ -0,0 +1,79 @@
|
||||
import asyncio
|
||||
import time
|
||||
from crawl4ai.async_webcrawler import AsyncWebCrawler, CacheMode
|
||||
from crawl4ai.async_configs import CrawlerRunConfig
|
||||
from crawl4ai.async_dispatcher import MemoryAdaptiveDispatcher, RateLimiter
|
||||
|
||||
VERBOSE = False
|
||||
|
||||
async def crawl_sequential(urls):
|
||||
config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS, verbose=VERBOSE)
|
||||
results = []
|
||||
start_time = time.perf_counter()
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
for url in urls:
|
||||
result_container = await crawler.arun(url=url, config=config)
|
||||
results.append(result_container[0])
|
||||
total_time = time.perf_counter() - start_time
|
||||
return total_time, results
|
||||
|
||||
async def crawl_parallel_dispatcher(urls):
|
||||
config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS, verbose=VERBOSE)
|
||||
# Dispatcher with rate limiter enabled (default behavior)
|
||||
dispatcher = MemoryAdaptiveDispatcher(
|
||||
rate_limiter=RateLimiter(base_delay=(1.0, 3.0), max_delay=60.0, max_retries=3),
|
||||
max_session_permit=50,
|
||||
)
|
||||
start_time = time.perf_counter()
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result_container = await crawler.arun_many(urls=urls, config=config, dispatcher=dispatcher)
|
||||
results = []
|
||||
if isinstance(result_container, list):
|
||||
results = result_container
|
||||
else:
|
||||
async for res in result_container:
|
||||
results.append(res)
|
||||
total_time = time.perf_counter() - start_time
|
||||
return total_time, results
|
||||
|
||||
async def crawl_parallel_no_rate_limit(urls):
|
||||
config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS, verbose=VERBOSE)
|
||||
# Dispatcher with no rate limiter and a high session permit to avoid queuing
|
||||
dispatcher = MemoryAdaptiveDispatcher(
|
||||
rate_limiter=None,
|
||||
max_session_permit=len(urls) # allow all URLs concurrently
|
||||
)
|
||||
start_time = time.perf_counter()
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result_container = await crawler.arun_many(urls=urls, config=config, dispatcher=dispatcher)
|
||||
results = []
|
||||
if isinstance(result_container, list):
|
||||
results = result_container
|
||||
else:
|
||||
async for res in result_container:
|
||||
results.append(res)
|
||||
total_time = time.perf_counter() - start_time
|
||||
return total_time, results
|
||||
|
||||
async def main():
|
||||
urls = ["https://example.com"] * 100
|
||||
print(f"Crawling {len(urls)} URLs sequentially...")
|
||||
seq_time, seq_results = await crawl_sequential(urls)
|
||||
print(f"Sequential crawling took: {seq_time:.2f} seconds\n")
|
||||
|
||||
print(f"Crawling {len(urls)} URLs in parallel using arun_many with dispatcher (with rate limit)...")
|
||||
disp_time, disp_results = await crawl_parallel_dispatcher(urls)
|
||||
print(f"Parallel (dispatcher with rate limiter) took: {disp_time:.2f} seconds\n")
|
||||
|
||||
print(f"Crawling {len(urls)} URLs in parallel using dispatcher with no rate limiter...")
|
||||
no_rl_time, no_rl_results = await crawl_parallel_no_rate_limit(urls)
|
||||
print(f"Parallel (dispatcher without rate limiter) took: {no_rl_time:.2f} seconds\n")
|
||||
|
||||
print("Crawl4ai - Crawling Comparison")
|
||||
print("--------------------------------------------------------")
|
||||
print(f"Sequential crawling took: {seq_time:.2f} seconds")
|
||||
print(f"Parallel (dispatcher with rate limiter) took: {disp_time:.2f} seconds")
|
||||
print(f"Parallel (dispatcher without rate limiter) took: {no_rl_time:.2f} seconds")
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
86
docs/examples/builtin_browser_example.py
Normal file
@@ -0,0 +1,86 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Builtin Browser Example
|
||||
|
||||
This example demonstrates how to use Crawl4AI's builtin browser feature,
|
||||
which simplifies the browser management process. With builtin mode:
|
||||
|
||||
- No need to manually start or connect to a browser
|
||||
- No need to manage CDP URLs or browser processes
|
||||
- Automatically connects to an existing browser or launches one if needed
|
||||
- Browser persists between script runs, reducing startup time
|
||||
- No explicit cleanup or close() calls needed
|
||||
|
||||
The example also demonstrates "auto-starting" where you don't need to explicitly
|
||||
call start() method on the crawler.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
|
||||
import time
|
||||
|
||||
async def crawl_with_builtin_browser():
|
||||
"""
|
||||
Simple example of crawling with the builtin browser.
|
||||
|
||||
Key features:
|
||||
1. browser_mode="builtin" in BrowserConfig
|
||||
2. No explicit start() call needed
|
||||
3. No explicit close() needed
|
||||
"""
|
||||
print("\n=== Crawl4AI Builtin Browser Example ===\n")
|
||||
|
||||
# Create a browser configuration with builtin mode
|
||||
browser_config = BrowserConfig(
|
||||
browser_mode="builtin", # This is the key setting!
|
||||
headless=True # Can run headless for background operation
|
||||
)
|
||||
|
||||
# Create crawler run configuration
|
||||
crawler_config = CrawlerRunConfig(
|
||||
cache_mode=CacheMode.BYPASS, # Skip cache for this demo
|
||||
screenshot=True, # Take a screenshot
|
||||
verbose=True # Show verbose logging
|
||||
)
|
||||
|
||||
# Create the crawler instance
|
||||
# Note: We don't need to use "async with" context manager
|
||||
crawler = AsyncWebCrawler(config=browser_config)
|
||||
|
||||
# Start crawling several URLs - no explicit start() needed!
|
||||
# The crawler will automatically connect to the builtin browser
|
||||
print("\n➡️ Crawling first URL...")
|
||||
t0 = time.time()
|
||||
result1 = await crawler.arun(
|
||||
url="https://crawl4ai.com",
|
||||
config=crawler_config
|
||||
)
|
||||
t1 = time.time()
|
||||
print(f"✅ First URL crawled in {t1-t0:.2f} seconds")
|
||||
print(f" Got {len(result1.markdown.raw_markdown)} characters of content")
|
||||
print(f" Title: {result1.metadata.get('title', 'No title')}")
|
||||
|
||||
# Try another URL - the browser is already running, so this should be faster
|
||||
print("\n➡️ Crawling second URL...")
|
||||
t0 = time.time()
|
||||
result2 = await crawler.arun(
|
||||
url="https://example.com",
|
||||
config=crawler_config
|
||||
)
|
||||
t1 = time.time()
|
||||
print(f"✅ Second URL crawled in {t1-t0:.2f} seconds")
|
||||
print(f" Got {len(result2.markdown.raw_markdown)} characters of content")
|
||||
print(f" Title: {result2.metadata.get('title', 'No title')}")
|
||||
|
||||
# The builtin browser continues running in the background
|
||||
# No need to explicitly close it
|
||||
print("\n🔄 The builtin browser remains running for future use")
|
||||
print(" You can use 'crwl browser status' to check its status")
|
||||
print(" or 'crwl browser stop' to stop it when completely done")
|
||||
|
||||
async def main():
|
||||
"""Run the example"""
|
||||
await crawl_with_builtin_browser()
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
209
docs/examples/crawler_monitor_example.py
Normal file
@@ -0,0 +1,209 @@
|
||||
"""
|
||||
CrawlerMonitor Example
|
||||
|
||||
This example demonstrates how to use the CrawlerMonitor component
|
||||
to visualize and track web crawler operations in real-time.
|
||||
"""
|
||||
|
||||
import time
|
||||
import uuid
|
||||
import random
|
||||
import threading
|
||||
from crawl4ai.components.crawler_monitor import CrawlerMonitor
|
||||
from crawl4ai.models import CrawlStatus
|
||||
|
||||
def simulate_webcrawler_operations(monitor, num_tasks=20):
|
||||
"""
|
||||
Simulates a web crawler's operations with multiple tasks and different states.
|
||||
|
||||
Args:
|
||||
monitor: The CrawlerMonitor instance
|
||||
num_tasks: Number of tasks to simulate
|
||||
"""
|
||||
print(f"Starting simulation with {num_tasks} tasks...")
|
||||
|
||||
# Create and register all tasks first
|
||||
task_ids = []
|
||||
for i in range(num_tasks):
|
||||
task_id = str(uuid.uuid4())
|
||||
url = f"https://example.com/page{i}"
|
||||
monitor.add_task(task_id, url)
|
||||
task_ids.append((task_id, url))
|
||||
|
||||
# Small delay between task creation
|
||||
time.sleep(0.2)
|
||||
|
||||
# Process tasks with a variety of different behaviors
|
||||
threads = []
|
||||
for i, (task_id, url) in enumerate(task_ids):
|
||||
# Create a thread for each task
|
||||
thread = threading.Thread(
|
||||
target=process_task,
|
||||
args=(monitor, task_id, url, i)
|
||||
)
|
||||
thread.daemon = True
|
||||
threads.append(thread)
|
||||
|
||||
# Start threads in batches to simulate concurrent processing
|
||||
batch_size = 4 # Process 4 tasks at a time
|
||||
for i in range(0, len(threads), batch_size):
|
||||
batch = threads[i:i+batch_size]
|
||||
for thread in batch:
|
||||
thread.start()
|
||||
time.sleep(0.5) # Stagger thread start times
|
||||
|
||||
# Wait a bit before starting next batch
|
||||
time.sleep(random.uniform(1.0, 3.0))
|
||||
|
||||
# Update queue statistics
|
||||
update_queue_stats(monitor)
|
||||
|
||||
# Simulate memory pressure changes
|
||||
active_threads = [t for t in threads if t.is_alive()]
|
||||
if len(active_threads) > 8:
|
||||
monitor.update_memory_status("CRITICAL")
|
||||
elif len(active_threads) > 4:
|
||||
monitor.update_memory_status("PRESSURE")
|
||||
else:
|
||||
monitor.update_memory_status("NORMAL")
|
||||
|
||||
# Wait for all threads to complete
|
||||
for thread in threads:
|
||||
thread.join()
|
||||
|
||||
# Final updates
|
||||
update_queue_stats(monitor)
|
||||
monitor.update_memory_status("NORMAL")
|
||||
|
||||
print("Simulation completed!")
|
||||
|
||||
def process_task(monitor, task_id, url, index):
|
||||
"""Simulate processing of a single task."""
|
||||
# Tasks start in queued state (already added)
|
||||
|
||||
# Simulate waiting in queue
|
||||
wait_time = random.uniform(0.5, 3.0)
|
||||
time.sleep(wait_time)
|
||||
|
||||
# Start processing - move to IN_PROGRESS
|
||||
monitor.update_task(
|
||||
task_id=task_id,
|
||||
status=CrawlStatus.IN_PROGRESS,
|
||||
start_time=time.time(),
|
||||
wait_time=wait_time
|
||||
)
|
||||
|
||||
# Simulate task processing with memory usage changes
|
||||
total_process_time = random.uniform(2.0, 10.0)
|
||||
step_time = total_process_time / 5 # Update in 5 steps
|
||||
|
||||
for step in range(5):
|
||||
# Simulate increasing then decreasing memory usage
|
||||
if step < 3: # First 3 steps - increasing
|
||||
memory_usage = random.uniform(5.0, 20.0) * (step + 1)
|
||||
else: # Last 2 steps - decreasing
|
||||
memory_usage = random.uniform(5.0, 20.0) * (5 - step)
|
||||
|
||||
# Update peak memory if this is higher
|
||||
peak = max(memory_usage, monitor.get_task_stats(task_id).get("peak_memory", 0))
|
||||
|
||||
monitor.update_task(
|
||||
task_id=task_id,
|
||||
memory_usage=memory_usage,
|
||||
peak_memory=peak
|
||||
)
|
||||
|
||||
time.sleep(step_time)
|
||||
|
||||
# Determine final state - 80% success, 20% failure
|
||||
if index % 5 == 0: # Every 5th task fails
|
||||
monitor.update_task(
|
||||
task_id=task_id,
|
||||
status=CrawlStatus.FAILED,
|
||||
end_time=time.time(),
|
||||
memory_usage=0.0,
|
||||
error_message="Connection timeout"
|
||||
)
|
||||
else:
|
||||
monitor.update_task(
|
||||
task_id=task_id,
|
||||
status=CrawlStatus.COMPLETED,
|
||||
end_time=time.time(),
|
||||
memory_usage=0.0
|
||||
)
|
||||
|
||||
def update_queue_stats(monitor):
|
||||
"""Update queue statistics based on current tasks."""
|
||||
task_stats = monitor.get_all_task_stats()
|
||||
|
||||
# Count queued tasks
|
||||
queued_tasks = [
|
||||
stats for stats in task_stats.values()
|
||||
if stats["status"] == CrawlStatus.QUEUED.name
|
||||
]
|
||||
|
||||
total_queued = len(queued_tasks)
|
||||
|
||||
if total_queued > 0:
|
||||
current_time = time.time()
|
||||
# Calculate wait times
|
||||
wait_times = [
|
||||
current_time - stats.get("enqueue_time", current_time)
|
||||
for stats in queued_tasks
|
||||
]
|
||||
highest_wait_time = max(wait_times) if wait_times else 0.0
|
||||
avg_wait_time = sum(wait_times) / len(wait_times) if wait_times else 0.0
|
||||
else:
|
||||
highest_wait_time = 0.0
|
||||
avg_wait_time = 0.0
|
||||
|
||||
# Update monitor
|
||||
monitor.update_queue_statistics(
|
||||
total_queued=total_queued,
|
||||
highest_wait_time=highest_wait_time,
|
||||
avg_wait_time=avg_wait_time
|
||||
)
|
||||
|
||||
def main():
|
||||
# Initialize the monitor
|
||||
monitor = CrawlerMonitor(
|
||||
urls_total=20, # Total URLs to process
|
||||
refresh_rate=0.5, # Update UI twice per second
|
||||
enable_ui=True, # Enable terminal UI
|
||||
max_width=120 # Set maximum width to 120 characters
|
||||
)
|
||||
|
||||
# Start the monitor
|
||||
monitor.start()
|
||||
|
||||
try:
|
||||
# Run simulation
|
||||
simulate_webcrawler_operations(monitor)
|
||||
|
||||
# Keep monitor running a bit to see final state
|
||||
print("Waiting to view final state...")
|
||||
time.sleep(5)
|
||||
|
||||
except KeyboardInterrupt:
|
||||
print("\nExample interrupted by user")
|
||||
finally:
|
||||
# Stop the monitor
|
||||
monitor.stop()
|
||||
print("Example completed!")
|
||||
|
||||
# Print some statistics
|
||||
summary = monitor.get_summary()
|
||||
print("\nCrawler Statistics Summary:")
|
||||
print(f"Total URLs: {summary['urls_total']}")
|
||||
print(f"Completed: {summary['urls_completed']}")
|
||||
print(f"Completion percentage: {summary['completion_percentage']:.1f}%")
|
||||
print(f"Peak memory usage: {summary['peak_memory_percent']:.1f}%")
|
||||
|
||||
# Print task status counts
|
||||
status_counts = summary['status_counts']
|
||||
print("\nTask Status Counts:")
|
||||
for status, count in status_counts.items():
|
||||
print(f" {status}: {count}")
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
445
docs/examples/crypto_analysis_example.py
Normal file
@@ -0,0 +1,445 @@
|
||||
"""
|
||||
Crawl4AI Crypto Trading Analysis Demo
|
||||
Author: Unclecode
|
||||
Date: 2024-03-15
|
||||
|
||||
This script demonstrates advanced crypto market analysis using:
|
||||
1. Web scraping of real-time CoinMarketCap data
|
||||
2. Smart table extraction with layout detection
|
||||
3. Hedge fund-grade financial metrics
|
||||
4. Interactive visualizations for trading signals
|
||||
|
||||
Key Features:
|
||||
- Volume Anomaly Detection: Finds unusual trading activity
|
||||
- Liquidity Power Score: Identifies easily tradable assets
|
||||
- Volatility-Weighted Momentum: Surfaces sustainable trends
|
||||
- Smart Money Signals: Algorithmic buy/hold recommendations
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import pandas as pd
|
||||
import numpy as np
|
||||
import re
|
||||
import plotly.express as px
|
||||
from crawl4ai import (
|
||||
AsyncWebCrawler,
|
||||
BrowserConfig,
|
||||
CrawlerRunConfig,
|
||||
CacheMode,
|
||||
LXMLWebScrapingStrategy,
|
||||
)
|
||||
from crawl4ai import CrawlResult
|
||||
from typing import List
|
||||
|
||||
__current_dir__ = __file__.rsplit("/", 1)[0]
|
||||
|
||||
class CryptoAlphaGenerator:
|
||||
"""
|
||||
Advanced crypto analysis engine that transforms raw web data into:
|
||||
- Volume anomaly flags
|
||||
- Liquidity scores
|
||||
- Momentum-risk ratios
|
||||
- Machine learning-inspired trading signals
|
||||
|
||||
Methods:
|
||||
analyze_tables(): Process raw tables into trading insights
|
||||
create_visuals(): Generate institutional-grade visualizations
|
||||
generate_insights(): Create plain English trading recommendations
|
||||
"""
|
||||
|
||||
def clean_data(self, df: pd.DataFrame) -> pd.DataFrame:
|
||||
"""
|
||||
Convert crypto market data to machine-readable format.
|
||||
Handles currency symbols, units (B=Billions), and percentage values.
|
||||
"""
|
||||
# Make a copy to avoid SettingWithCopyWarning
|
||||
df = df.copy()
|
||||
|
||||
# Clean Price column (handle currency symbols)
|
||||
df["Price"] = df["Price"].astype(str).str.replace("[^\d.]", "", regex=True).astype(float)
|
||||
|
||||
# Handle Market Cap and Volume, considering both Billions and Trillions
|
||||
def convert_large_numbers(value):
|
||||
if pd.isna(value):
|
||||
return float('nan')
|
||||
value = str(value)
|
||||
multiplier = 1
|
||||
if 'B' in value:
|
||||
multiplier = 1e9
|
||||
elif 'T' in value:
|
||||
multiplier = 1e12
|
||||
# Handle cases where the value might already be numeric
|
||||
cleaned_value = re.sub(r"[^\d.]", "", value)
|
||||
return float(cleaned_value) * multiplier if cleaned_value else float('nan')
|
||||
|
||||
df["Market Cap"] = df["Market Cap"].apply(convert_large_numbers)
|
||||
df["Volume(24h)"] = df["Volume(24h)"].apply(convert_large_numbers)
|
||||
|
||||
# Convert percentages to decimal values
|
||||
for col in ["1h %", "24h %", "7d %"]:
|
||||
if col in df.columns:
|
||||
# First ensure it's string, then clean
|
||||
df[col] = (
|
||||
df[col].astype(str)
|
||||
.str.replace("%", "")
|
||||
.str.replace(",", ".")
|
||||
.replace("nan", np.nan)
|
||||
)
|
||||
df[col] = pd.to_numeric(df[col], errors='coerce') / 100
|
||||
|
||||
return df
|
||||
|
||||
def calculate_metrics(self, df: pd.DataFrame) -> pd.DataFrame:
|
||||
"""
|
||||
Compute advanced trading metrics used by quantitative funds:
|
||||
|
||||
1. Volume/Market Cap Ratio - Measures liquidity efficiency
|
||||
(High ratio = Underestimated attention, and small-cap = higher growth potential)
|
||||
|
||||
2. Volatility Score - Risk-adjusted momentum potential - Shows how stable is the trend
|
||||
(STD of 1h/24h/7d returns)
|
||||
|
||||
3. Momentum Score - Weighted average of returns - Shows how strong is the trend
|
||||
(1h:30% + 24h:50% + 7d:20%)
|
||||
|
||||
4. Volume Anomaly - volume more than 3x the median 24h volume
(Flags unusual trading activity that can signal insider buying or breaking news.)
|
||||
"""
|
||||
# Liquidity Metrics
|
||||
df["Volume/Market Cap Ratio"] = df["Volume(24h)"] / df["Market Cap"]
|
||||
|
||||
# Risk Metrics
|
||||
df["Volatility Score"] = df[["1h %", "24h %", "7d %"]].std(axis=1)
|
||||
|
||||
# Momentum Metrics
|
||||
df["Momentum Score"] = df["1h %"] * 0.3 + df["24h %"] * 0.5 + df["7d %"] * 0.2
|
||||
|
||||
# Anomaly Detection
|
||||
median_vol = df["Volume(24h)"].median()
|
||||
df["Volume Anomaly"] = df["Volume(24h)"] > 3 * median_vol
|
||||
|
||||
# Value Flags
|
||||
# Undervalued Flag - Low market cap and high momentum
|
||||
# (High growth potential and low attention)
|
||||
df["Undervalued Flag"] = (df["Market Cap"] < 1e9) & (
|
||||
df["Momentum Score"] > 0.05
|
||||
)
|
||||
# Liquid Giant Flag - High volume/market cap ratio and large market cap
|
||||
# (High liquidity and large market cap = institutional interest)
|
||||
df["Liquid Giant"] = (df["Volume/Market Cap Ratio"] > 0.15) & (
|
||||
df["Market Cap"] > 1e9
|
||||
)
|
||||
|
||||
return df
|
||||
|
||||
def generate_insights_simple(self, df: pd.DataFrame) -> str:
|
||||
"""
|
||||
Generates an ultra-actionable crypto trading report with:
|
||||
- Risk-tiered opportunities (High/Medium/Low)
|
||||
- Concrete examples for each trade type
|
||||
- Entry/exit strategies spelled out
|
||||
- Visual cues for quick scanning
|
||||
"""
|
||||
report = [
|
||||
"🚀 **CRYPTO TRADING CHEAT SHEET** 🚀",
|
||||
"*Based on quantitative signals + hedge fund tactics*",
|
||||
"━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
|
||||
]
|
||||
|
||||
# 1. HIGH-RISK: Undervalued Small-Caps (Momentum Plays)
|
||||
high_risk = df[df["Undervalued Flag"]].sort_values("Momentum Score", ascending=False)
|
||||
if not high_risk.empty:
|
||||
example_coin = high_risk.iloc[0]
|
||||
report.extend([
|
||||
"\n🔥 **HIGH-RISK: Rocket Fuel Small-Caps**",
|
||||
f"*Example Trade:* {example_coin['Name']} (Price: ${example_coin['Price']:.6f})",
|
||||
"📊 *Why?* Tiny market cap (<$1B) but STRONG momentum (+{:.0f}% last week)".format(example_coin['7d %']*100),
|
||||
"🎯 *Strategy:*",
|
||||
"1. Wait for 5-10% dip from recent high (${:.6f} → Buy under ${:.6f})".format(
|
||||
example_coin['Price'] / (1 - example_coin['24h %']), # Approx recent high
|
||||
example_coin['Price'] * 0.95
|
||||
),
|
||||
"2. Set stop-loss at -10% (${:.6f})".format(example_coin['Price'] * 0.90),
|
||||
"3. Take profit at +20% (${:.6f})".format(example_coin['Price'] * 1.20),
|
||||
"⚠️ *Risk Warning:* These can drop 30% fast! Never bet more than 5% of your portfolio.",
|
||||
"━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
|
||||
])
|
||||
|
||||
# 2. MEDIUM-RISK: Liquid Giants (Swing Trades)
|
||||
medium_risk = df[df["Liquid Giant"]].sort_values("Volume/Market Cap Ratio", ascending=False)
|
||||
if not medium_risk.empty:
|
||||
example_coin = medium_risk.iloc[0]
|
||||
report.extend([
|
||||
"\n💎 **MEDIUM-RISK: Liquid Giants (Safe Swing Trades)**",
|
||||
f"*Example Trade:* {example_coin['Name']} (Market Cap: ${example_coin['Market Cap']/1e9:.1f}B)",
|
||||
"📊 *Why?* Huge volume (${:.1f}M/day) makes it easy to enter/exit".format(example_coin['Volume(24h)']/1e6),
|
||||
"🎯 *Strategy:*",
|
||||
"1. Buy when 24h volume > 15% of market cap (Current: {:.0f}%)".format(example_coin['Volume/Market Cap Ratio']*100),
|
||||
"2. Hold 1-4 weeks (Big coins trend longer)",
|
||||
"3. Exit when momentum drops below 5% (Current: {:.0f}%)".format(example_coin['Momentum Score']*100),
|
||||
"📉 *Pro Tip:* Watch Bitcoin's trend - if BTC drops 5%, these usually follow.",
|
||||
"━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
|
||||
])
|
||||
|
||||
# 3. LOW-RISK: Stable Momentum (DCA Targets)
|
||||
low_risk = df[
|
||||
(df["Momentum Score"] > 0.05) &
|
||||
(df["Volatility Score"] < 0.03)
|
||||
].sort_values("Market Cap", ascending=False)
|
||||
if not low_risk.empty:
|
||||
example_coin = low_risk.iloc[0]
|
||||
report.extend([
|
||||
"\n🛡️ **LOW-RISK: Steady Climbers (DCA & Forget)**",
|
||||
f"*Example Trade:* {example_coin['Name']} (Volatility: {example_coin['Volatility Score']:.2f}/5)",
|
||||
"📊 *Why?* Rises steadily (+{:.0f}%/week) with LOW drama".format(example_coin['7d %']*100),
|
||||
"🎯 *Strategy:*",
|
||||
"1. Buy small amounts every Tuesday/Friday (DCA)",
|
||||
"2. Hold for 3+ months (Compound gains work best here)",
|
||||
"3. Sell 10% at every +25% milestone",
|
||||
"💰 *Best For:* Long-term investors who hate sleepless nights",
|
||||
"━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
|
||||
])
|
||||
|
||||
# Volume Spike Alerts
|
||||
anomalies = df[df["Volume Anomaly"]].sort_values("Volume(24h)", ascending=False)
|
||||
if not anomalies.empty:
|
||||
example_coin = anomalies.iloc[0]
|
||||
report.extend([
|
||||
"\n🚨 **Volume Spike Alert (Possible News/Whale Action)**",
|
||||
f"*Coin:* {example_coin['Name']} (Volume: ${example_coin['Volume(24h)']/1e6:.1f}M, usual: ${example_coin['Volume(24h)']/3/1e6:.1f}M)",
|
||||
"🔍 *Check:* Twitter/CoinGecko for news before trading",
|
||||
"⚡ *If no news:* Could be insider buying - watch price action:",
|
||||
"- Break above today's high → Buy with tight stop-loss",
|
||||
"- Fade back down → Avoid (may be a fakeout)"
|
||||
])
|
||||
|
||||
# Pro Tip Footer
|
||||
report.append("\n✨ *Pro Tip:* Bookmark this report & check back in 24h to see if signals held up.")
|
||||
|
||||
return "\n".join(report)
|
||||
|
||||
def generate_insights(self, df: pd.DataFrame) -> str:
|
||||
"""
|
||||
Generates a tactical trading report with:
|
||||
- Top 3 trades per risk level (High/Medium/Low)
|
||||
- Auto-calculated entry/exit prices
|
||||
- BTC chart toggle tip
|
||||
"""
|
||||
# Filter top candidates for each risk level
|
||||
high_risk = (
|
||||
df[df["Undervalued Flag"]]
|
||||
.sort_values("Momentum Score", ascending=False)
|
||||
.head(3)
|
||||
)
|
||||
medium_risk = (
|
||||
df[df["Liquid Giant"]]
|
||||
.sort_values("Volume/Market Cap Ratio", ascending=False)
|
||||
.head(3)
|
||||
)
|
||||
low_risk = (
|
||||
df[(df["Momentum Score"] > 0.05) & (df["Volatility Score"] < 0.03)]
|
||||
.sort_values("Momentum Score", ascending=False)
|
||||
.head(3)
|
||||
)
|
||||
|
||||
report = ["# 🎯 Crypto Trading Tactical Report (Top 3 Per Risk Tier)"]
|
||||
|
||||
# 1. High-Risk Trades (Small-Cap Momentum)
|
||||
if not high_risk.empty:
|
||||
report.append("\n## 🔥 HIGH RISK: Small-Cap Rockets (5-50% Potential)")
|
||||
for i, coin in high_risk.iterrows():
|
||||
current_price = coin["Price"]
|
||||
entry = current_price * 0.95 # -5% dip
|
||||
stop_loss = current_price * 0.90 # -10%
|
||||
take_profit = current_price * 1.20 # +20%
|
||||
|
||||
report.append(
|
||||
f"\n### {coin['Name']} (Momentum: {coin['Momentum Score']:.1%})"
|
||||
f"\n- **Current Price:** ${current_price:.4f}"
|
||||
f"\n- **Entry:** < ${entry:.4f} (Wait for pullback)"
|
||||
f"\n- **Stop-Loss:** ${stop_loss:.4f} (-10%)"
|
||||
f"\n- **Target:** ${take_profit:.4f} (+20%)"
|
||||
f"\n- **Risk/Reward:** 1:2"
|
||||
f"\n- **Watch:** Volume spikes above {coin['Volume(24h)']/1e6:.1f}M"
|
||||
)
|
||||
|
||||
# 2. Medium-Risk Trades (Liquid Giants)
|
||||
if not medium_risk.empty:
|
||||
report.append("\n## 💎 MEDIUM RISK: Liquid Swing Trades (10-30% Potential)")
|
||||
for i, coin in medium_risk.iterrows():
|
||||
current_price = coin["Price"]
|
||||
entry = current_price * 0.98 # -2% dip
|
||||
stop_loss = current_price * 0.94 # -6%
|
||||
take_profit = current_price * 1.15 # +15%
|
||||
|
||||
report.append(
|
||||
f"\n### {coin['Name']} (Liquidity Score: {coin['Volume/Market Cap Ratio']:.1%})"
|
||||
f"\n- **Current Price:** ${current_price:.2f}"
|
||||
f"\n- **Entry:** < ${entry:.2f} (Buy slight dips)"
|
||||
f"\n- **Stop-Loss:** ${stop_loss:.2f} (-6%)"
|
||||
f"\n- **Target:** ${take_profit:.2f} (+15%)"
|
||||
f"\n- **Hold Time:** 1-3 weeks"
|
||||
f"\n- **Key Metric:** Volume/Cap > 15%"
|
||||
)
|
||||
|
||||
# 3. Low-Risk Trades (Stable Momentum)
|
||||
if not low_risk.empty:
|
||||
report.append("\n## 🛡️ LOW RISK: Steady Gainers (5-15% Potential)")
|
||||
for i, coin in low_risk.iterrows():
|
||||
current_price = coin["Price"]
|
||||
entry = current_price * 0.99 # -1% dip
|
||||
stop_loss = current_price * 0.97 # -3%
|
||||
take_profit = current_price * 1.10 # +10%
|
||||
|
||||
report.append(
|
||||
f"\n### {coin['Name']} (Stability Score: {1/coin['Volatility Score']:.1f}x)"
|
||||
f"\n- **Current Price:** ${current_price:.2f}"
|
||||
f"\n- **Entry:** < ${entry:.2f} (Safe zone)"
|
||||
f"\n- **Stop-Loss:** ${stop_loss:.2f} (-3%)"
|
||||
f"\n- **Target:** ${take_profit:.2f} (+10%)"
|
||||
f"\n- **DCA Suggestion:** 3 buys over 72 hours"
|
||||
)
|
||||
|
||||
# Volume Anomaly Alert
|
||||
anomalies = df[df["Volume Anomaly"]].sort_values("Volume(24h)", ascending=False).head(2)
|
||||
if not anomalies.empty:
|
||||
report.append("\n⚠️ **Volume Spike Alerts**")
|
||||
for i, coin in anomalies.iterrows():
|
||||
report.append(
|
||||
f"- {coin['Name']}: Volume {coin['Volume(24h)']/1e6:.1f}M "
|
||||
f"(3x normal) | Price moved: {coin['24h %']:.1%}"
|
||||
)
|
||||
|
||||
# Pro Tip
|
||||
report.append(
|
||||
"\n📊 **Chart Hack:** Hide BTC in visuals:\n"
|
||||
"```python\n"
|
||||
"# For 3D Map:\n"
|
||||
"fig.update_traces(visible=False, selector={'name':'Bitcoin'})\n"
|
||||
"# For Treemap:\n"
|
||||
"df = df[df['Name'] != 'Bitcoin']\n"
|
||||
"```"
|
||||
)
|
||||
|
||||
return "\n".join(report)
|
||||
|
||||
def create_visuals(self, df: pd.DataFrame) -> dict:
|
||||
"""Enhanced visuals with BTC toggle support"""
|
||||
# 3D Market Map (with BTC toggle hint)
|
||||
fig1 = px.scatter_3d(
|
||||
df,
|
||||
x="Market Cap",
|
||||
y="Volume/Market Cap Ratio",
|
||||
z="Momentum Score",
|
||||
color="Name", # Color by name to allow toggling
|
||||
hover_name="Name",
|
||||
title="Market Map (Toggle BTC in legend to focus on alts)",
|
||||
log_x=True
|
||||
)
|
||||
fig1.update_traces(
|
||||
marker=dict(size=df["Volatility Score"]*100 + 5) # Dynamic sizing
|
||||
)
|
||||
|
||||
# Liquidity Tree (exclude BTC if too dominant)
|
||||
if df[df["Name"] == "BitcoinBTC"]["Market Cap"].values[0] > df["Market Cap"].median() * 10:
|
||||
df = df[df["Name"] != "BitcoinBTC"]
|
||||
|
||||
fig2 = px.treemap(
|
||||
df,
|
||||
path=["Name"],
|
||||
values="Market Cap",
|
||||
color="Volume/Market Cap Ratio",
|
||||
title="Liquidity Tree (BTC auto-removed if dominant)"
|
||||
)
|
||||
|
||||
return {"market_map": fig1, "liquidity_tree": fig2}
|
||||
|
||||
async def main():
|
||||
"""
|
||||
Main execution flow:
|
||||
1. Configure headless browser for scraping
|
||||
2. Extract live crypto market data
|
||||
3. Clean and analyze using hedge fund models
|
||||
4. Generate visualizations and insights
|
||||
5. Output professional trading report
|
||||
"""
|
||||
# Configure browser with anti-detection features
|
||||
browser_config = BrowserConfig(
|
||||
headless=False,
|
||||
)
|
||||
|
||||
# Initialize crawler with smart table detection
|
||||
crawler = AsyncWebCrawler(config=browser_config)
|
||||
await crawler.start()
|
||||
|
||||
try:
|
||||
# Set up scraping parameters
|
||||
crawl_config = CrawlerRunConfig(
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
table_score_threshold=8, # Strict table detection
|
||||
keep_data_attributes=True,
|
||||
scraping_strategy=LXMLWebScrapingStrategy(),
|
||||
scan_full_page=True,
|
||||
scroll_delay=0.2,
|
||||
)
|
||||
|
||||
# Execute market data extraction
|
||||
results: List[CrawlResult] = await crawler.arun(
|
||||
url="https://coinmarketcap.com/?page=1", config=crawl_config
|
||||
)
|
||||
|
||||
# Process results
|
||||
raw_df = pd.DataFrame()
|
||||
for result in results:
|
||||
# Use the new tables field, falling back to media["tables"] for backward compatibility
|
||||
tables = result.tables if hasattr(result, "tables") and result.tables else result.media.get("tables", [])
|
||||
if result.success and tables:
|
||||
# Extract primary market table into a DataFrame
|
||||
raw_df = pd.DataFrame(
|
||||
tables[0]["rows"],
|
||||
columns=tables[0]["headers"],
|
||||
)
|
||||
break
|
||||
|
||||
|
||||
# This is for debugging only
|
||||
# ////// Remove this in production from here..
|
||||
# Save raw data for debugging
|
||||
raw_df.to_csv(f"{__current_dir__}/tmp/raw_crypto_data.csv", index=False)
|
||||
print("🔍 Raw data saved to 'raw_crypto_data.csv'")
|
||||
|
||||
# Read from file for debugging
|
||||
raw_df = pd.read_csv(f"{__current_dir__}/tmp/raw_crypto_data.csv")
|
||||
# ////// ..to here
|
||||
|
||||
# Keep only the top 50 rows
|
||||
raw_df = raw_df.head(50)
|
||||
# Remove "Buy" from name
|
||||
raw_df["Name"] = raw_df["Name"].str.replace("Buy", "")
|
||||
|
||||
# Initialize analysis engine
|
||||
analyzer = CryptoAlphaGenerator()
|
||||
clean_df = analyzer.clean_data(raw_df)
|
||||
analyzed_df = analyzer.calculate_metrics(clean_df)
|
||||
|
||||
# Generate outputs
|
||||
visuals = analyzer.create_visuals(analyzed_df)
|
||||
insights = analyzer.generate_insights(analyzed_df)
|
||||
|
||||
# Save visualizations
|
||||
visuals["market_map"].write_html(f"{__current_dir__}/tmp/market_map.html")
|
||||
visuals["liquidity_tree"].write_html(f"{__current_dir__}/tmp/liquidity_tree.html")
|
||||
|
||||
# Display results
|
||||
print("🔑 Key Trading Insights:")
|
||||
print(insights)
|
||||
print("\n📊 Open 'market_map.html' for interactive analysis")
|
||||
print("\n📊 Open 'liquidity_tree.html' for interactive analysis")
|
||||
|
||||
finally:
|
||||
await crawler.close()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
@@ -39,7 +39,7 @@ async def memory_adaptive_with_rate_limit(urls, browser_config, run_config):
|
||||
start = time.perf_counter()
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
dispatcher = MemoryAdaptiveDispatcher(
|
||||
memory_threshold_percent=70.0,
|
||||
memory_threshold_percent=95.0,
|
||||
max_session_permit=10,
|
||||
rate_limiter=RateLimiter(
|
||||
base_delay=(1.0, 2.0), max_delay=30.0, max_retries=2
|
||||
|
||||
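For context on the hunk above: the change raises the dispatcher's memory ceiling from 70% to 95% while keeping the same session cap and rate limiter. A minimal sketch of how these pieces typically fit together, assuming `arun_many` accepts a `dispatcher` argument and that `MemoryAdaptiveDispatcher`/`RateLimiter` import from `crawl4ai.async_dispatcher` as in recent releases (everything not shown in the hunk is illustrative):

```python
import asyncio
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
from crawl4ai.async_dispatcher import MemoryAdaptiveDispatcher, RateLimiter  # assumed import path

async def crawl_batch(urls):
    # The dispatcher defers new sessions once system memory crosses the threshold;
    # the rate limiter spaces out requests per domain and retries when the server rate-limits.
    dispatcher = MemoryAdaptiveDispatcher(
        memory_threshold_percent=95.0,
        max_session_permit=10,
        rate_limiter=RateLimiter(base_delay=(1.0, 2.0), max_delay=30.0, max_retries=2),
    )
    async with AsyncWebCrawler(config=BrowserConfig(headless=True)) as crawler:
        return await crawler.arun_many(
            urls,
            config=CrawlerRunConfig(cache_mode=CacheMode.BYPASS),
            dispatcher=dispatcher,
        )

if __name__ == "__main__":
    results = asyncio.run(crawl_batch(["https://example.com", "https://example.org"]))
    print(f"Crawled {len(results)} pages")
```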
1317 docs/examples/docker/demo_docker_api.py Normal file (diff suppressed because it is too large)
149 docs/examples/docker/demo_docker_polling.py Normal file
@@ -0,0 +1,149 @@
|
||||
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
demo_docker_polling.py
|
||||
Quick sanity-check for the asynchronous crawl job endpoints:
|
||||
|
||||
• POST /crawl/job – enqueue work, get task_id
|
||||
• GET /crawl/job/{id} – poll status / fetch result
|
||||
|
||||
The style matches demo_docker_api.py (console.rule banners, helper
|
||||
functions, coloured status lines). Adjust BASE_URL as needed.
|
||||
|
||||
Run: python demo_docker_polling.py
|
||||
"""
|
||||
|
||||
import asyncio, json, os, time, urllib.parse
|
||||
from typing import Dict, List
|
||||
|
||||
import httpx
|
||||
from rich.console import Console
|
||||
from rich.panel import Panel
|
||||
from rich.syntax import Syntax
|
||||
|
||||
console = Console()
|
||||
BASE_URL = os.getenv("BASE_URL", "http://localhost:11234")
|
||||
SIMPLE_URL = "https://example.org"
|
||||
LINKS_URL = "https://httpbin.org/links/10/1"
|
||||
|
||||
# --- helpers --------------------------------------------------------------
|
||||
|
||||
|
||||
def print_payload(payload: Dict):
|
||||
console.print(Panel(Syntax(json.dumps(payload, indent=2),
|
||||
"json", theme="monokai", line_numbers=False),
|
||||
title="Payload", border_style="cyan", expand=False))
|
||||
|
||||
|
||||
async def check_server_health(client: httpx.AsyncClient) -> bool:
|
||||
try:
|
||||
resp = await client.get("/health")
|
||||
if resp.is_success:
|
||||
console.print("[green]Server healthy[/]")
|
||||
return True
|
||||
except Exception:
|
||||
pass
|
||||
console.print("[bold red]Server is not responding on /health[/]")
|
||||
return False
|
||||
|
||||
|
||||
async def poll_for_result(client: httpx.AsyncClient, task_id: str,
|
||||
poll_interval: float = 1.5, timeout: float = 90.0):
|
||||
"""Hit /crawl/job/{id} until COMPLETED/FAILED or timeout."""
|
||||
start = time.time()
|
||||
while True:
|
||||
resp = await client.get(f"/crawl/job/{task_id}")
|
||||
resp.raise_for_status()
|
||||
data = resp.json()
|
||||
status = data.get("status")
|
||||
if status and status.upper() in ("COMPLETED", "FAILED"):
|
||||
return data
|
||||
if time.time() - start > timeout:
|
||||
raise TimeoutError(f"Task {task_id} did not finish in {timeout}s")
|
||||
await asyncio.sleep(poll_interval)
|
||||
|
||||
|
||||
# --- demo functions -------------------------------------------------------
|
||||
|
||||
|
||||
async def demo_poll_single_url(client: httpx.AsyncClient):
|
||||
payload = {
|
||||
"urls": [SIMPLE_URL],
|
||||
"browser_config": {"type": "BrowserConfig",
|
||||
"params": {"headless": True}},
|
||||
"crawler_config": {"type": "CrawlerRunConfig",
|
||||
"params": {"cache_mode": "BYPASS"}}
|
||||
}
|
||||
|
||||
console.rule("[bold blue]Demo A: /crawl/job Single URL[/]", style="blue")
|
||||
print_payload(payload)
|
||||
|
||||
# enqueue
|
||||
resp = await client.post("/crawl/job", json=payload)
|
||||
console.print(f"Enqueue status: [bold]{resp.status_code}[/]")
|
||||
resp.raise_for_status()
|
||||
task_id = resp.json()["task_id"]
|
||||
console.print(f"Task ID: [yellow]{task_id}[/]")
|
||||
|
||||
# poll
|
||||
console.print("Polling…")
|
||||
result = await poll_for_result(client, task_id)
|
||||
console.print(Panel(Syntax(json.dumps(result, indent=2),
|
||||
"json", theme="fruity"),
|
||||
title="Final result", border_style="green"))
|
||||
if result["status"] == "COMPLETED":
|
||||
console.print("[green]✅ Crawl succeeded[/]")
|
||||
else:
|
||||
console.print("[red]❌ Crawl failed[/]")
|
||||
|
||||
|
||||
async def demo_poll_multi_url(client: httpx.AsyncClient):
|
||||
payload = {
|
||||
"urls": [SIMPLE_URL, LINKS_URL],
|
||||
"browser_config": {"type": "BrowserConfig",
|
||||
"params": {"headless": True}},
|
||||
"crawler_config": {"type": "CrawlerRunConfig",
|
||||
"params": {"cache_mode": "BYPASS"}}
|
||||
}
|
||||
|
||||
console.rule("[bold magenta]Demo B: /crawl/job Multi-URL[/]",
|
||||
style="magenta")
|
||||
print_payload(payload)
|
||||
|
||||
resp = await client.post("/crawl/job", json=payload)
|
||||
console.print(f"Enqueue status: [bold]{resp.status_code}[/]")
|
||||
resp.raise_for_status()
|
||||
task_id = resp.json()["task_id"]
|
||||
console.print(f"Task ID: [yellow]{task_id}[/]")
|
||||
|
||||
console.print("Polling…")
|
||||
result = await poll_for_result(client, task_id)
|
||||
console.print(Panel(Syntax(json.dumps(result, indent=2),
|
||||
"json", theme="fruity"),
|
||||
title="Final result", border_style="green"))
|
||||
if result["status"] == "COMPLETED":
|
||||
console.print(
|
||||
f"[green]✅ {len(json.loads(result['result'])['results'])} URLs crawled[/]")
|
||||
else:
|
||||
console.print("[red]❌ Crawl failed[/]")
|
||||
|
||||
|
||||
# --- main runner ----------------------------------------------------------
|
||||
|
||||
|
||||
async def main_demo():
|
||||
async with httpx.AsyncClient(base_url=BASE_URL, timeout=300.0) as client:
|
||||
if not await check_server_health(client):
|
||||
return
|
||||
await demo_poll_single_url(client)
|
||||
await demo_poll_multi_url(client)
|
||||
console.rule("[bold green]Polling demos complete[/]", style="green")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
try:
|
||||
asyncio.run(main_demo())
|
||||
except KeyboardInterrupt:
|
||||
console.print("\n[yellow]Interrupted by user[/]")
|
||||
except Exception:
|
||||
console.print_exception(show_locals=False)
|
||||
@@ -73,7 +73,7 @@ async def test_stream_crawl(session, token: str):
|
||||
# "https://news.ycombinator.com/news"
|
||||
],
|
||||
"browser_config": {"headless": True, "viewport": {"width": 1200}},
|
||||
"crawler_config": {"stream": True, "cache_mode": "aggressive"}
|
||||
"crawler_config": {"stream": True, "cache_mode": "bypass"}
|
||||
}
|
||||
headers = {"Authorization": f"Bearer {token}"}
|
||||
print(f"\nTesting Streaming Crawl: {url}")
|
||||
|
||||
@@ -11,7 +11,7 @@ import asyncio
|
||||
import os
|
||||
|
||||
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
|
||||
from crawl4ai.async_configs import LlmConfig
|
||||
from crawl4ai import LLMConfig
|
||||
from crawl4ai.extraction_strategy import (
|
||||
LLMExtractionStrategy,
|
||||
JsonCssExtractionStrategy,
|
||||
@@ -61,19 +61,19 @@ async def main():
|
||||
|
||||
# 1. LLM Extraction with different input formats
|
||||
markdown_strategy = LLMExtractionStrategy(
|
||||
llmConfig = LlmConfig(provider="openai/gpt-4o-mini", api_token=os.getenv("OPENAI_API_KEY")),
|
||||
llm_config = LLMConfig(provider="openai/gpt-4o-mini", api_token=os.getenv("OPENAI_API_KEY")),
|
||||
instruction="Extract product information including name, price, and description",
|
||||
)
|
||||
|
||||
html_strategy = LLMExtractionStrategy(
|
||||
input_format="html",
|
||||
llmConfig=LlmConfig(provider="openai/gpt-4o-mini", api_token=os.getenv("OPENAI_API_KEY")),
|
||||
llm_config=LLMConfig(provider="openai/gpt-4o-mini", api_token=os.getenv("OPENAI_API_KEY")),
|
||||
instruction="Extract product information from HTML including structured data",
|
||||
)
|
||||
|
||||
fit_markdown_strategy = LLMExtractionStrategy(
|
||||
input_format="fit_markdown",
|
||||
llmConfig=LlmConfig(provider="openai/gpt-4o-mini",api_token=os.getenv("OPENAI_API_KEY")),
|
||||
llm_config=LLMConfig(provider="openai/gpt-4o-mini",api_token=os.getenv("OPENAI_API_KEY")),
|
||||
instruction="Extract product information from cleaned markdown",
|
||||
)
|
||||
|
||||
|
||||
@@ -12,9 +12,10 @@ We’ve introduced a new feature that effortlessly handles even the biggest page
|
||||
|
||||
**Simple Example:**
|
||||
```python
|
||||
import os, sys
|
||||
import os
|
||||
import sys
|
||||
import asyncio
|
||||
from crawl4ai import AsyncWebCrawler, CacheMode
|
||||
from crawl4ai import AsyncWebCrawler, CacheMode, CrawlerRunConfig
|
||||
|
||||
# Adjust paths as needed
|
||||
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
||||
@@ -26,9 +27,11 @@ async def main():
|
||||
# Request both PDF and screenshot
|
||||
result = await crawler.arun(
|
||||
url='https://en.wikipedia.org/wiki/List_of_common_misconceptions',
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
pdf=True,
|
||||
screenshot=True
|
||||
config=CrawlerRunConfig(
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
pdf=True,
|
||||
screenshot=True
|
||||
)
|
||||
)
|
||||
|
||||
if result.success:
|
||||
@@ -40,9 +43,8 @@ async def main():
|
||||
|
||||
# Save PDF
|
||||
if result.pdf:
|
||||
pdf_bytes = b64decode(result.pdf)
|
||||
with open(os.path.join(__location__, "page.pdf"), "wb") as f:
|
||||
f.write(pdf_bytes)
|
||||
f.write(result.pdf)
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
|
||||
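One detail worth calling out from the hunk above: the updated example writes `result.pdf` to disk directly instead of base64-decoding it first, which indicates the PDF payload is now exposed as raw bytes. A minimal sketch of the new pattern (assuming `result.pdf` holds bytes when `pdf=True` is set in `CrawlerRunConfig`):

```python
# Sketch only: result.pdf is assumed to be raw PDF bytes, so no b64decode step is needed.
if result.success and result.pdf:
    with open("page.pdf", "wb") as f:
        f.write(result.pdf)
```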
@@ -3,7 +3,6 @@ from crawl4ai import (
|
||||
AsyncWebCrawler,
|
||||
BrowserConfig,
|
||||
CrawlerRunConfig,
|
||||
CacheMode,
|
||||
DefaultMarkdownGenerator,
|
||||
PruningContentFilter,
|
||||
CrawlResult
|
||||
@@ -11,23 +10,20 @@ from crawl4ai import (
|
||||
|
||||
|
||||
async def main():
|
||||
browser_config = BrowserConfig(headless=True, verbose=True)
|
||||
browser_config = BrowserConfig(
|
||||
headless=False,
|
||||
verbose=True,
|
||||
)
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
crawler_config = CrawlerRunConfig(
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
markdown_generator=DefaultMarkdownGenerator(
|
||||
# content_filter=PruningContentFilter(
|
||||
# threshold=0.48, threshold_type="fixed", min_word_threshold=0
|
||||
# )
|
||||
content_filter=PruningContentFilter()
|
||||
),
|
||||
)
|
||||
result : CrawlResult = await crawler.arun(
|
||||
# url="https://www.helloworld.org", config=crawler_config
|
||||
url="https://www.kidocode.com", config=crawler_config
|
||||
result: CrawlResult = await crawler.arun(
|
||||
url="https://www.helloworld.org", config=crawler_config
|
||||
)
|
||||
print(result.markdown.raw_markdown[:500])
|
||||
# print(result.model_dump())
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
from crawl4ai.async_configs import LlmConfig
|
||||
from crawl4ai import LLMConfig
|
||||
from crawl4ai import AsyncWebCrawler, LLMExtractionStrategy
|
||||
import asyncio
|
||||
import os
|
||||
@@ -23,7 +23,7 @@ async def main():
|
||||
word_count_threshold=1,
|
||||
extraction_strategy=LLMExtractionStrategy(
|
||||
# provider= "openai/gpt-4o", api_token = os.getenv('OPENAI_API_KEY'),
|
||||
llmConfig=LlmConfig(provider="groq/llama-3.1-70b-versatile", api_token=os.getenv("GROQ_API_KEY")),
|
||||
llm_config=LLMConfig(provider="groq/llama-3.1-70b-versatile", api_token=os.getenv("GROQ_API_KEY")),
|
||||
schema=OpenAIModelFee.model_json_schema(),
|
||||
extraction_type="schema",
|
||||
instruction="From the crawled content, extract all mentioned model names along with their "
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import os
|
||||
import asyncio
|
||||
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
|
||||
from crawl4ai.async_configs import LlmConfig
|
||||
from crawl4ai import LLMConfig
|
||||
from crawl4ai.content_filter_strategy import LLMContentFilter
|
||||
|
||||
async def test_llm_filter():
|
||||
@@ -23,7 +23,7 @@ async def test_llm_filter():
|
||||
|
||||
# Initialize LLM filter with focused instruction
|
||||
filter = LLMContentFilter(
|
||||
llmConfig=LlmConfig(provider="openai/gpt-4o", api_token=os.getenv('OPENAI_API_KEY')),
|
||||
llm_config=LLMConfig(provider="openai/gpt-4o", api_token=os.getenv('OPENAI_API_KEY')),
|
||||
instruction="""
|
||||
Focus on extracting the core educational content about Python classes.
|
||||
Include:
|
||||
@@ -43,7 +43,7 @@ async def test_llm_filter():
|
||||
)
|
||||
|
||||
filter = LLMContentFilter(
|
||||
llmConfig=LlmConfig(provider="openai/gpt-4o",api_token=os.getenv('OPENAI_API_KEY')),
|
||||
llm_config=LLMConfig(provider="openai/gpt-4o",api_token=os.getenv('OPENAI_API_KEY')),
|
||||
chunk_token_threshold=2 ** 12 * 2, # 4096 * 2 = 8192
|
||||
ignore_cache = True,
|
||||
instruction="""
|
||||
|
||||
64 docs/examples/markdown/content_source_example.py Normal file
@@ -0,0 +1,64 @@
|
||||
"""
|
||||
Example showing how to use the content_source parameter to control HTML input for markdown generation.
|
||||
"""
|
||||
import asyncio
|
||||
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, DefaultMarkdownGenerator
|
||||
|
||||
async def demo_content_source():
|
||||
"""Demonstrates different content_source options for markdown generation."""
|
||||
url = "https://example.com" # Simple demo site
|
||||
|
||||
print("Crawling with different content_source options...")
|
||||
|
||||
# --- Example 1: Default Behavior (cleaned_html) ---
|
||||
# This uses the HTML after it has been processed by the scraping strategy
|
||||
# The HTML is cleaned, simplified, and optimized for readability
|
||||
default_generator = DefaultMarkdownGenerator() # content_source="cleaned_html" is default
|
||||
default_config = CrawlerRunConfig(markdown_generator=default_generator)
|
||||
|
||||
# --- Example 2: Raw HTML ---
|
||||
# This uses the original HTML directly from the webpage
|
||||
# Preserves more original content but may include navigation, ads, etc.
|
||||
raw_generator = DefaultMarkdownGenerator(content_source="raw_html")
|
||||
raw_config = CrawlerRunConfig(markdown_generator=raw_generator)
|
||||
|
||||
# --- Example 3: Fit HTML ---
|
||||
# This uses preprocessed HTML optimized for schema extraction
|
||||
# Better for structured data extraction but may lose some formatting
|
||||
fit_generator = DefaultMarkdownGenerator(content_source="fit_html")
|
||||
fit_config = CrawlerRunConfig(markdown_generator=fit_generator)
|
||||
|
||||
# Execute all three crawlers in sequence
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
# Default (cleaned_html)
|
||||
result_default = await crawler.arun(url=url, config=default_config)
|
||||
|
||||
# Raw HTML
|
||||
result_raw = await crawler.arun(url=url, config=raw_config)
|
||||
|
||||
# Fit HTML
|
||||
result_fit = await crawler.arun(url=url, config=fit_config)
|
||||
|
||||
# Print a summary of the results
|
||||
print("\nMarkdown Generation Results:\n")
|
||||
|
||||
print("1. Default (cleaned_html):")
|
||||
print(f" Length: {len(result_default.markdown.raw_markdown)} chars")
|
||||
print(f" First 80 chars: {result_default.markdown.raw_markdown[:80]}...\n")
|
||||
|
||||
print("2. Raw HTML:")
|
||||
print(f" Length: {len(result_raw.markdown.raw_markdown)} chars")
|
||||
print(f" First 80 chars: {result_raw.markdown.raw_markdown[:80]}...\n")
|
||||
|
||||
print("3. Fit HTML:")
|
||||
print(f" Length: {len(result_fit.markdown.raw_markdown)} chars")
|
||||
print(f" First 80 chars: {result_fit.markdown.raw_markdown[:80]}...\n")
|
||||
|
||||
# Demonstrate differences in output
|
||||
print("\nKey Takeaways:")
|
||||
print("- cleaned_html: Best for readable, focused content")
|
||||
print("- raw_html: Preserves more original content, but may include noise")
|
||||
print("- fit_html: Optimized for schema extraction and structured data")
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(demo_content_source())
|
||||
42 docs/examples/markdown/content_source_short_example.py Normal file
@@ -0,0 +1,42 @@
|
||||
"""
|
||||
Example demonstrating how to use the content_source parameter in MarkdownGenerationStrategy
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, DefaultMarkdownGenerator
|
||||
|
||||
async def demo_markdown_source_config():
|
||||
print("\n=== Demo: Configuring Markdown Source ===")
|
||||
|
||||
# Example 1: Generate markdown from cleaned HTML (default behavior)
|
||||
cleaned_md_generator = DefaultMarkdownGenerator(content_source="cleaned_html")
|
||||
config_cleaned = CrawlerRunConfig(markdown_generator=cleaned_md_generator)
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result_cleaned = await crawler.arun(url="https://example.com", config=config_cleaned)
|
||||
print("Markdown from Cleaned HTML (default):")
|
||||
print(f" Length: {len(result_cleaned.markdown.raw_markdown)}")
|
||||
print(f" Start: {result_cleaned.markdown.raw_markdown[:100]}...")
|
||||
|
||||
# Example 2: Generate markdown directly from raw HTML
|
||||
raw_md_generator = DefaultMarkdownGenerator(content_source="raw_html")
|
||||
config_raw = CrawlerRunConfig(markdown_generator=raw_md_generator)
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result_raw = await crawler.arun(url="https://example.com", config=config_raw)
|
||||
print("\nMarkdown from Raw HTML:")
|
||||
print(f" Length: {len(result_raw.markdown.raw_markdown)}")
|
||||
print(f" Start: {result_raw.markdown.raw_markdown[:100]}...")
|
||||
|
||||
# Example 3: Generate markdown from preprocessed 'fit' HTML
|
||||
fit_md_generator = DefaultMarkdownGenerator(content_source="fit_html")
|
||||
config_fit = CrawlerRunConfig(markdown_generator=fit_md_generator)
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result_fit = await crawler.arun(url="https://example.com", config=config_fit)
|
||||
print("\nMarkdown from Fit HTML:")
|
||||
print(f" Length: {len(result_fit.markdown.raw_markdown)}")
|
||||
print(f" Start: {result_fit.markdown.raw_markdown[:100]}...")
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(demo_markdown_source_config())
|
||||
477 docs/examples/network_console_capture_example.py Normal file
@@ -0,0 +1,477 @@
|
||||
import asyncio
|
||||
import json
|
||||
import os
|
||||
import base64
|
||||
from pathlib import Path
|
||||
from typing import List, Dict, Any
|
||||
from datetime import datetime
|
||||
|
||||
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, CacheMode, CrawlResult
|
||||
from crawl4ai import BrowserConfig
|
||||
|
||||
__cur_dir__ = Path(__file__).parent
|
||||
|
||||
# Create temp directory if it doesn't exist
|
||||
os.makedirs(os.path.join(__cur_dir__, "tmp"), exist_ok=True)
|
||||
|
||||
async def demo_basic_network_capture():
|
||||
"""Basic network request capturing example"""
|
||||
print("\n=== 1. Basic Network Request Capturing ===")
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
config = CrawlerRunConfig(
|
||||
capture_network_requests=True,
|
||||
wait_until="networkidle" # Wait for network to be idle
|
||||
)
|
||||
|
||||
result = await crawler.arun(
|
||||
url="https://example.com/",
|
||||
config=config
|
||||
)
|
||||
|
||||
if result.success and result.network_requests:
|
||||
print(f"Captured {len(result.network_requests)} network events")
|
||||
|
||||
# Count by event type
|
||||
event_types = {}
|
||||
for req in result.network_requests:
|
||||
event_type = req.get("event_type", "unknown")
|
||||
event_types[event_type] = event_types.get(event_type, 0) + 1
|
||||
|
||||
print("Event types:")
|
||||
for event_type, count in event_types.items():
|
||||
print(f" - {event_type}: {count}")
|
||||
|
||||
# Show a sample request and response
|
||||
request = next((r for r in result.network_requests if r.get("event_type") == "request"), None)
|
||||
response = next((r for r in result.network_requests if r.get("event_type") == "response"), None)
|
||||
|
||||
if request:
|
||||
print("\nSample request:")
|
||||
print(f" URL: {request.get('url')}")
|
||||
print(f" Method: {request.get('method')}")
|
||||
print(f" Headers: {list(request.get('headers', {}).keys())}")
|
||||
|
||||
if response:
|
||||
print("\nSample response:")
|
||||
print(f" URL: {response.get('url')}")
|
||||
print(f" Status: {response.get('status')} {response.get('status_text', '')}")
|
||||
print(f" Headers: {list(response.get('headers', {}).keys())}")
|
||||
|
||||
async def demo_basic_console_capture():
|
||||
"""Basic console message capturing example"""
|
||||
print("\n=== 2. Basic Console Message Capturing ===")
|
||||
|
||||
# Create a simple HTML file with console messages
|
||||
html_file = os.path.join(__cur_dir__, "tmp", "console_test.html")
|
||||
with open(html_file, "w") as f:
|
||||
f.write("""
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title>Console Test</title>
|
||||
</head>
|
||||
<body>
|
||||
<h1>Console Message Test</h1>
|
||||
<script>
|
||||
console.log("This is a basic log message");
|
||||
console.info("This is an info message");
|
||||
console.warn("This is a warning message");
|
||||
console.error("This is an error message");
|
||||
|
||||
// Generate an error
|
||||
try {
|
||||
nonExistentFunction();
|
||||
} catch (e) {
|
||||
console.error("Caught error:", e);
|
||||
}
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
""")
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
config = CrawlerRunConfig(
|
||||
capture_console_messages=True,
|
||||
wait_until="networkidle" # Wait to make sure all scripts execute
|
||||
)
|
||||
|
||||
result = await crawler.arun(
|
||||
url=f"file://{html_file}",
|
||||
config=config
|
||||
)
|
||||
|
||||
if result.success and result.console_messages:
|
||||
print(f"Captured {len(result.console_messages)} console messages")
|
||||
|
||||
# Count by message type
|
||||
message_types = {}
|
||||
for msg in result.console_messages:
|
||||
msg_type = msg.get("type", "unknown")
|
||||
message_types[msg_type] = message_types.get(msg_type, 0) + 1
|
||||
|
||||
print("Message types:")
|
||||
for msg_type, count in message_types.items():
|
||||
print(f" - {msg_type}: {count}")
|
||||
|
||||
# Show all messages
|
||||
print("\nAll console messages:")
|
||||
for i, msg in enumerate(result.console_messages, 1):
|
||||
print(f" {i}. [{msg.get('type', 'unknown')}] {msg.get('text', '')}")
|
||||
|
||||
async def demo_combined_capture():
|
||||
"""Capturing both network requests and console messages"""
|
||||
print("\n=== 3. Combined Network and Console Capture ===")
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
config = CrawlerRunConfig(
|
||||
capture_network_requests=True,
|
||||
capture_console_messages=True,
|
||||
wait_until="networkidle"
|
||||
)
|
||||
|
||||
result = await crawler.arun(
|
||||
url="https://httpbin.org/html",
|
||||
config=config
|
||||
)
|
||||
|
||||
if result.success:
|
||||
network_count = len(result.network_requests) if result.network_requests else 0
|
||||
console_count = len(result.console_messages) if result.console_messages else 0
|
||||
|
||||
print(f"Captured {network_count} network events and {console_count} console messages")
|
||||
|
||||
# Save the captured data to a JSON file for analysis
|
||||
output_file = os.path.join(__cur_dir__, "tmp", "capture_data.json")
|
||||
with open(output_file, "w") as f:
|
||||
json.dump({
|
||||
"url": result.url,
|
||||
"timestamp": datetime.now().isoformat(),
|
||||
"network_requests": result.network_requests,
|
||||
"console_messages": result.console_messages
|
||||
}, f, indent=2)
|
||||
|
||||
print(f"Full capture data saved to {output_file}")
|
||||
|
||||
async def analyze_spa_network_traffic():
|
||||
"""Analyze network traffic of a Single-Page Application"""
|
||||
print("\n=== 4. Analyzing SPA Network Traffic ===")
|
||||
|
||||
async with AsyncWebCrawler(config=BrowserConfig(
|
||||
headless=True,
|
||||
viewport_width=1280,
|
||||
viewport_height=800
|
||||
)) as crawler:
|
||||
config = CrawlerRunConfig(
|
||||
capture_network_requests=True,
|
||||
capture_console_messages=True,
|
||||
# Wait longer to ensure all resources are loaded
|
||||
wait_until="networkidle",
|
||||
page_timeout=60000, # 60 seconds
|
||||
)
|
||||
|
||||
result = await crawler.arun(
|
||||
url="https://weather.com",
|
||||
config=config
|
||||
)
|
||||
|
||||
if result.success and result.network_requests:
|
||||
# Extract different types of requests
|
||||
requests = []
|
||||
responses = []
|
||||
failures = []
|
||||
|
||||
for event in result.network_requests:
|
||||
event_type = event.get("event_type")
|
||||
if event_type == "request":
|
||||
requests.append(event)
|
||||
elif event_type == "response":
|
||||
responses.append(event)
|
||||
elif event_type == "request_failed":
|
||||
failures.append(event)
|
||||
|
||||
print(f"Captured {len(requests)} requests, {len(responses)} responses, and {len(failures)} failures")
|
||||
|
||||
# Analyze request types
|
||||
resource_types = {}
|
||||
for req in requests:
|
||||
resource_type = req.get("resource_type", "unknown")
|
||||
resource_types[resource_type] = resource_types.get(resource_type, 0) + 1
|
||||
|
||||
print("\nResource types:")
|
||||
for resource_type, count in sorted(resource_types.items(), key=lambda x: x[1], reverse=True):
|
||||
print(f" - {resource_type}: {count}")
|
||||
|
||||
# Analyze API calls
|
||||
api_calls = [r for r in requests if "api" in r.get("url", "").lower()]
|
||||
if api_calls:
|
||||
print(f"\nDetected {len(api_calls)} API calls:")
|
||||
for i, call in enumerate(api_calls[:5], 1): # Show first 5
|
||||
print(f" {i}. {call.get('method')} {call.get('url')}")
|
||||
if len(api_calls) > 5:
|
||||
print(f" ... and {len(api_calls) - 5} more")
|
||||
|
||||
# Analyze response status codes
|
||||
status_codes = {}
|
||||
for resp in responses:
|
||||
status = resp.get("status", 0)
|
||||
status_codes[status] = status_codes.get(status, 0) + 1
|
||||
|
||||
print("\nResponse status codes:")
|
||||
for status, count in sorted(status_codes.items()):
|
||||
print(f" - {status}: {count}")
|
||||
|
||||
# Analyze failures
|
||||
if failures:
|
||||
print("\nFailed requests:")
|
||||
for i, failure in enumerate(failures[:5], 1): # Show first 5
|
||||
print(f" {i}. {failure.get('url')} - {failure.get('failure_text')}")
|
||||
if len(failures) > 5:
|
||||
print(f" ... and {len(failures) - 5} more")
|
||||
|
||||
# Check for console errors
|
||||
if result.console_messages:
|
||||
errors = [msg for msg in result.console_messages if msg.get("type") == "error"]
|
||||
if errors:
|
||||
print(f"\nDetected {len(errors)} console errors:")
|
||||
for i, error in enumerate(errors[:3], 1): # Show first 3
|
||||
print(f" {i}. {error.get('text', '')[:100]}...")
|
||||
if len(errors) > 3:
|
||||
print(f" ... and {len(errors) - 3} more")
|
||||
|
||||
# Save analysis to file
|
||||
output_file = os.path.join(__cur_dir__, "tmp", "weather_network_analysis.json")
|
||||
with open(output_file, "w") as f:
|
||||
json.dump({
|
||||
"url": result.url,
|
||||
"timestamp": datetime.now().isoformat(),
|
||||
"statistics": {
|
||||
"request_count": len(requests),
|
||||
"response_count": len(responses),
|
||||
"failure_count": len(failures),
|
||||
"resource_types": resource_types,
|
||||
"status_codes": {str(k): v for k, v in status_codes.items()},
|
||||
"api_call_count": len(api_calls),
|
||||
"console_error_count": len(errors) if result.console_messages else 0
|
||||
},
|
||||
"network_requests": result.network_requests,
|
||||
"console_messages": result.console_messages
|
||||
}, f, indent=2)
|
||||
|
||||
print(f"\nFull analysis saved to {output_file}")
|
||||
|
||||
async def demo_security_analysis():
|
||||
"""Using network capture for security analysis"""
|
||||
print("\n=== 5. Security Analysis with Network Capture ===")
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
config = CrawlerRunConfig(
|
||||
capture_network_requests=True,
|
||||
capture_console_messages=True,
|
||||
wait_until="networkidle"
|
||||
)
|
||||
|
||||
# A site that makes multiple third-party requests
|
||||
result = await crawler.arun(
|
||||
url="https://www.nytimes.com/",
|
||||
config=config
|
||||
)
|
||||
|
||||
if result.success and result.network_requests:
|
||||
print(f"Captured {len(result.network_requests)} network events")
|
||||
|
||||
# Extract all domains
|
||||
domains = set()
|
||||
for req in result.network_requests:
|
||||
if req.get("event_type") == "request":
|
||||
url = req.get("url", "")
|
||||
try:
|
||||
from urllib.parse import urlparse
|
||||
domain = urlparse(url).netloc
|
||||
if domain:
|
||||
domains.add(domain)
|
||||
except:
|
||||
pass
|
||||
|
||||
print(f"\nDetected requests to {len(domains)} unique domains:")
|
||||
main_domain = urlparse(result.url).netloc
|
||||
|
||||
# Separate first-party vs third-party domains
|
||||
first_party = [d for d in domains if main_domain in d]
|
||||
third_party = [d for d in domains if main_domain not in d]
|
||||
|
||||
print(f" - First-party domains: {len(first_party)}")
|
||||
print(f" - Third-party domains: {len(third_party)}")
|
||||
|
||||
# Look for potential trackers/analytics
|
||||
tracking_keywords = ["analytics", "tracker", "pixel", "tag", "stats", "metric", "collect", "beacon"]
|
||||
potential_trackers = []
|
||||
|
||||
for domain in third_party:
|
||||
if any(keyword in domain.lower() for keyword in tracking_keywords):
|
||||
potential_trackers.append(domain)
|
||||
|
||||
if potential_trackers:
|
||||
print(f"\nPotential tracking/analytics domains ({len(potential_trackers)}):")
|
||||
for i, domain in enumerate(sorted(potential_trackers)[:10], 1):
|
||||
print(f" {i}. {domain}")
|
||||
if len(potential_trackers) > 10:
|
||||
print(f" ... and {len(potential_trackers) - 10} more")
|
||||
|
||||
# Check for insecure (HTTP) requests
|
||||
insecure_requests = [
|
||||
req.get("url") for req in result.network_requests
|
||||
if req.get("event_type") == "request" and req.get("url", "").startswith("http://")
|
||||
]
|
||||
|
||||
if insecure_requests:
|
||||
print(f"\nWarning: Found {len(insecure_requests)} insecure (HTTP) requests:")
|
||||
for i, url in enumerate(insecure_requests[:5], 1):
|
||||
print(f" {i}. {url}")
|
||||
if len(insecure_requests) > 5:
|
||||
print(f" ... and {len(insecure_requests) - 5} more")
|
||||
|
||||
# Save security analysis to file
|
||||
output_file = os.path.join(__cur_dir__, "tmp", "security_analysis.json")
|
||||
with open(output_file, "w") as f:
|
||||
json.dump({
|
||||
"url": result.url,
|
||||
"main_domain": main_domain,
|
||||
"timestamp": datetime.now().isoformat(),
|
||||
"analysis": {
|
||||
"total_requests": len([r for r in result.network_requests if r.get("event_type") == "request"]),
|
||||
"unique_domains": len(domains),
|
||||
"first_party_domains": first_party,
|
||||
"third_party_domains": third_party,
|
||||
"potential_trackers": potential_trackers,
|
||||
"insecure_requests": insecure_requests
|
||||
}
|
||||
}, f, indent=2)
|
||||
|
||||
print(f"\nFull security analysis saved to {output_file}")
|
||||
|
||||
async def demo_performance_analysis():
|
||||
"""Using network capture for performance analysis"""
|
||||
print("\n=== 6. Performance Analysis with Network Capture ===")
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
config = CrawlerRunConfig(
|
||||
capture_network_requests=True,
|
||||
page_timeout=60 * 2 * 1000 # 120 seconds
|
||||
)
|
||||
|
||||
result = await crawler.arun(
|
||||
url="https://www.cnn.com/",
|
||||
config=config
|
||||
)
|
||||
|
||||
if result.success and result.network_requests:
|
||||
# Filter only response events with timing information
|
||||
responses_with_timing = [
|
||||
r for r in result.network_requests
|
||||
if r.get("event_type") == "response" and r.get("request_timing")
|
||||
]
|
||||
|
||||
if responses_with_timing:
|
||||
print(f"Analyzing timing for {len(responses_with_timing)} network responses")
|
||||
|
||||
# Group by resource type
|
||||
resource_timings = {}
|
||||
for resp in responses_with_timing:
|
||||
url = resp.get("url", "")
|
||||
timing = resp.get("request_timing", {})
|
||||
|
||||
# Determine resource type from URL extension
|
||||
ext = url.split(".")[-1].lower() if "." in url.split("/")[-1] else "unknown"
|
||||
if ext in ["jpg", "jpeg", "png", "gif", "webp", "svg", "ico"]:
|
||||
resource_type = "image"
|
||||
elif ext in ["js"]:
|
||||
resource_type = "javascript"
|
||||
elif ext in ["css"]:
|
||||
resource_type = "css"
|
||||
elif ext in ["woff", "woff2", "ttf", "otf", "eot"]:
|
||||
resource_type = "font"
|
||||
else:
|
||||
resource_type = "other"
|
||||
|
||||
if resource_type not in resource_timings:
|
||||
resource_timings[resource_type] = []
|
||||
|
||||
# Calculate request duration if timing information is available
|
||||
if isinstance(timing, dict) and "requestTime" in timing and "receiveHeadersEnd" in timing:
|
||||
# Convert to milliseconds
|
||||
duration = (timing["receiveHeadersEnd"] - timing["requestTime"]) * 1000
|
||||
resource_timings[resource_type].append({
|
||||
"url": url,
|
||||
"duration_ms": duration
|
||||
})
|
||||
if isinstance(timing, dict) and "requestStart" in timing and "responseStart" in timing and "startTime" in timing:
|
||||
# Convert to milliseconds
|
||||
duration = (timing["responseStart"] - timing["requestStart"]) * 1000
|
||||
resource_timings[resource_type].append({
|
||||
"url": url,
|
||||
"duration_ms": duration
|
||||
})
|
||||
|
||||
# Calculate statistics for each resource type
|
||||
print("\nPerformance by resource type:")
|
||||
for resource_type, timings in resource_timings.items():
|
||||
if timings:
|
||||
durations = [t["duration_ms"] for t in timings]
|
||||
avg_duration = sum(durations) / len(durations)
|
||||
max_duration = max(durations)
|
||||
slowest_resource = next(t["url"] for t in timings if t["duration_ms"] == max_duration)
|
||||
|
||||
print(f" {resource_type.upper()}:")
|
||||
print(f" - Count: {len(timings)}")
|
||||
print(f" - Avg time: {avg_duration:.2f} ms")
|
||||
print(f" - Max time: {max_duration:.2f} ms")
|
||||
print(f" - Slowest: {slowest_resource}")
|
||||
|
||||
# Identify the slowest resources overall
|
||||
all_timings = []
|
||||
for resource_type, timings in resource_timings.items():
|
||||
for timing in timings:
|
||||
timing["type"] = resource_type
|
||||
all_timings.append(timing)
|
||||
|
||||
all_timings.sort(key=lambda x: x["duration_ms"], reverse=True)
|
||||
|
||||
print("\nTop 5 slowest resources:")
|
||||
for i, timing in enumerate(all_timings[:5], 1):
|
||||
print(f" {i}. [{timing['type']}] {timing['url']} - {timing['duration_ms']:.2f} ms")
|
||||
|
||||
# Save performance analysis to file
|
||||
output_file = os.path.join(__cur_dir__, "tmp", "performance_analysis.json")
|
||||
with open(output_file, "w") as f:
|
||||
json.dump({
|
||||
"url": result.url,
|
||||
"timestamp": datetime.now().isoformat(),
|
||||
"resource_timings": resource_timings,
|
||||
"slowest_resources": all_timings[:10] # Save top 10
|
||||
}, f, indent=2)
|
||||
|
||||
print(f"\nFull performance analysis saved to {output_file}")
|
||||
|
||||
async def main():
|
||||
"""Run all demo functions sequentially"""
|
||||
print("=== Network and Console Capture Examples ===")
|
||||
|
||||
# Make sure tmp directory exists
|
||||
os.makedirs(os.path.join(__cur_dir__, "tmp"), exist_ok=True)
|
||||
|
||||
# Run basic examples
|
||||
# await demo_basic_network_capture()
|
||||
await demo_basic_console_capture()
|
||||
# await demo_combined_capture()
|
||||
|
||||
# Run advanced examples
|
||||
# await analyze_spa_network_traffic()
|
||||
# await demo_security_analysis()
|
||||
# await demo_performance_analysis()
|
||||
|
||||
print("\n=== Examples Complete ===")
|
||||
print(f"Check the tmp directory for output files: {os.path.join(__cur_dir__, 'tmp')}")
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
@@ -1,6 +1,6 @@
|
||||
import os, sys
|
||||
|
||||
from crawl4ai.async_configs import LlmConfig
|
||||
from crawl4ai import LLMConfig
|
||||
|
||||
sys.path.append(
|
||||
os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
@@ -211,7 +211,7 @@ async def extract_structured_data_using_llm(
|
||||
word_count_threshold=1,
|
||||
page_timeout=80000,
|
||||
extraction_strategy=LLMExtractionStrategy(
|
||||
llmConfig=LlmConfig(provider=provider,api_token=api_token),
|
||||
llm_config=LLMConfig(provider=provider,api_token=api_token),
|
||||
schema=OpenAIModelFee.model_json_schema(),
|
||||
extraction_type="schema",
|
||||
instruction="""From the crawled content, extract all mentioned model names along with their fees for input and output tokens.
|
||||
@@ -1,675 +0,0 @@
|
||||
import os, sys
|
||||
|
||||
from crawl4ai.async_configs import LlmConfig
|
||||
|
||||
# append parent directory to system path
|
||||
sys.path.append(
|
||||
os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
)
|
||||
os.environ["FIRECRAWL_API_KEY"] = "fc-84b370ccfad44beabc686b38f1769692"
|
||||
|
||||
import asyncio
|
||||
# import nest_asyncio
|
||||
# nest_asyncio.apply()
|
||||
|
||||
import time
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
from typing import Dict, List
|
||||
from bs4 import BeautifulSoup
|
||||
from pydantic import BaseModel, Field
|
||||
from crawl4ai import AsyncWebCrawler, CacheMode
|
||||
from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator
|
||||
from crawl4ai.content_filter_strategy import PruningContentFilter
|
||||
from crawl4ai.extraction_strategy import (
|
||||
JsonCssExtractionStrategy,
|
||||
LLMExtractionStrategy,
|
||||
)
|
||||
|
||||
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
|
||||
|
||||
print("Crawl4AI: Advanced Web Crawling and Data Extraction")
|
||||
print("GitHub Repository: https://github.com/unclecode/crawl4ai")
|
||||
print("Twitter: @unclecode")
|
||||
print("Website: https://crawl4ai.com")
|
||||
|
||||
|
||||
async def simple_crawl():
|
||||
print("\n--- Basic Usage ---")
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://www.nbcnews.com/business", cache_mode=CacheMode.BYPASS
|
||||
)
|
||||
print(result.markdown[:500]) # Print first 500 characters
|
||||
|
||||
|
||||
async def simple_example_with_running_js_code():
|
||||
print("\n--- Executing JavaScript and Using CSS Selectors ---")
|
||||
# New code to handle the wait_for parameter
|
||||
wait_for = """() => {
|
||||
return Array.from(document.querySelectorAll('article.tease-card')).length > 10;
|
||||
}"""
|
||||
|
||||
# wait_for can be also just a css selector
|
||||
# wait_for = "article.tease-card:nth-child(10)"
|
||||
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
js_code = [
|
||||
"const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More')); loadMoreButton && loadMoreButton.click();"
|
||||
]
|
||||
result = await crawler.arun(
|
||||
url="https://www.nbcnews.com/business",
|
||||
js_code=js_code,
|
||||
# wait_for=wait_for,
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
)
|
||||
print(result.markdown[:500]) # Print first 500 characters
|
||||
|
||||
|
||||
async def simple_example_with_css_selector():
|
||||
print("\n--- Using CSS Selectors ---")
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://www.nbcnews.com/business",
|
||||
css_selector=".wide-tease-item__description",
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
)
|
||||
print(result.markdown[:500]) # Print first 500 characters
|
||||
|
||||
|
||||
async def use_proxy():
|
||||
print("\n--- Using a Proxy ---")
|
||||
print(
|
||||
"Note: Replace 'http://your-proxy-url:port' with a working proxy to run this example."
|
||||
)
|
||||
# Uncomment and modify the following lines to use a proxy
|
||||
async with AsyncWebCrawler(
|
||||
verbose=True, proxy="http://your-proxy-url:port"
|
||||
) as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://www.nbcnews.com/business", cache_mode=CacheMode.BYPASS
|
||||
)
|
||||
if result.success:
|
||||
print(result.markdown[:500]) # Print first 500 characters
|
||||
|
||||
|
||||
async def capture_and_save_screenshot(url: str, output_path: str):
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
result = await crawler.arun(
|
||||
url=url, screenshot=True, cache_mode=CacheMode.BYPASS
|
||||
)
|
||||
|
||||
if result.success and result.screenshot:
|
||||
import base64
|
||||
|
||||
# Decode the base64 screenshot data
|
||||
screenshot_data = base64.b64decode(result.screenshot)
|
||||
|
||||
# Save the screenshot as a JPEG file
|
||||
with open(output_path, "wb") as f:
|
||||
f.write(screenshot_data)
|
||||
|
||||
print(f"Screenshot saved successfully to {output_path}")
|
||||
else:
|
||||
print("Failed to capture screenshot")
|
||||
|
||||
|
||||
class OpenAIModelFee(BaseModel):
|
||||
model_name: str = Field(..., description="Name of the OpenAI model.")
|
||||
input_fee: str = Field(..., description="Fee for input token for the OpenAI model.")
|
||||
output_fee: str = Field(
|
||||
..., description="Fee for output token for the OpenAI model."
|
||||
)
|
||||
|
||||
|
||||
async def extract_structured_data_using_llm(
|
||||
provider: str, api_token: str = None, extra_headers: Dict[str, str] = None
|
||||
):
|
||||
print(f"\n--- Extracting Structured Data with {provider} ---")
|
||||
|
||||
if api_token is None and provider != "ollama":
|
||||
print(f"API token is required for {provider}. Skipping this example.")
|
||||
return
|
||||
|
||||
# extra_args = {}
|
||||
extra_args = {
|
||||
"temperature": 0,
|
||||
"top_p": 0.9,
|
||||
"max_tokens": 2000,
|
||||
# any other supported parameters for litellm
|
||||
}
|
||||
if extra_headers:
|
||||
extra_args["extra_headers"] = extra_headers
|
||||
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://openai.com/api/pricing/",
|
||||
word_count_threshold=1,
|
||||
extraction_strategy=LLMExtractionStrategy(
|
||||
llmConfig=LlmConfig(provider=provider,api_token=api_token),
|
||||
schema=OpenAIModelFee.model_json_schema(),
|
||||
extraction_type="schema",
|
||||
instruction="""From the crawled content, extract all mentioned model names along with their fees for input and output tokens.
|
||||
Do not miss any models in the entire content. One extracted model JSON format should look like this:
|
||||
{"model_name": "GPT-4", "input_fee": "US$10.00 / 1M tokens", "output_fee": "US$30.00 / 1M tokens"}.""",
|
||||
extra_args=extra_args,
|
||||
),
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
)
|
||||
print(result.extracted_content)
|
||||
|
||||
|
||||
async def extract_structured_data_using_css_extractor():
|
||||
print("\n--- Using JsonCssExtractionStrategy for Fast Structured Output ---")
|
||||
schema = {
|
||||
"name": "KidoCode Courses",
|
||||
"baseSelector": "section.charge-methodology .w-tab-content > div",
|
||||
"fields": [
|
||||
{
|
||||
"name": "section_title",
|
||||
"selector": "h3.heading-50",
|
||||
"type": "text",
|
||||
},
|
||||
{
|
||||
"name": "section_description",
|
||||
"selector": ".charge-content",
|
||||
"type": "text",
|
||||
},
|
||||
{
|
||||
"name": "course_name",
|
||||
"selector": ".text-block-93",
|
||||
"type": "text",
|
||||
},
|
||||
{
|
||||
"name": "course_description",
|
||||
"selector": ".course-content-text",
|
||||
"type": "text",
|
||||
},
|
||||
{
|
||||
"name": "course_icon",
|
||||
"selector": ".image-92",
|
||||
"type": "attribute",
|
||||
"attribute": "src",
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
async with AsyncWebCrawler(headless=True, verbose=True) as crawler:
|
||||
# Create the JavaScript that handles clicking multiple times
|
||||
js_click_tabs = """
|
||||
(async () => {
|
||||
const tabs = document.querySelectorAll("section.charge-methodology .tabs-menu-3 > div");
|
||||
|
||||
for(let tab of tabs) {
|
||||
// scroll to the tab
|
||||
tab.scrollIntoView();
|
||||
tab.click();
|
||||
// Wait for content to load and animations to complete
|
||||
await new Promise(r => setTimeout(r, 500));
|
||||
}
|
||||
})();
|
||||
"""
|
||||
|
||||
result = await crawler.arun(
|
||||
url="https://www.kidocode.com/degrees/technology",
|
||||
extraction_strategy=JsonCssExtractionStrategy(schema, verbose=True),
|
||||
js_code=[js_click_tabs],
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
)
|
||||
|
||||
companies = json.loads(result.extracted_content)
|
||||
print(f"Successfully extracted {len(companies)} companies")
|
||||
print(json.dumps(companies[0], indent=2))
|
||||
|
||||
|
||||
# Advanced Session-Based Crawling with Dynamic Content 🔄
|
||||
async def crawl_dynamic_content_pages_method_1():
|
||||
print("\n--- Advanced Multi-Page Crawling with JavaScript Execution ---")
|
||||
first_commit = ""
|
||||
|
||||
async def on_execution_started(page):
|
||||
nonlocal first_commit
|
||||
try:
|
||||
while True:
|
||||
await page.wait_for_selector("li.Box-sc-g0xbh4-0 h4")
|
||||
commit = await page.query_selector("li.Box-sc-g0xbh4-0 h4")
|
||||
commit = await commit.evaluate("(element) => element.textContent")
|
||||
commit = re.sub(r"\s+", "", commit)
|
||||
if commit and commit != first_commit:
|
||||
first_commit = commit
|
||||
break
|
||||
await asyncio.sleep(0.5)
|
||||
except Exception as e:
|
||||
print(f"Warning: New content didn't appear after JavaScript execution: {e}")
|
||||
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
crawler.crawler_strategy.set_hook("on_execution_started", on_execution_started)
|
||||
|
||||
url = "https://github.com/microsoft/TypeScript/commits/main"
|
||||
session_id = "typescript_commits_session"
|
||||
all_commits = []
|
||||
|
||||
js_next_page = """
|
||||
(() => {
|
||||
const button = document.querySelector('a[data-testid="pagination-next-button"]');
|
||||
if (button) button.click();
|
||||
})();
|
||||
"""
|
||||
|
||||
for page in range(3): # Crawl 3 pages
|
||||
result = await crawler.arun(
|
||||
url=url,
|
||||
session_id=session_id,
|
||||
css_selector="li.Box-sc-g0xbh4-0",
|
||||
js=js_next_page if page > 0 else None,
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
js_only=page > 0,
|
||||
headless=False,
|
||||
)
|
||||
|
||||
assert result.success, f"Failed to crawl page {page + 1}"
|
||||
|
||||
soup = BeautifulSoup(result.cleaned_html, "html.parser")
|
||||
commits = soup.select("li")
|
||||
all_commits.extend(commits)
|
||||
|
||||
print(f"Page {page + 1}: Found {len(commits)} commits")
|
||||
|
||||
await crawler.crawler_strategy.kill_session(session_id)
|
||||
print(f"Successfully crawled {len(all_commits)} commits across 3 pages")
|
||||
|
||||
|
||||
async def crawl_dynamic_content_pages_method_2():
|
||||
print("\n--- Advanced Multi-Page Crawling with JavaScript Execution ---")
|
||||
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
url = "https://github.com/microsoft/TypeScript/commits/main"
|
||||
session_id = "typescript_commits_session"
|
||||
all_commits = []
|
||||
last_commit = ""
|
||||
|
||||
js_next_page_and_wait = """
|
||||
(async () => {
|
||||
const getCurrentCommit = () => {
|
||||
const commits = document.querySelectorAll('li.Box-sc-g0xbh4-0 h4');
|
||||
return commits.length > 0 ? commits[0].textContent.trim() : null;
|
||||
};
|
||||
|
||||
const initialCommit = getCurrentCommit();
|
||||
const button = document.querySelector('a[data-testid="pagination-next-button"]');
|
||||
if (button) button.click();
|
||||
|
||||
// Poll for changes
|
||||
while (true) {
|
||||
await new Promise(resolve => setTimeout(resolve, 100)); // Wait 100ms
|
||||
const newCommit = getCurrentCommit();
|
||||
if (newCommit && newCommit !== initialCommit) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
})();
|
||||
"""
|
||||
|
||||
schema = {
|
||||
"name": "Commit Extractor",
|
||||
"baseSelector": "li.Box-sc-g0xbh4-0",
|
||||
"fields": [
|
||||
{
|
||||
"name": "title",
|
||||
"selector": "h4.markdown-title",
|
||||
"type": "text",
|
||||
"transform": "strip",
|
||||
},
|
||||
],
|
||||
}
|
||||
extraction_strategy = JsonCssExtractionStrategy(schema, verbose=True)
|
||||
|
||||
for page in range(3): # Crawl 3 pages
|
||||
result = await crawler.arun(
|
||||
url=url,
|
||||
session_id=session_id,
|
||||
css_selector="li.Box-sc-g0xbh4-0",
|
||||
extraction_strategy=extraction_strategy,
|
||||
js_code=js_next_page_and_wait if page > 0 else None,
|
||||
js_only=page > 0,
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
headless=False,
|
||||
)
|
||||
|
||||
assert result.success, f"Failed to crawl page {page + 1}"
|
||||
|
||||
commits = json.loads(result.extracted_content)
|
||||
all_commits.extend(commits)
|
||||
|
||||
print(f"Page {page + 1}: Found {len(commits)} commits")
|
||||
|
||||
await crawler.crawler_strategy.kill_session(session_id)
|
||||
print(f"Successfully crawled {len(all_commits)} commits across 3 pages")
|
||||
|
||||
|
||||
async def crawl_dynamic_content_pages_method_3():
|
||||
print(
|
||||
"\n--- Advanced Multi-Page Crawling with JavaScript Execution using `wait_for` ---"
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
url = "https://github.com/microsoft/TypeScript/commits/main"
|
||||
session_id = "typescript_commits_session"
|
||||
all_commits = []
|
||||
|
||||
js_next_page = """
|
||||
const commits = document.querySelectorAll('li.Box-sc-g0xbh4-0 h4');
|
||||
if (commits.length > 0) {
|
||||
window.firstCommit = commits[0].textContent.trim();
|
||||
}
|
||||
const button = document.querySelector('a[data-testid="pagination-next-button"]');
|
||||
if (button) button.click();
|
||||
"""
|
||||
|
||||
wait_for = """() => {
|
||||
const commits = document.querySelectorAll('li.Box-sc-g0xbh4-0 h4');
|
||||
if (commits.length === 0) return false;
|
||||
const firstCommit = commits[0].textContent.trim();
|
||||
return firstCommit !== window.firstCommit;
|
||||
}"""
|
||||
|
||||
schema = {
|
||||
"name": "Commit Extractor",
|
||||
"baseSelector": "li.Box-sc-g0xbh4-0",
|
||||
"fields": [
|
||||
{
|
||||
"name": "title",
|
||||
"selector": "h4.markdown-title",
|
||||
"type": "text",
|
||||
"transform": "strip",
|
||||
},
|
||||
],
|
||||
}
|
||||
extraction_strategy = JsonCssExtractionStrategy(schema, verbose=True)
|
||||
|
||||
for page in range(3): # Crawl 3 pages
|
||||
result = await crawler.arun(
|
||||
url=url,
|
||||
session_id=session_id,
|
||||
css_selector="li.Box-sc-g0xbh4-0",
|
||||
extraction_strategy=extraction_strategy,
|
||||
js_code=js_next_page if page > 0 else None,
|
||||
wait_for=wait_for if page > 0 else None,
|
||||
js_only=page > 0,
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
headless=False,
|
||||
)
|
||||
|
||||
assert result.success, f"Failed to crawl page {page + 1}"
|
||||
|
||||
commits = json.loads(result.extracted_content)
|
||||
all_commits.extend(commits)
|
||||
|
||||
print(f"Page {page + 1}: Found {len(commits)} commits")
|
||||
|
||||
await crawler.crawler_strategy.kill_session(session_id)
|
||||
print(f"Successfully crawled {len(all_commits)} commits across 3 pages")
|
||||
|
||||
|
||||
async def crawl_custom_browser_type():
|
||||
# Use Firefox
|
||||
start = time.time()
|
||||
async with AsyncWebCrawler(
|
||||
browser_type="firefox", verbose=True, headless=True
|
||||
) as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://www.example.com", cache_mode=CacheMode.BYPASS
|
||||
)
|
||||
print(result.markdown[:500])
|
||||
print("Time taken: ", time.time() - start)
|
||||
|
||||
# Use WebKit
|
||||
start = time.time()
|
||||
async with AsyncWebCrawler(
|
||||
browser_type="webkit", verbose=True, headless=True
|
||||
) as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://www.example.com", cache_mode=CacheMode.BYPASS
|
||||
)
|
||||
print(result.markdown[:500])
|
||||
print("Time taken: ", time.time() - start)
|
||||
|
||||
# Use Chromium (default)
|
||||
start = time.time()
|
||||
async with AsyncWebCrawler(verbose=True, headless=True) as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://www.example.com", cache_mode=CacheMode.BYPASS
|
||||
)
|
||||
print(result.markdown[:500])
|
||||
print("Time taken: ", time.time() - start)
|
||||
|
||||
|
||||
async def crawl_with_user_simulation():
|
||||
async with AsyncWebCrawler(verbose=True, headless=True) as crawler:
|
||||
url = "YOUR-URL-HERE"
|
||||
result = await crawler.arun(
|
||||
url=url,
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
magic=True, # Automatically detects and removes overlays, popups, and other elements that block content
|
||||
# simulate_user = True,# Causes a series of random mouse movements and clicks to simulate user interaction
|
||||
# override_navigator = True # Overrides the navigator object to make it look like a real user
|
||||
)
|
||||
|
||||
print(result.markdown)
|
||||
|
||||
|
||||
async def speed_comparison():
|
||||
# print("\n--- Speed Comparison ---")
|
||||
# print("Firecrawl (simulated):")
|
||||
# print("Time taken: 7.02 seconds")
|
||||
# print("Content length: 42074 characters")
|
||||
# print("Images found: 49")
|
||||
# print()
|
||||
# Simulated Firecrawl performance
|
||||
from firecrawl import FirecrawlApp
|
||||
|
||||
app = FirecrawlApp(api_key=os.environ["FIRECRAWL_API_KEY"])
|
||||
start = time.time()
|
||||
scrape_status = app.scrape_url(
|
||||
"https://www.nbcnews.com/business", params={"formats": ["markdown", "html"]}
|
||||
)
|
||||
end = time.time()
|
||||
print("Firecrawl:")
|
||||
print(f"Time taken: {end - start:.2f} seconds")
|
||||
print(f"Content length: {len(scrape_status['markdown'])} characters")
|
||||
print(f"Images found: {scrape_status['markdown'].count('cldnry.s-nbcnews.com')}")
|
||||
print()
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
# Crawl4AI simple crawl
|
||||
start = time.time()
|
||||
result = await crawler.arun(
|
||||
url="https://www.nbcnews.com/business",
|
||||
word_count_threshold=0,
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
verbose=False,
|
||||
)
|
||||
end = time.time()
|
||||
print("Crawl4AI (simple crawl):")
|
||||
print(f"Time taken: {end - start:.2f} seconds")
|
||||
print(f"Content length: {len(result.markdown)} characters")
|
||||
print(f"Images found: {result.markdown.count('cldnry.s-nbcnews.com')}")
|
||||
print()
|
||||
|
||||
# Crawl4AI with advanced content filtering
|
||||
start = time.time()
|
||||
result = await crawler.arun(
|
||||
url="https://www.nbcnews.com/business",
|
||||
word_count_threshold=0,
|
||||
markdown_generator=DefaultMarkdownGenerator(
|
||||
content_filter=PruningContentFilter(
|
||||
threshold=0.48, threshold_type="fixed", min_word_threshold=0
|
||||
)
|
||||
# content_filter=BM25ContentFilter(user_query=None, bm25_threshold=1.0)
|
||||
),
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
verbose=False,
|
||||
)
|
||||
end = time.time()
|
||||
print("Crawl4AI (Markdown Plus):")
|
||||
print(f"Time taken: {end - start:.2f} seconds")
|
||||
print(f"Content length: {len(result.markdown.raw_markdown)} characters")
|
||||
print(f"Fit Markdown: {len(result.markdown.fit_markdown)} characters")
|
||||
print(f"Images found: {result.markdown.raw_markdown.count('cldnry.s-nbcnews.com')}")
|
||||
print()
|
||||
|
||||
# Crawl4AI with JavaScript execution
|
||||
start = time.time()
|
||||
result = await crawler.arun(
|
||||
url="https://www.nbcnews.com/business",
|
||||
js_code=[
|
||||
"const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More')); loadMoreButton && loadMoreButton.click();"
|
||||
],
|
||||
word_count_threshold=0,
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
markdown_generator=DefaultMarkdownGenerator(
|
||||
content_filter=PruningContentFilter(
|
||||
threshold=0.48, threshold_type="fixed", min_word_threshold=0
|
||||
)
|
||||
# content_filter=BM25ContentFilter(user_query=None, bm25_threshold=1.0)
|
||||
),
|
||||
verbose=False,
|
||||
)
|
||||
end = time.time()
|
||||
print("Crawl4AI (with JavaScript execution):")
|
||||
print(f"Time taken: {end - start:.2f} seconds")
|
||||
print(f"Content length: {len(result.markdown.raw_markdown)} characters")
|
||||
print(f"Fit Markdown: {len(result.markdown.fit_markdown)} characters")
|
||||
print(f"Images found: {result.markdown.raw_markdown.count('cldnry.s-nbcnews.com')}")
|
||||
|
||||
print("\nNote on Speed Comparison:")
|
||||
print("The speed test conducted here may not reflect optimal conditions.")
|
||||
print("When we call Firecrawl's API, we're seeing its best performance,")
|
||||
print("while Crawl4AI's performance is limited by the local network speed.")
|
||||
print("For a more accurate comparison, it's recommended to run these tests")
|
||||
print("on servers with a stable and fast internet connection.")
|
||||
print("Despite these limitations, Crawl4AI still demonstrates faster performance.")
|
||||
print("If you run these tests in an environment with better network conditions,")
|
||||
print("you may observe an even more significant speed advantage for Crawl4AI.")
|
||||
|
||||
|
||||
async def generate_knowledge_graph():
|
||||
class Entity(BaseModel):
|
||||
name: str
|
||||
description: str
|
||||
|
||||
class Relationship(BaseModel):
|
||||
entity1: Entity
|
||||
entity2: Entity
|
||||
description: str
|
||||
relation_type: str
|
||||
|
||||
class KnowledgeGraph(BaseModel):
|
||||
entities: List[Entity]
|
||||
relationships: List[Relationship]
|
||||
|
||||
extraction_strategy = LLMExtractionStrategy(
|
||||
llmConfig=LlmConfig(provider="openai/gpt-4o-mini", api_token=os.getenv("OPENAI_API_KEY")), # In case of Ollama just pass "no-token"
|
||||
schema=KnowledgeGraph.model_json_schema(),
|
||||
extraction_type="schema",
|
||||
instruction="""Extract entities and relationships from the given text.""",
|
||||
)
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
url = "https://paulgraham.com/love.html"
|
||||
result = await crawler.arun(
|
||||
url=url,
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
extraction_strategy=extraction_strategy,
|
||||
# magic=True
|
||||
)
|
||||
# print(result.extracted_content)
|
||||
with open(os.path.join(__location__, "kb.json"), "w") as f:
|
||||
f.write(result.extracted_content)
|
||||
|
||||
|
||||
async def fit_markdown_remove_overlay():
|
||||
async with AsyncWebCrawler(
|
||||
headless=True, # Set to False to see what is happening
|
||||
verbose=True,
|
||||
user_agent_mode="random",
|
||||
user_agent_generator_config={"device_type": "mobile", "os_type": "android"},
|
||||
) as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://www.kidocode.com/degrees/technology",
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
markdown_generator=DefaultMarkdownGenerator(
|
||||
content_filter=PruningContentFilter(
|
||||
threshold=0.48, threshold_type="fixed", min_word_threshold=0
|
||||
),
|
||||
options={"ignore_links": True},
|
||||
),
|
||||
# markdown_generator=DefaultMarkdownGenerator(
|
||||
# content_filter=BM25ContentFilter(user_query="", bm25_threshold=1.0),
|
||||
# options={
|
||||
# "ignore_links": True
|
||||
# }
|
||||
# ),
|
||||
)
|
||||
|
||||
if result.success:
|
||||
print(len(result.markdown.raw_markdown))
|
||||
print(len(result.markdown.markdown_with_citations))
|
||||
print(len(result.markdown.fit_markdown))
|
||||
|
||||
# Save clean html
|
||||
with open(os.path.join(__location__, "output/cleaned_html.html"), "w") as f:
|
||||
f.write(result.cleaned_html)
|
||||
|
||||
with open(
|
||||
os.path.join(__location__, "output/output_raw_markdown.md"), "w"
|
||||
) as f:
|
||||
f.write(result.markdown.raw_markdown)
|
||||
|
||||
with open(
|
||||
os.path.join(__location__, "output/output_markdown_with_citations.md"),
|
||||
"w",
|
||||
) as f:
|
||||
f.write(result.markdown.markdown_with_citations)
|
||||
|
||||
with open(
|
||||
os.path.join(__location__, "output/output_fit_markdown.md"), "w"
|
||||
) as f:
|
||||
f.write(result.markdown.fit_markdown)
|
||||
|
||||
print("Done")
|
||||
|
||||
|
||||
async def main():
|
||||
# await extract_structured_data_using_llm("openai/gpt-4o", os.getenv("OPENAI_API_KEY"))
|
||||
|
||||
# await simple_crawl()
|
||||
# await simple_example_with_running_js_code()
|
||||
# await simple_example_with_css_selector()
|
||||
# # await use_proxy()
|
||||
# await capture_and_save_screenshot("https://www.example.com", os.path.join(__location__, "tmp/example_screenshot.jpg"))
|
||||
# await extract_structured_data_using_css_extractor()
|
||||
|
||||
# LLM extraction examples
|
||||
# await extract_structured_data_using_llm()
|
||||
# await extract_structured_data_using_llm("huggingface/meta-llama/Meta-Llama-3.1-8B-Instruct", os.getenv("HUGGINGFACE_API_KEY"))
|
||||
# await extract_structured_data_using_llm("ollama/llama3.2")
|
||||
|
||||
# You always can pass custom headers to the extraction strategy
|
||||
# custom_headers = {
|
||||
# "Authorization": "Bearer your-custom-token",
|
||||
# "X-Custom-Header": "Some-Value"
|
||||
# }
|
||||
# await extract_structured_data_using_llm(extra_headers=custom_headers)
|
||||
|
||||
# await crawl_dynamic_content_pages_method_1()
|
||||
# await crawl_dynamic_content_pages_method_2()
|
||||
await crawl_dynamic_content_pages_method_3()
|
||||
|
||||
# await crawl_custom_browser_type()
|
||||
|
||||
# await speed_comparison()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
412
docs/examples/quickstart_examples_set_1.py
Normal file
@@ -0,0 +1,412 @@
import asyncio
import os
import json
import base64
from pathlib import Path
from typing import List
from crawl4ai import ProxyConfig

from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, CacheMode, CrawlResult
from crawl4ai import RoundRobinProxyStrategy
from crawl4ai import JsonCssExtractionStrategy, LLMExtractionStrategy
from crawl4ai import LLMConfig
from crawl4ai import PruningContentFilter, BM25ContentFilter
from crawl4ai import DefaultMarkdownGenerator
from crawl4ai import BFSDeepCrawlStrategy, DomainFilter, FilterChain
from crawl4ai import BrowserConfig

__cur_dir__ = Path(__file__).parent

async def demo_basic_crawl():
    """Basic web crawling with markdown generation"""
    print("\n=== 1. Basic Web Crawling ===")
    async with AsyncWebCrawler(config=BrowserConfig(
        viewport_height=800,
        viewport_width=1200,
        headless=True,
        verbose=True,
    )) as crawler:
        results: List[CrawlResult] = await crawler.arun(
            url="https://news.ycombinator.com/"
        )

        for i, result in enumerate(results):
            print(f"Result {i + 1}:")
            print(f"Success: {result.success}")
            if result.success:
                print(f"Markdown length: {len(result.markdown.raw_markdown)} chars")
                print(f"First 100 chars: {result.markdown.raw_markdown[:100]}...")
            else:
                print("Failed to crawl the URL")

async def demo_parallel_crawl():
|
||||
"""Crawl multiple URLs in parallel"""
|
||||
print("\n=== 2. Parallel Crawling ===")
|
||||
|
||||
urls = [
|
||||
"https://news.ycombinator.com/",
|
||||
"https://example.com/",
|
||||
"https://httpbin.org/html",
|
||||
]
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
results: List[CrawlResult] = await crawler.arun_many(
|
||||
urls=urls,
|
||||
)
|
||||
|
||||
print(f"Crawled {len(results)} URLs in parallel:")
|
||||
for i, result in enumerate(results):
|
||||
print(
|
||||
f" {i + 1}. {result.url} - {'Success' if result.success else 'Failed'}"
|
||||
)
|
||||
|
||||
async def demo_fit_markdown():
|
||||
"""Generate focused markdown with LLM content filter"""
|
||||
print("\n=== 3. Fit Markdown with LLM Content Filter ===")
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result: CrawlResult = await crawler.arun(
|
||||
url = "https://en.wikipedia.org/wiki/Python_(programming_language)",
|
||||
config=CrawlerRunConfig(
|
||||
markdown_generator=DefaultMarkdownGenerator(
|
||||
content_filter=PruningContentFilter()
|
||||
)
|
||||
),
|
||||
)
|
||||
|
||||
# Print stats and save the fit markdown
|
||||
print(f"Raw: {len(result.markdown.raw_markdown)} chars")
|
||||
print(f"Fit: {len(result.markdown.fit_markdown)} chars")
|
||||
|
||||
async def demo_llm_structured_extraction_no_schema():
    """Extract structured data with an LLM, without defining a Pydantic model"""
    print("\n=== 4. LLM Structured Extraction (no schema class) ===")
    # Create a simple LLM extraction strategy (no Pydantic schema class required)
|
||||
extraction_strategy = LLMExtractionStrategy(
|
||||
llm_config=LLMConfig(
|
||||
provider="groq/qwen-2.5-32b",
|
||||
api_token="env:GROQ_API_KEY",
|
||||
),
|
||||
instruction="This is news.ycombinator.com, extract all news, and for each, I want title, source url, number of comments.",
|
||||
extract_type="schema",
|
||||
schema="{title: string, url: string, comments: int}",
|
||||
extra_args={
|
||||
"temperature": 0.0,
|
||||
"max_tokens": 4096,
|
||||
},
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
config = CrawlerRunConfig(extraction_strategy=extraction_strategy)
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
results: List[CrawlResult] = await crawler.arun(
|
||||
"https://news.ycombinator.com/", config=config
|
||||
)
|
||||
|
||||
for result in results:
|
||||
print(f"URL: {result.url}")
|
||||
print(f"Success: {result.success}")
|
||||
if result.success:
|
||||
data = json.loads(result.extracted_content)
|
||||
print(json.dumps(data, indent=2))
|
||||
else:
|
||||
print("Failed to extract structured data")
|
||||
|
||||
async def demo_css_structured_extraction_no_schema():
|
||||
"""Extract structured data using CSS selectors"""
|
||||
print("\n=== 5. CSS-Based Structured Extraction ===")
|
||||
# Sample HTML for schema generation (one-time cost)
|
||||
sample_html = """
|
||||
<div class="body-post clear">
|
||||
<a class="story-link" href="https://thehackernews.com/2025/04/malicious-python-packages-on-pypi.html">
|
||||
<div class="clear home-post-box cf">
|
||||
<div class="home-img clear">
|
||||
<div class="img-ratio">
|
||||
<img alt="..." src="...">
|
||||
</div>
|
||||
</div>
|
||||
<div class="clear home-right">
|
||||
<h2 class="home-title">Malicious Python Packages on PyPI Downloaded 39,000+ Times, Steal Sensitive Data</h2>
|
||||
<div class="item-label">
|
||||
<span class="h-datetime"><i class="icon-font icon-calendar"></i>Apr 05, 2025</span>
|
||||
<span class="h-tags">Malware / Supply Chain Attack</span>
|
||||
</div>
|
||||
<div class="home-desc"> Cybersecurity researchers have...</div>
|
||||
</div>
|
||||
</div>
|
||||
</a>
|
||||
</div>
|
||||
"""
|
||||
|
||||
# Check if schema file exists
|
||||
schema_file_path = f"{__cur_dir__}/tmp/schema.json"
|
||||
if os.path.exists(schema_file_path):
|
||||
with open(schema_file_path, "r") as f:
|
||||
schema = json.load(f)
|
||||
else:
|
||||
# Generate schema using LLM (one-time setup)
|
||||
schema = JsonCssExtractionStrategy.generate_schema(
|
||||
html=sample_html,
|
||||
llm_config=LLMConfig(
|
||||
provider="groq/qwen-2.5-32b",
|
||||
api_token="env:GROQ_API_KEY",
|
||||
),
|
||||
query="From https://thehackernews.com/, I have shared a sample of one news div with a title, date, and description. Please generate a schema for this news div.",
|
||||
)
|
||||
|
||||
print(f"Generated schema: {json.dumps(schema, indent=2)}")
|
||||
    # Save the schema to a file and reuse it for future extractions; this way the LLM is only called once
|
||||
with open(f"{__cur_dir__}/tmp/schema.json", "w") as f:
|
||||
json.dump(schema, f, indent=2)
|
||||
|
||||
# Create no-LLM extraction strategy with the generated schema
|
||||
extraction_strategy = JsonCssExtractionStrategy(schema)
|
||||
config = CrawlerRunConfig(extraction_strategy=extraction_strategy)
|
||||
|
||||
# Use the fast CSS extraction (no LLM calls during extraction)
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
results: List[CrawlResult] = await crawler.arun(
|
||||
"https://thehackernews.com", config=config
|
||||
)
|
||||
|
||||
for result in results:
|
||||
print(f"URL: {result.url}")
|
||||
print(f"Success: {result.success}")
|
||||
if result.success:
|
||||
data = json.loads(result.extracted_content)
|
||||
print(json.dumps(data, indent=2))
|
||||
else:
|
||||
print("Failed to extract structured data")
|
||||
|
||||
async def demo_deep_crawl():
|
||||
"""Deep crawling with BFS strategy"""
|
||||
print("\n=== 6. Deep Crawling ===")
|
||||
|
||||
filter_chain = FilterChain([DomainFilter(allowed_domains=["crawl4ai.com"])])
|
||||
|
||||
deep_crawl_strategy = BFSDeepCrawlStrategy(
|
||||
max_depth=1, max_pages=5, filter_chain=filter_chain
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
results: List[CrawlResult] = await crawler.arun(
|
||||
url="https://docs.crawl4ai.com",
|
||||
config=CrawlerRunConfig(deep_crawl_strategy=deep_crawl_strategy),
|
||||
)
|
||||
|
||||
print(f"Deep crawl returned {len(results)} pages:")
|
||||
for i, result in enumerate(results):
|
||||
depth = result.metadata.get("depth", "unknown")
|
||||
print(f" {i + 1}. {result.url} (Depth: {depth})")
|
||||
|
||||
async def demo_js_interaction():
|
||||
"""Execute JavaScript to load more content"""
|
||||
print("\n=== 7. JavaScript Interaction ===")
|
||||
|
||||
# A simple page that needs JS to reveal content
|
||||
async with AsyncWebCrawler(config=BrowserConfig(headless=False)) as crawler:
|
||||
# Initial load
|
||||
|
||||
news_schema = {
|
||||
"name": "news",
|
||||
"baseSelector": "tr.athing",
|
||||
"fields": [
|
||||
{
|
||||
"name": "title",
|
||||
"selector": "span.titleline",
|
||||
"type": "text",
|
||||
}
|
||||
],
|
||||
}
|
||||
results: List[CrawlResult] = await crawler.arun(
|
||||
url="https://news.ycombinator.com",
|
||||
config=CrawlerRunConfig(
|
||||
session_id="hn_session", # Keep session
|
||||
extraction_strategy=JsonCssExtractionStrategy(schema=news_schema),
|
||||
),
|
||||
)
|
||||
|
||||
news = []
|
||||
for result in results:
|
||||
if result.success:
|
||||
data = json.loads(result.extracted_content)
|
||||
news.extend(data)
|
||||
print(json.dumps(data, indent=2))
|
||||
else:
|
||||
print("Failed to extract structured data")
|
||||
|
||||
print(f"Initial items: {len(news)}")
|
||||
|
||||
# Click "More" link
|
||||
more_config = CrawlerRunConfig(
|
||||
js_code="document.querySelector('a.morelink').click();",
|
||||
js_only=True, # Continue in same page
|
||||
session_id="hn_session", # Keep session
|
||||
extraction_strategy=JsonCssExtractionStrategy(
|
||||
schema=news_schema,
|
||||
),
|
||||
)
|
||||
|
||||
        results: List[CrawlResult] = await crawler.arun(
            url="https://news.ycombinator.com", config=more_config
        )

        # Extract the newly loaded items from the same session
        for result in results:
|
||||
if result.success:
|
||||
data = json.loads(result.extracted_content)
|
||||
news.extend(data)
|
||||
print(json.dumps(data, indent=2))
|
||||
else:
|
||||
print("Failed to extract structured data")
|
||||
print(f"Total items: {len(news)}")
|
||||
|
||||
async def demo_media_and_links():
|
||||
"""Extract media and links from a page"""
|
||||
print("\n=== 8. Media and Links Extraction ===")
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
        results: List[CrawlResult] = await crawler.arun("https://en.wikipedia.org/wiki/Main_Page")

        for i, result in enumerate(results):
|
||||
# Extract and save all images
|
||||
images = result.media.get("images", [])
|
||||
print(f"Found {len(images)} images")
|
||||
|
||||
# Extract and save all links (internal and external)
|
||||
internal_links = result.links.get("internal", [])
|
||||
external_links = result.links.get("external", [])
|
||||
print(f"Found {len(internal_links)} internal links")
|
||||
print(f"Found {len(external_links)} external links")
|
||||
|
||||
# Print some of the images and links
|
||||
for image in images[:3]:
|
||||
print(f"Image: {image['src']}")
|
||||
for link in internal_links[:3]:
|
||||
print(f"Internal link: {link['href']}")
|
||||
for link in external_links[:3]:
|
||||
print(f"External link: {link['href']}")
|
||||
|
||||
# # Save everything to files
|
||||
with open(f"{__cur_dir__}/tmp/images.json", "w") as f:
|
||||
json.dump(images, f, indent=2)
|
||||
|
||||
with open(f"{__cur_dir__}/tmp/links.json", "w") as f:
|
||||
json.dump(
|
||||
{"internal": internal_links, "external": external_links},
|
||||
f,
|
||||
indent=2,
|
||||
)
|
||||
|
||||
async def demo_screenshot_and_pdf():
|
||||
"""Capture screenshot and PDF of a page"""
|
||||
print("\n=== 9. Screenshot and PDF Capture ===")
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
        results: List[CrawlResult] = await crawler.arun(
            # url="https://example.com",
            url="https://en.wikipedia.org/wiki/Giant_anteater",
            config=CrawlerRunConfig(screenshot=True, pdf=True),
        )

        for i, result in enumerate(results):
|
||||
# if result.screenshot_data:
|
||||
if result.screenshot:
|
||||
# Save screenshot
|
||||
screenshot_path = f"{__cur_dir__}/tmp/example_screenshot.png"
|
||||
with open(screenshot_path, "wb") as f:
|
||||
f.write(base64.b64decode(result.screenshot))
|
||||
print(f"Screenshot saved to {screenshot_path}")
|
||||
|
||||
# if result.pdf_data:
|
||||
if result.pdf:
|
||||
# Save PDF
|
||||
pdf_path = f"{__cur_dir__}/tmp/example.pdf"
|
||||
with open(pdf_path, "wb") as f:
|
||||
f.write(result.pdf)
|
||||
print(f"PDF saved to {pdf_path}")
|
||||
|
||||
async def demo_proxy_rotation():
|
||||
"""Proxy rotation for multiple requests"""
|
||||
print("\n=== 10. Proxy Rotation ===")
|
||||
|
||||
# Example proxies (replace with real ones)
|
||||
proxies = [
|
||||
ProxyConfig(server="http://proxy1.example.com:8080"),
|
||||
ProxyConfig(server="http://proxy2.example.com:8080"),
|
||||
]
|
||||
|
||||
proxy_strategy = RoundRobinProxyStrategy(proxies)
|
||||
|
||||
print(f"Using {len(proxies)} proxies in rotation")
|
||||
print(
|
||||
"Note: This example uses placeholder proxies - replace with real ones to test"
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
config = CrawlerRunConfig(
|
||||
proxy_rotation_strategy=proxy_strategy
|
||||
)
|
||||
|
||||
# In a real scenario, these would be run and the proxies would rotate
|
||||
print("In a real scenario, requests would rotate through the available proxies")
|
||||
|
||||
async def demo_raw_html_and_file():
|
||||
"""Process raw HTML and local files"""
|
||||
print("\n=== 11. Raw HTML and Local Files ===")
|
||||
|
||||
raw_html = """
|
||||
<html><body>
|
||||
<h1>Sample Article</h1>
|
||||
<p>This is sample content for testing Crawl4AI's raw HTML processing.</p>
|
||||
</body></html>
|
||||
"""
|
||||
|
||||
# Save to file
|
||||
file_path = Path("docs/examples/tmp/sample.html").absolute()
|
||||
with open(file_path, "w") as f:
|
||||
f.write(raw_html)
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
# Crawl raw HTML
|
||||
raw_result = await crawler.arun(
|
||||
url="raw:" + raw_html, config=CrawlerRunConfig(cache_mode=CacheMode.BYPASS)
|
||||
)
|
||||
print("Raw HTML processing:")
|
||||
print(f" Markdown: {raw_result.markdown.raw_markdown[:50]}...")
|
||||
|
||||
# Crawl local file
|
||||
file_result = await crawler.arun(
|
||||
url=f"file://{file_path}",
|
||||
config=CrawlerRunConfig(cache_mode=CacheMode.BYPASS),
|
||||
)
|
||||
print("\nLocal file processing:")
|
||||
print(f" Markdown: {file_result.markdown.raw_markdown[:50]}...")
|
||||
|
||||
# Clean up
|
||||
os.remove(file_path)
|
||||
print(f"Processed both raw HTML and local file ({file_path})")
|
||||
|
||||
async def main():
    """Run all demo functions sequentially"""
    print("=== Comprehensive Crawl4AI Demo ===")
    print("Note: Some examples require API keys or other configurations")

    # Run all demos
    await demo_basic_crawl()
    await demo_parallel_crawl()
    await demo_fit_markdown()
    await demo_llm_structured_extraction_no_schema()
    await demo_css_structured_extraction_no_schema()
    await demo_deep_crawl()
    await demo_js_interaction()
    await demo_media_and_links()
    await demo_screenshot_and_pdf()
    # await demo_proxy_rotation()
    await demo_raw_html_and_file()

    # Clean up any temp files that may have been created
    print("\n=== Demo Complete ===")
    print("Check for any generated files (screenshots, PDFs) in the current directory")


if __name__ == "__main__":
    asyncio.run(main())
|
||||
562
docs/examples/quickstart_examples_set_2.py
Normal file
@@ -0,0 +1,562 @@
import os, sys

from crawl4ai.types import LLMConfig

sys.path.append(
    os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
)

import asyncio
import time
import json
import re
from typing import Dict
from bs4 import BeautifulSoup
from pydantic import BaseModel, Field
from crawl4ai import AsyncWebCrawler, CacheMode, BrowserConfig, CrawlerRunConfig
from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator
from crawl4ai.content_filter_strategy import PruningContentFilter
from crawl4ai.extraction_strategy import (
    JsonCssExtractionStrategy,
    LLMExtractionStrategy,
)

__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))

print("Crawl4AI: Advanced Web Crawling and Data Extraction")
print("GitHub Repository: https://github.com/unclecode/crawl4ai")
print("Twitter: @unclecode")
print("Website: https://crawl4ai.com")


# Basic Example - Simple Crawl
|
||||
async def simple_crawl():
|
||||
print("\n--- Basic Usage ---")
|
||||
browser_config = BrowserConfig(headless=True)
|
||||
crawler_config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS)
|
||||
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://www.nbcnews.com/business", config=crawler_config
|
||||
)
|
||||
print(result.markdown[:500])
|
||||
|
||||
|
||||
async def clean_content():
|
||||
crawler_config = CrawlerRunConfig(
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
excluded_tags=["nav", "footer", "aside"],
|
||||
remove_overlay_elements=True,
|
||||
markdown_generator=DefaultMarkdownGenerator(
|
||||
content_filter=PruningContentFilter(
|
||||
threshold=0.48, threshold_type="fixed", min_word_threshold=0
|
||||
),
|
||||
options={"ignore_links": True},
|
||||
),
|
||||
)
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://en.wikipedia.org/wiki/Apple",
|
||||
config=crawler_config,
|
||||
)
|
||||
full_markdown_length = len(result.markdown.raw_markdown)
|
||||
fit_markdown_length = len(result.markdown.fit_markdown)
|
||||
print(f"Full Markdown Length: {full_markdown_length}")
|
||||
print(f"Fit Markdown Length: {fit_markdown_length}")
|
||||
|
||||
|
||||
async def link_analysis():
|
||||
crawler_config = CrawlerRunConfig(
|
||||
cache_mode=CacheMode.ENABLED,
|
||||
exclude_external_links=True,
|
||||
exclude_social_media_links=True,
|
||||
)
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://www.nbcnews.com/business",
|
||||
config=crawler_config,
|
||||
)
|
||||
print(f"Found {len(result.links['internal'])} internal links")
|
||||
print(f"Found {len(result.links['external'])} external links")
|
||||
|
||||
for link in result.links["internal"][:5]:
|
||||
print(f"Href: {link['href']}\nText: {link['text']}\n")
|
||||
|
||||
|
||||
# JavaScript Execution Example
|
||||
async def simple_example_with_running_js_code():
|
||||
print("\n--- Executing JavaScript and Using CSS Selectors ---")
|
||||
|
||||
browser_config = BrowserConfig(headless=True, java_script_enabled=True)
|
||||
|
||||
crawler_config = CrawlerRunConfig(
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
js_code="const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More')); loadMoreButton && loadMoreButton.click();",
|
||||
# wait_for="() => { return Array.from(document.querySelectorAll('article.tease-card')).length > 10; }"
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://www.nbcnews.com/business", config=crawler_config
|
||||
)
|
||||
print(result.markdown[:500])
|
||||
|
||||
|
||||
# CSS Selector Example
|
||||
async def simple_example_with_css_selector():
|
||||
print("\n--- Using CSS Selectors ---")
|
||||
browser_config = BrowserConfig(headless=True)
|
||||
crawler_config = CrawlerRunConfig(
|
||||
cache_mode=CacheMode.BYPASS, css_selector=".wide-tease-item__description"
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://www.nbcnews.com/business", config=crawler_config
|
||||
)
|
||||
print(result.markdown[:500])
|
||||
|
||||
|
||||
async def media_handling():
|
||||
crawler_config = CrawlerRunConfig(
|
||||
cache_mode=CacheMode.BYPASS, exclude_external_images=True, screenshot=True
|
||||
)
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://www.nbcnews.com/business", config=crawler_config
|
||||
)
|
||||
for img in result.media["images"][:5]:
|
||||
print(f"Image URL: {img['src']}, Alt: {img['alt']}, Score: {img['score']}")
|
||||
|
||||
|
||||
async def custom_hook_workflow(verbose=True):
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
# Set a 'before_goto' hook to run custom code just before navigation
|
||||
crawler.crawler_strategy.set_hook(
|
||||
"before_goto",
|
||||
lambda page, context: print("[Hook] Preparing to navigate..."),
|
||||
)
|
||||
|
||||
# Perform the crawl operation
|
||||
result = await crawler.arun(url="https://crawl4ai.com")
|
||||
print(result.markdown.raw_markdown[:500].replace("\n", " -- "))
|
||||
|
||||
|
||||
# Proxy Example
|
||||
async def use_proxy():
|
||||
print("\n--- Using a Proxy ---")
|
||||
browser_config = BrowserConfig(
|
||||
headless=True,
|
||||
proxy_config={
|
||||
"server": "http://proxy.example.com:8080",
|
||||
"username": "username",
|
||||
"password": "password",
|
||||
},
|
||||
)
|
||||
crawler_config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS)
|
||||
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://www.nbcnews.com/business", config=crawler_config
|
||||
)
|
||||
if result.success:
|
||||
print(result.markdown[:500])
|
||||
|
||||
|
||||
# Screenshot Example
|
||||
async def capture_and_save_screenshot(url: str, output_path: str):
|
||||
browser_config = BrowserConfig(headless=True)
|
||||
crawler_config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS, screenshot=True)
|
||||
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
result = await crawler.arun(url=url, config=crawler_config)
|
||||
|
||||
if result.success and result.screenshot:
|
||||
import base64
|
||||
|
||||
screenshot_data = base64.b64decode(result.screenshot)
|
||||
with open(output_path, "wb") as f:
|
||||
f.write(screenshot_data)
|
||||
print(f"Screenshot saved successfully to {output_path}")
|
||||
else:
|
||||
print("Failed to capture screenshot")
|
||||
|
||||
|
||||
# LLM Extraction Example
|
||||
class OpenAIModelFee(BaseModel):
|
||||
model_name: str = Field(..., description="Name of the OpenAI model.")
|
||||
input_fee: str = Field(..., description="Fee for input token for the OpenAI model.")
|
||||
output_fee: str = Field(
|
||||
..., description="Fee for output token for the OpenAI model."
|
||||
)
|
||||
|
||||
|
||||
async def extract_structured_data_using_llm(
|
||||
provider: str, api_token: str = None, extra_headers: Dict[str, str] = None
|
||||
):
|
||||
print(f"\n--- Extracting Structured Data with {provider} ---")
|
||||
|
||||
if api_token is None and provider != "ollama":
|
||||
print(f"API token is required for {provider}. Skipping this example.")
|
||||
return
|
||||
|
||||
browser_config = BrowserConfig(headless=True)
|
||||
|
||||
extra_args = {"temperature": 0, "top_p": 0.9, "max_tokens": 2000}
|
||||
if extra_headers:
|
||||
extra_args["extra_headers"] = extra_headers
|
||||
|
||||
crawler_config = CrawlerRunConfig(
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
word_count_threshold=1,
|
||||
page_timeout=80000,
|
||||
extraction_strategy=LLMExtractionStrategy(
|
||||
llm_config=LLMConfig(provider=provider,api_token=api_token),
|
||||
schema=OpenAIModelFee.model_json_schema(),
|
||||
extraction_type="schema",
|
||||
instruction="""From the crawled content, extract all mentioned model names along with their fees for input and output tokens.
|
||||
Do not miss any models in the entire content.""",
|
||||
extra_args=extra_args,
|
||||
),
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://openai.com/api/pricing/", config=crawler_config
|
||||
)
|
||||
print(result.extracted_content)
|
||||
|
||||
|
||||
# CSS Extraction Example
|
||||
async def extract_structured_data_using_css_extractor():
|
||||
print("\n--- Using JsonCssExtractionStrategy for Fast Structured Output ---")
|
||||
schema = {
|
||||
"name": "KidoCode Courses",
|
||||
"baseSelector": "section.charge-methodology .framework-collection-item.w-dyn-item",
|
||||
"fields": [
|
||||
{
|
||||
"name": "section_title",
|
||||
"selector": "h3.heading-50",
|
||||
"type": "text",
|
||||
},
|
||||
{
|
||||
"name": "section_description",
|
||||
"selector": ".charge-content",
|
||||
"type": "text",
|
||||
},
|
||||
{
|
||||
"name": "course_name",
|
||||
"selector": ".text-block-93",
|
||||
"type": "text",
|
||||
},
|
||||
{
|
||||
"name": "course_description",
|
||||
"selector": ".course-content-text",
|
||||
"type": "text",
|
||||
},
|
||||
{
|
||||
"name": "course_icon",
|
||||
"selector": ".image-92",
|
||||
"type": "attribute",
|
||||
"attribute": "src",
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
browser_config = BrowserConfig(headless=True, java_script_enabled=True)
|
||||
|
||||
js_click_tabs = """
|
||||
(async () => {
|
||||
const tabs = document.querySelectorAll("section.charge-methodology .tabs-menu-3 > div");
|
||||
for(let tab of tabs) {
|
||||
tab.scrollIntoView();
|
||||
tab.click();
|
||||
await new Promise(r => setTimeout(r, 500));
|
||||
}
|
||||
})();
|
||||
"""
|
||||
|
||||
crawler_config = CrawlerRunConfig(
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
extraction_strategy=JsonCssExtractionStrategy(schema),
|
||||
js_code=[js_click_tabs],
|
||||
delay_before_return_html=1
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://www.kidocode.com/degrees/technology", config=crawler_config
|
||||
)
|
||||
|
||||
companies = json.loads(result.extracted_content)
|
||||
print(f"Successfully extracted {len(companies)} companies")
|
||||
print(json.dumps(companies[0], indent=2))
|
||||
|
||||
|
||||
# Dynamic Content Examples - Method 1
|
||||
async def crawl_dynamic_content_pages_method_1():
|
||||
print("\n--- Advanced Multi-Page Crawling with JavaScript Execution ---")
|
||||
first_commit = ""
|
||||
|
||||
async def on_execution_started(page, **kwargs):
|
||||
nonlocal first_commit
|
||||
try:
|
||||
while True:
|
||||
await page.wait_for_selector("li.Box-sc-g0xbh4-0 h4")
|
||||
commit = await page.query_selector("li.Box-sc-g0xbh4-0 h4")
|
||||
commit = await commit.evaluate("(element) => element.textContent")
|
||||
commit = re.sub(r"\s+", "", commit)
|
||||
if commit and commit != first_commit:
|
||||
first_commit = commit
|
||||
break
|
||||
await asyncio.sleep(0.5)
|
||||
except Exception as e:
|
||||
print(f"Warning: New content didn't appear after JavaScript execution: {e}")
|
||||
|
||||
browser_config = BrowserConfig(headless=False, java_script_enabled=True)
|
||||
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
crawler.crawler_strategy.set_hook("on_execution_started", on_execution_started)
|
||||
|
||||
url = "https://github.com/microsoft/TypeScript/commits/main"
|
||||
session_id = "typescript_commits_session"
|
||||
all_commits = []
|
||||
|
||||
js_next_page = """
|
||||
const button = document.querySelector('a[data-testid="pagination-next-button"]');
|
||||
if (button) button.click();
|
||||
"""
|
||||
|
||||
for page in range(3):
|
||||
crawler_config = CrawlerRunConfig(
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
css_selector="li.Box-sc-g0xbh4-0",
|
||||
js_code=js_next_page if page > 0 else None,
|
||||
js_only=page > 0,
|
||||
session_id=session_id,
|
||||
)
|
||||
|
||||
result = await crawler.arun(url=url, config=crawler_config)
|
||||
assert result.success, f"Failed to crawl page {page + 1}"
|
||||
|
||||
soup = BeautifulSoup(result.cleaned_html, "html.parser")
|
||||
commits = soup.select("li")
|
||||
all_commits.extend(commits)
|
||||
|
||||
print(f"Page {page + 1}: Found {len(commits)} commits")
|
||||
|
||||
print(f"Successfully crawled {len(all_commits)} commits across 3 pages")
|
||||
|
||||
|
||||
# Dynamic Content Examples - Method 2
|
||||
async def crawl_dynamic_content_pages_method_2():
|
||||
print("\n--- Advanced Multi-Page Crawling with JavaScript Execution ---")
|
||||
|
||||
browser_config = BrowserConfig(headless=False, java_script_enabled=True)
|
||||
|
||||
js_next_page_and_wait = """
|
||||
(async () => {
|
||||
const getCurrentCommit = () => {
|
||||
const commits = document.querySelectorAll('li.Box-sc-g0xbh4-0 h4');
|
||||
return commits.length > 0 ? commits[0].textContent.trim() : null;
|
||||
};
|
||||
|
||||
const initialCommit = getCurrentCommit();
|
||||
const button = document.querySelector('a[data-testid="pagination-next-button"]');
|
||||
if (button) button.click();
|
||||
|
||||
while (true) {
|
||||
await new Promise(resolve => setTimeout(resolve, 100));
|
||||
const newCommit = getCurrentCommit();
|
||||
if (newCommit && newCommit !== initialCommit) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
})();
|
||||
"""
|
||||
|
||||
schema = {
|
||||
"name": "Commit Extractor",
|
||||
"baseSelector": "li.Box-sc-g0xbh4-0",
|
||||
"fields": [
|
||||
{
|
||||
"name": "title",
|
||||
"selector": "h4.markdown-title",
|
||||
"type": "text",
|
||||
"transform": "strip",
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
url = "https://github.com/microsoft/TypeScript/commits/main"
|
||||
session_id = "typescript_commits_session"
|
||||
all_commits = []
|
||||
|
||||
extraction_strategy = JsonCssExtractionStrategy(schema)
|
||||
|
||||
for page in range(3):
|
||||
crawler_config = CrawlerRunConfig(
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
css_selector="li.Box-sc-g0xbh4-0",
|
||||
extraction_strategy=extraction_strategy,
|
||||
js_code=js_next_page_and_wait if page > 0 else None,
|
||||
js_only=page > 0,
|
||||
session_id=session_id,
|
||||
)
|
||||
|
||||
result = await crawler.arun(url=url, config=crawler_config)
|
||||
assert result.success, f"Failed to crawl page {page + 1}"
|
||||
|
||||
commits = json.loads(result.extracted_content)
|
||||
all_commits.extend(commits)
|
||||
print(f"Page {page + 1}: Found {len(commits)} commits")
|
||||
|
||||
print(f"Successfully crawled {len(all_commits)} commits across 3 pages")
|
||||
|
||||
|
||||
async def cosine_similarity_extraction():
|
||||
from crawl4ai.extraction_strategy import CosineStrategy
|
||||
crawl_config = CrawlerRunConfig(
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
extraction_strategy=CosineStrategy(
|
||||
word_count_threshold=10,
|
||||
max_dist=0.2, # Maximum distance between two words
|
||||
linkage_method="ward", # Linkage method for hierarchical clustering (ward, complete, average, single)
|
||||
top_k=3, # Number of top keywords to extract
|
||||
sim_threshold=0.3, # Similarity threshold for clustering
|
||||
semantic_filter="McDonald's economic impact, American consumer trends", # Keywords to filter the content semantically using embeddings
|
||||
verbose=True,
|
||||
),
|
||||
)
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://www.nbcnews.com/business/consumer/how-mcdonalds-e-coli-crisis-inflation-politics-reflect-american-story-rcna177156",
|
||||
config=crawl_config,
|
||||
)
|
||||
print(json.loads(result.extracted_content)[:5])
|
||||
|
||||
|
||||
# Browser Comparison
|
||||
async def crawl_custom_browser_type():
|
||||
print("\n--- Browser Comparison ---")
|
||||
|
||||
# Firefox
|
||||
browser_config_firefox = BrowserConfig(browser_type="firefox", headless=True)
|
||||
start = time.time()
|
||||
async with AsyncWebCrawler(config=browser_config_firefox) as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://www.example.com",
|
||||
config=CrawlerRunConfig(cache_mode=CacheMode.BYPASS),
|
||||
)
|
||||
print("Firefox:", time.time() - start)
|
||||
print(result.markdown[:500])
|
||||
|
||||
# WebKit
|
||||
browser_config_webkit = BrowserConfig(browser_type="webkit", headless=True)
|
||||
start = time.time()
|
||||
async with AsyncWebCrawler(config=browser_config_webkit) as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://www.example.com",
|
||||
config=CrawlerRunConfig(cache_mode=CacheMode.BYPASS),
|
||||
)
|
||||
print("WebKit:", time.time() - start)
|
||||
print(result.markdown[:500])
|
||||
|
||||
# Chromium (default)
|
||||
browser_config_chromium = BrowserConfig(browser_type="chromium", headless=True)
|
||||
start = time.time()
|
||||
async with AsyncWebCrawler(config=browser_config_chromium) as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://www.example.com",
|
||||
config=CrawlerRunConfig(cache_mode=CacheMode.BYPASS),
|
||||
)
|
||||
print("Chromium:", time.time() - start)
|
||||
print(result.markdown[:500])
|
||||
|
||||
|
||||
# Anti-Bot and User Simulation
|
||||
async def crawl_with_user_simulation():
|
||||
browser_config = BrowserConfig(
|
||||
headless=True,
|
||||
user_agent_mode="random",
|
||||
user_agent_generator_config={"device_type": "mobile", "os_type": "android"},
|
||||
)
|
||||
|
||||
crawler_config = CrawlerRunConfig(
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
magic=True,
|
||||
simulate_user=True,
|
||||
override_navigator=True,
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
result = await crawler.arun(url="YOUR-URL-HERE", config=crawler_config)
|
||||
print(result.markdown)
|
||||
|
||||
|
||||
async def ssl_certification():
|
||||
# Configure crawler to fetch SSL certificate
|
||||
config = CrawlerRunConfig(
|
||||
fetch_ssl_certificate=True,
|
||||
cache_mode=CacheMode.BYPASS, # Bypass cache to always get fresh certificates
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result = await crawler.arun(url="https://example.com", config=config)
|
||||
|
||||
if result.success and result.ssl_certificate:
|
||||
cert = result.ssl_certificate
|
||||
|
||||
tmp_dir = os.path.join(__location__, "tmp")
|
||||
os.makedirs(tmp_dir, exist_ok=True)
|
||||
|
||||
# 1. Access certificate properties directly
|
||||
print("\nCertificate Information:")
|
||||
print(f"Issuer: {cert.issuer.get('CN', '')}")
|
||||
print(f"Valid until: {cert.valid_until}")
|
||||
print(f"Fingerprint: {cert.fingerprint}")
|
||||
|
||||
# 2. Export certificate in different formats
|
||||
cert.to_json(os.path.join(tmp_dir, "certificate.json")) # For analysis
|
||||
print("\nCertificate exported to:")
|
||||
print(f"- JSON: {os.path.join(tmp_dir, 'certificate.json')}")
|
||||
|
||||
pem_data = cert.to_pem(
|
||||
os.path.join(tmp_dir, "certificate.pem")
|
||||
) # For web servers
|
||||
print(f"- PEM: {os.path.join(tmp_dir, 'certificate.pem')}")
|
||||
|
||||
der_data = cert.to_der(
|
||||
os.path.join(tmp_dir, "certificate.der")
|
||||
) # For Java apps
|
||||
print(f"- DER: {os.path.join(tmp_dir, 'certificate.der')}")
|
||||
|
||||
|
||||
# Main execution
async def main():
    # Basic examples
    await simple_crawl()
    await simple_example_with_running_js_code()
    await simple_example_with_css_selector()

    # Advanced examples
    await extract_structured_data_using_css_extractor()
    await extract_structured_data_using_llm(
        "openai/gpt-4o", os.getenv("OPENAI_API_KEY")
    )
    await crawl_dynamic_content_pages_method_1()
    await crawl_dynamic_content_pages_method_2()

    # Browser comparisons
    await crawl_custom_browser_type()

    # Screenshot example
    await capture_and_save_screenshot(
        "https://www.example.com",
        os.path.join(__location__, "tmp/example_screenshot.jpg"),
    )


if __name__ == "__main__":
    asyncio.run(main())
|
||||
@@ -1,405 +0,0 @@
|
||||
import os
|
||||
import time
|
||||
from crawl4ai.async_configs import LlmConfig
|
||||
from crawl4ai.web_crawler import WebCrawler
|
||||
from crawl4ai.chunking_strategy import *
|
||||
from crawl4ai.extraction_strategy import *
|
||||
from crawl4ai.crawler_strategy import *
|
||||
from rich import print
|
||||
from rich.console import Console
|
||||
from functools import lru_cache
|
||||
|
||||
console = Console()
|
||||
|
||||
|
||||
@lru_cache()
|
||||
def create_crawler():
|
||||
crawler = WebCrawler(verbose=True)
|
||||
crawler.warmup()
|
||||
return crawler
|
||||
|
||||
|
||||
def print_result(result):
|
||||
# Print each key in one line and just the first 10 characters of each one's value and three dots
|
||||
console.print("\t[bold]Result:[/bold]")
|
||||
for key, value in result.model_dump().items():
|
||||
if isinstance(value, str) and value:
|
||||
console.print(f"\t{key}: [green]{value[:20]}...[/green]")
|
||||
if result.extracted_content:
|
||||
items = json.loads(result.extracted_content)
|
||||
print(f"\t[bold]{len(items)} blocks is extracted![/bold]")
|
||||
|
||||
|
||||
def cprint(message, press_any_key=False):
|
||||
console.print(message)
|
||||
if press_any_key:
|
||||
console.print("Press any key to continue...", style="")
|
||||
input()
|
||||
|
||||
|
||||
def basic_usage(crawler):
|
||||
cprint(
|
||||
"🛠️ [bold cyan]Basic Usage: Simply provide a URL and let Crawl4ai do the magic![/bold cyan]"
|
||||
)
|
||||
result = crawler.run(url="https://www.nbcnews.com/business", only_text=True)
|
||||
cprint("[LOG] 📦 [bold yellow]Basic crawl result:[/bold yellow]")
|
||||
print_result(result)
|
||||
|
||||
|
||||
def basic_usage_some_params(crawler):
|
||||
cprint(
|
||||
"🛠️ [bold cyan]Basic Usage: Simply provide a URL and let Crawl4ai do the magic![/bold cyan]"
|
||||
)
|
||||
result = crawler.run(
|
||||
url="https://www.nbcnews.com/business", word_count_threshold=1, only_text=True
|
||||
)
|
||||
cprint("[LOG] 📦 [bold yellow]Basic crawl result:[/bold yellow]")
|
||||
print_result(result)
|
||||
|
||||
|
||||
def screenshot_usage(crawler):
|
||||
cprint("\n📸 [bold cyan]Let's take a screenshot of the page![/bold cyan]")
|
||||
result = crawler.run(url="https://www.nbcnews.com/business", screenshot=True)
|
||||
cprint("[LOG] 📦 [bold yellow]Screenshot result:[/bold yellow]")
|
||||
# Save the screenshot to a file
|
||||
with open("screenshot.png", "wb") as f:
|
||||
f.write(base64.b64decode(result.screenshot))
|
||||
cprint("Screenshot saved to 'screenshot.png'!")
|
||||
print_result(result)
|
||||
|
||||
|
||||
def understanding_parameters(crawler):
|
||||
cprint(
|
||||
"\n🧠 [bold cyan]Understanding 'bypass_cache' and 'include_raw_html' parameters:[/bold cyan]"
|
||||
)
|
||||
cprint(
|
||||
"By default, Crawl4ai caches the results of your crawls. This means that subsequent crawls of the same URL will be much faster! Let's see this in action."
|
||||
)
|
||||
|
||||
# First crawl (reads from cache)
|
||||
cprint("1️⃣ First crawl (caches the result):", True)
|
||||
start_time = time.time()
|
||||
result = crawler.run(url="https://www.nbcnews.com/business")
|
||||
end_time = time.time()
|
||||
cprint(
|
||||
f"[LOG] 📦 [bold yellow]First crawl took {end_time - start_time} seconds and result (from cache):[/bold yellow]"
|
||||
)
|
||||
print_result(result)
|
||||
|
||||
# Force to crawl again
|
||||
cprint("2️⃣ Second crawl (Force to crawl again):", True)
|
||||
start_time = time.time()
|
||||
result = crawler.run(url="https://www.nbcnews.com/business", bypass_cache=True)
|
||||
end_time = time.time()
|
||||
cprint(
|
||||
f"[LOG] 📦 [bold yellow]Second crawl took {end_time - start_time} seconds and result (forced to crawl):[/bold yellow]"
|
||||
)
|
||||
print_result(result)
|
||||
|
||||
|
||||
def add_chunking_strategy(crawler):
|
||||
# Adding a chunking strategy: RegexChunking
|
||||
cprint(
|
||||
"\n🧩 [bold cyan]Let's add a chunking strategy: RegexChunking![/bold cyan]",
|
||||
True,
|
||||
)
|
||||
cprint(
|
||||
"RegexChunking is a simple chunking strategy that splits the text based on a given regex pattern. Let's see it in action!"
|
||||
)
|
||||
result = crawler.run(
|
||||
url="https://www.nbcnews.com/business",
|
||||
chunking_strategy=RegexChunking(patterns=["\n\n"]),
|
||||
)
|
||||
cprint("[LOG] 📦 [bold yellow]RegexChunking result:[/bold yellow]")
|
||||
print_result(result)
|
||||
|
||||
# Adding another chunking strategy: NlpSentenceChunking
|
||||
cprint(
|
||||
"\n🔍 [bold cyan]Time to explore another chunking strategy: NlpSentenceChunking![/bold cyan]",
|
||||
True,
|
||||
)
|
||||
cprint(
|
||||
"NlpSentenceChunking uses NLP techniques to split the text into sentences. Let's see how it performs!"
|
||||
)
|
||||
result = crawler.run(
|
||||
url="https://www.nbcnews.com/business", chunking_strategy=NlpSentenceChunking()
|
||||
)
|
||||
cprint("[LOG] 📦 [bold yellow]NlpSentenceChunking result:[/bold yellow]")
|
||||
print_result(result)
|
||||
|
||||
|
||||
def add_extraction_strategy(crawler):
|
||||
# Adding an extraction strategy: CosineStrategy
|
||||
cprint(
|
||||
"\n🧠 [bold cyan]Let's get smarter with an extraction strategy: CosineStrategy![/bold cyan]",
|
||||
True,
|
||||
)
|
||||
cprint(
|
||||
"CosineStrategy uses cosine similarity to extract semantically similar blocks of text. Let's see it in action!"
|
||||
)
|
||||
result = crawler.run(
|
||||
url="https://www.nbcnews.com/business",
|
||||
extraction_strategy=CosineStrategy(
|
||||
word_count_threshold=10,
|
||||
max_dist=0.2,
|
||||
linkage_method="ward",
|
||||
top_k=3,
|
||||
sim_threshold=0.3,
|
||||
verbose=True,
|
||||
),
|
||||
)
|
||||
cprint("[LOG] 📦 [bold yellow]CosineStrategy result:[/bold yellow]")
|
||||
print_result(result)
|
||||
|
||||
# Using semantic_filter with CosineStrategy
|
||||
cprint(
|
||||
"You can pass other parameters like 'semantic_filter' to the CosineStrategy to extract semantically similar blocks of text. Let's see it in action!"
|
||||
)
|
||||
result = crawler.run(
|
||||
url="https://www.nbcnews.com/business",
|
||||
extraction_strategy=CosineStrategy(
|
||||
semantic_filter="inflation rent prices",
|
||||
),
|
||||
)
|
||||
cprint(
|
||||
"[LOG] 📦 [bold yellow]CosineStrategy result with semantic filter:[/bold yellow]"
|
||||
)
|
||||
print_result(result)
|
||||
|
||||
|
||||
def add_llm_extraction_strategy(crawler):
|
||||
# Adding an LLM extraction strategy without instructions
|
||||
cprint(
|
||||
"\n🤖 [bold cyan]Time to bring in the big guns: LLMExtractionStrategy without instructions![/bold cyan]",
|
||||
True,
|
||||
)
|
||||
cprint(
|
||||
"LLMExtractionStrategy uses a large language model to extract relevant information from the web page. Let's see it in action!"
|
||||
)
|
||||
result = crawler.run(
|
||||
url="https://www.nbcnews.com/business",
|
||||
extraction_strategy=LLMExtractionStrategy(
|
||||
llmConfig=LlmConfig(provider="openai/gpt-4o", api_token=os.getenv("OPENAI_API_KEY"))
|
||||
),
|
||||
)
|
||||
cprint(
|
||||
"[LOG] 📦 [bold yellow]LLMExtractionStrategy (no instructions) result:[/bold yellow]"
|
||||
)
|
||||
print_result(result)
|
||||
|
||||
# Adding an LLM extraction strategy with instructions
|
||||
cprint(
|
||||
"\n📜 [bold cyan]Let's make it even more interesting: LLMExtractionStrategy with instructions![/bold cyan]",
|
||||
True,
|
||||
)
|
||||
cprint(
|
||||
"Let's say we are only interested in financial news. Let's see how LLMExtractionStrategy performs with instructions!"
|
||||
)
|
||||
result = crawler.run(
|
||||
url="https://www.nbcnews.com/business",
|
||||
extraction_strategy=LLMExtractionStrategy(
|
||||
llmConfig=LlmConfig(provider="openai/gpt-4o",api_token=os.getenv("OPENAI_API_KEY")),
|
||||
instruction="I am interested in only financial news",
|
||||
),
|
||||
)
|
||||
cprint(
|
||||
"[LOG] 📦 [bold yellow]LLMExtractionStrategy (with instructions) result:[/bold yellow]"
|
||||
)
|
||||
print_result(result)
|
||||
|
||||
result = crawler.run(
|
||||
url="https://www.nbcnews.com/business",
|
||||
extraction_strategy=LLMExtractionStrategy(
|
||||
llmConfig=LlmConfig(provider="openai/gpt-4o",api_token=os.getenv("OPENAI_API_KEY")),
|
||||
instruction="Extract only content related to technology",
|
||||
),
|
||||
)
|
||||
cprint(
|
||||
"[LOG] 📦 [bold yellow]LLMExtractionStrategy (with technology instruction) result:[/bold yellow]"
|
||||
)
|
||||
print_result(result)
|
||||
|
||||
|
||||
def targeted_extraction(crawler):
|
||||
# Using a CSS selector to extract only H2 tags
|
||||
cprint(
|
||||
"\n🎯 [bold cyan]Targeted extraction: Let's use a CSS selector to extract only H2 tags![/bold cyan]",
|
||||
True,
|
||||
)
|
||||
result = crawler.run(url="https://www.nbcnews.com/business", css_selector="h2")
|
||||
cprint("[LOG] 📦 [bold yellow]CSS Selector (H2 tags) result:[/bold yellow]")
|
||||
print_result(result)
|
||||
|
||||
|
||||
def interactive_extraction(crawler):
|
||||
# Passing JavaScript code to interact with the page
|
||||
cprint(
|
||||
"\n🖱️ [bold cyan]Let's get interactive: Passing JavaScript code to click 'Load More' button![/bold cyan]",
|
||||
True,
|
||||
)
|
||||
cprint(
|
||||
"In this example we try to click the 'Load More' button on the page using JavaScript code."
|
||||
)
|
||||
js_code = """
|
||||
const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More'));
|
||||
loadMoreButton && loadMoreButton.click();
|
||||
"""
|
||||
# crawler_strategy = LocalSeleniumCrawlerStrategy(js_code=js_code)
|
||||
# crawler = WebCrawler(crawler_strategy=crawler_strategy, always_by_pass_cache=True)
|
||||
result = crawler.run(url="https://www.nbcnews.com/business", js=js_code)
|
||||
cprint(
|
||||
"[LOG] 📦 [bold yellow]JavaScript Code (Load More button) result:[/bold yellow]"
|
||||
)
|
||||
print_result(result)
|
||||
|
||||
|
||||
def multiple_scripts(crawler):
|
||||
# Passing multiple JavaScript snippets to interact with the page
|
||||
cprint(
|
||||
"\n🖱️ [bold cyan]Let's get interactive: Passing JavaScript code to click 'Load More' button![/bold cyan]",
|
||||
True,
|
||||
)
|
||||
cprint(
|
||||
"In this example we try to click the 'Load More' button on the page using JavaScript code."
|
||||
)
|
||||
js_code = [
|
||||
"""
|
||||
const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More'));
|
||||
loadMoreButton && loadMoreButton.click();
|
||||
"""
|
||||
] * 2
|
||||
# crawler_strategy = LocalSeleniumCrawlerStrategy(js_code=js_code)
|
||||
# crawler = WebCrawler(crawler_strategy=crawler_strategy, always_by_pass_cache=True)
|
||||
result = crawler.run(url="https://www.nbcnews.com/business", js=js_code)
|
||||
cprint(
|
||||
"[LOG] 📦 [bold yellow]JavaScript Code (Load More button) result:[/bold yellow]"
|
||||
)
|
||||
print_result(result)
|
||||
|
||||
|
||||
def using_crawler_hooks(crawler):
|
||||
# Example usage of the hooks for authentication and setting a cookie
|
||||
def on_driver_created(driver):
|
||||
print("[HOOK] on_driver_created")
|
||||
# Example customization: maximize the window
|
||||
driver.maximize_window()
|
||||
|
||||
# Example customization: logging in to a hypothetical website
|
||||
driver.get("https://example.com/login")
|
||||
|
||||
from selenium.webdriver.support.ui import WebDriverWait
|
||||
from selenium.webdriver.common.by import By
|
||||
from selenium.webdriver.support import expected_conditions as EC
|
||||
|
||||
WebDriverWait(driver, 10).until(
|
||||
EC.presence_of_element_located((By.NAME, "username"))
|
||||
)
|
||||
driver.find_element(By.NAME, "username").send_keys("testuser")
|
||||
driver.find_element(By.NAME, "password").send_keys("password123")
|
||||
driver.find_element(By.NAME, "login").click()
|
||||
WebDriverWait(driver, 10).until(
|
||||
EC.presence_of_element_located((By.ID, "welcome"))
|
||||
)
|
||||
# Add a custom cookie
|
||||
driver.add_cookie({"name": "test_cookie", "value": "cookie_value"})
|
||||
return driver
|
||||
|
||||
def before_get_url(driver):
|
||||
print("[HOOK] before_get_url")
|
||||
# Example customization: add a custom header
|
||||
# Enable Network domain for sending headers
|
||||
driver.execute_cdp_cmd("Network.enable", {})
|
||||
# Add a custom header
|
||||
driver.execute_cdp_cmd(
|
||||
"Network.setExtraHTTPHeaders", {"headers": {"X-Test-Header": "test"}}
|
||||
)
|
||||
return driver
|
||||
|
||||
def after_get_url(driver):
|
||||
print("[HOOK] after_get_url")
|
||||
# Example customization: log the URL
|
||||
print(driver.current_url)
|
||||
return driver
|
||||
|
||||
def before_return_html(driver, html):
|
||||
print("[HOOK] before_return_html")
|
||||
# Example customization: log the HTML
|
||||
print(len(html))
|
||||
return driver
|
||||
|
||||
cprint(
|
||||
"\n🔗 [bold cyan]Using Crawler Hooks: Let's see how we can customize the crawler using hooks![/bold cyan]",
|
||||
True,
|
||||
)
|
||||
|
||||
crawler_strategy = LocalSeleniumCrawlerStrategy(verbose=True)
|
||||
crawler_strategy.set_hook("on_driver_created", on_driver_created)
|
||||
crawler_strategy.set_hook("before_get_url", before_get_url)
|
||||
crawler_strategy.set_hook("after_get_url", after_get_url)
|
||||
crawler_strategy.set_hook("before_return_html", before_return_html)
|
||||
|
||||
crawler = WebCrawler(verbose=True, crawler_strategy=crawler_strategy)
|
||||
crawler.warmup()
|
||||
result = crawler.run(url="https://example.com")
|
||||
|
||||
cprint("[LOG] 📦 [bold yellow]Crawler Hooks result:[/bold yellow]")
|
||||
print_result(result=result)
|
||||
|
||||
|
||||
def using_crawler_hooks_delay_example(crawler):
|
||||
def delay(driver):
|
||||
print("Delaying for 5 seconds...")
|
||||
time.sleep(5)
|
||||
print("Resuming...")
|
||||
|
||||
def create_crawler():
|
||||
crawler_strategy = LocalSeleniumCrawlerStrategy(verbose=True)
|
||||
crawler_strategy.set_hook("after_get_url", delay)
|
||||
crawler = WebCrawler(verbose=True, crawler_strategy=crawler_strategy)
|
||||
crawler.warmup()
|
||||
return crawler
|
||||
|
||||
cprint(
|
||||
"\n🔗 [bold cyan]Using Crawler Hooks: Let's add a delay after fetching the url to make sure entire page is fetched.[/bold cyan]"
|
||||
)
|
||||
crawler = create_crawler()
|
||||
result = crawler.run(url="https://google.com", bypass_cache=True)
|
||||
|
||||
cprint("[LOG] 📦 [bold yellow]Crawler Hooks result:[/bold yellow]")
|
||||
print_result(result)
|
||||
|
||||
|
||||
def main():
|
||||
cprint(
|
||||
"🌟 [bold green]Welcome to the Crawl4ai Quickstart Guide! Let's dive into some web crawling fun! 🌐[/bold green]"
|
||||
)
|
||||
cprint(
|
||||
"⛳️ [bold cyan]First Step: Create an instance of WebCrawler and call the `warmup()` function.[/bold cyan]"
|
||||
)
|
||||
cprint(
|
||||
"If this is the first time you're running Crawl4ai, this might take a few seconds to load required model files."
|
||||
)
|
||||
|
||||
crawler = create_crawler()
|
||||
|
||||
crawler.always_by_pass_cache = True
|
||||
basic_usage(crawler)
|
||||
# basic_usage_some_params(crawler)
|
||||
understanding_parameters(crawler)
|
||||
|
||||
crawler.always_by_pass_cache = True
|
||||
screenshot_usage(crawler)
|
||||
add_chunking_strategy(crawler)
|
||||
add_extraction_strategy(crawler)
|
||||
add_llm_extraction_strategy(crawler)
|
||||
targeted_extraction(crawler)
|
||||
interactive_extraction(crawler)
|
||||
multiple_scripts(crawler)
|
||||
|
||||
cprint(
|
||||
"\n🎉 [bold green]Congratulations! You've made it through the Crawl4ai Quickstart Guide! Now go forth and crawl the web like a pro! 🕸️[/bold green]"
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -1,735 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "6yLvrXn7yZQI"
|
||||
},
|
||||
"source": [
|
||||
"# Crawl4AI: Advanced Web Crawling and Data Extraction\n",
|
||||
"\n",
|
||||
"Welcome to this interactive notebook showcasing Crawl4AI, an advanced asynchronous web crawling and data extraction library.\n",
|
||||
"\n",
|
||||
"- GitHub Repository: [https://github.com/unclecode/crawl4ai](https://github.com/unclecode/crawl4ai)\n",
|
||||
"- Twitter: [@unclecode](https://twitter.com/unclecode)\n",
|
||||
"- Website: [https://crawl4ai.com](https://crawl4ai.com)\n",
|
||||
"\n",
|
||||
"Let's explore the powerful features of Crawl4AI!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "KIn_9nxFyZQK"
|
||||
},
|
||||
"source": [
|
||||
"## Installation\n",
|
||||
"\n",
|
||||
"First, let's install Crawl4AI from GitHub:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "mSnaxLf3zMog"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!sudo apt-get update && sudo apt-get install -y libwoff1 libopus0 libwebp6 libwebpdemux2 libenchant1c2a libgudev-1.0-0 libsecret-1-0 libhyphen0 libgdk-pixbuf2.0-0 libegl1 libnotify4 libxslt1.1 libevent-2.1-7 libgles2 libvpx6 libxcomposite1 libatk1.0-0 libatk-bridge2.0-0 libepoxy0 libgtk-3-0 libharfbuzz-icu0"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "xlXqaRtayZQK"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install crawl4ai\n",
|
||||
"!pip install nest-asyncio\n",
|
||||
"!playwright install"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "qKCE7TI7yZQL"
|
||||
},
|
||||
"source": [
|
||||
"Now, let's import the necessary libraries:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {
|
||||
"id": "I67tr7aAyZQL"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import asyncio\n",
|
||||
"import nest_asyncio\n",
|
||||
"from crawl4ai import AsyncWebCrawler\n",
|
||||
"from crawl4ai.extraction_strategy import JsonCssExtractionStrategy, LLMExtractionStrategy\n",
|
||||
"import json\n",
|
||||
"import time\n",
|
||||
"from pydantic import BaseModel, Field\n",
|
||||
"\n",
|
||||
"nest_asyncio.apply()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "h7yR_Rt_yZQM"
|
||||
},
|
||||
"source": [
|
||||
"## Basic Usage\n",
|
||||
"\n",
|
||||
"Let's start with a simple crawl example:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/"
|
||||
},
|
||||
"id": "yBh6hf4WyZQM",
|
||||
"outputId": "0f83af5c-abba-4175-ed95-70b7512e6bcc"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[LOG] 🌤️ Warming up the AsyncWebCrawler\n",
|
||||
"[LOG] 🌞 AsyncWebCrawler is ready to crawl\n",
|
||||
"[LOG] 🚀 Content extracted for https://www.nbcnews.com/business, success: True, time taken: 0.05 seconds\n",
|
||||
"[LOG] 🚀 Extraction done for https://www.nbcnews.com/business, time taken: 0.05 seconds.\n",
|
||||
"18102\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"async def simple_crawl():\n",
|
||||
" async with AsyncWebCrawler(verbose=True) as crawler:\n",
|
||||
" result = await crawler.arun(url=\"https://www.nbcnews.com/business\")\n",
|
||||
" print(len(result.markdown))\n",
|
||||
"await simple_crawl()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "9rtkgHI28uI4"
|
||||
},
|
||||
"source": [
|
||||
"💡 By default, **Crawl4AI** caches the result of every URL, so the next time you call it, you’ll get an instant result. But if you want to bypass the cache, just set `bypass_cache=True`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "MzZ0zlJ9yZQM"
|
||||
},
|
||||
"source": [
|
||||
"## Advanced Features\n",
|
||||
"\n",
|
||||
"### Executing JavaScript and Using CSS Selectors"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/"
|
||||
},
|
||||
"id": "gHStF86xyZQM",
|
||||
"outputId": "34d0fb6d-4dec-4677-f76e-85a1f082829b"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[LOG] 🌤️ Warming up the AsyncWebCrawler\n",
|
||||
"[LOG] 🌞 AsyncWebCrawler is ready to crawl\n",
|
||||
"[LOG] 🕸️ Crawling https://www.nbcnews.com/business using AsyncPlaywrightCrawlerStrategy...\n",
|
||||
"[LOG] ✅ Crawled https://www.nbcnews.com/business successfully!\n",
|
||||
"[LOG] 🚀 Crawling done for https://www.nbcnews.com/business, success: True, time taken: 6.06 seconds\n",
|
||||
"[LOG] 🚀 Content extracted for https://www.nbcnews.com/business, success: True, time taken: 0.10 seconds\n",
|
||||
"[LOG] 🔥 Extracting semantic blocks for https://www.nbcnews.com/business, Strategy: AsyncWebCrawler\n",
|
||||
"[LOG] 🚀 Extraction done for https://www.nbcnews.com/business, time taken: 0.11 seconds.\n",
|
||||
"41135\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"async def js_and_css():\n",
|
||||
" async with AsyncWebCrawler(verbose=True) as crawler:\n",
|
||||
" js_code = [\"const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More')); loadMoreButton && loadMoreButton.click();\"]\n",
|
||||
" result = await crawler.arun(\n",
|
||||
" url=\"https://www.nbcnews.com/business\",\n",
|
||||
" js_code=js_code,\n",
|
||||
" # css_selector=\"YOUR_CSS_SELECTOR_HERE\",\n",
|
||||
" bypass_cache=True\n",
|
||||
" )\n",
|
||||
" print(len(result.markdown))\n",
|
||||
"\n",
|
||||
"await js_and_css()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "cqE_W4coyZQM"
|
||||
},
|
||||
"source": [
|
||||
"### Using a Proxy\n",
|
||||
"\n",
|
||||
"Note: You'll need to replace the proxy URL with a working proxy for this example to run successfully."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "QjAyiAGqyZQM"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"async def use_proxy():\n",
|
||||
" async with AsyncWebCrawler(verbose=True, proxy=\"http://your-proxy-url:port\") as crawler:\n",
|
||||
" result = await crawler.arun(\n",
|
||||
" url=\"https://www.nbcnews.com/business\",\n",
|
||||
" bypass_cache=True\n",
|
||||
" )\n",
|
||||
" print(result.markdown[:500]) # Print first 500 characters\n",
|
||||
"\n",
|
||||
"# Uncomment the following line to run the proxy example\n",
|
||||
"# await use_proxy()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "XTZ88lbayZQN"
|
||||
},
|
||||
"source": [
|
||||
"### Extracting Structured Data with OpenAI\n",
|
||||
"\n",
|
||||
"Note: You'll need to set your OpenAI API key as an environment variable for this example to work."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/"
|
||||
},
|
||||
"id": "fIOlDayYyZQN",
|
||||
"outputId": "cb8359cc-dee0-4762-9698-5dfdcee055b8"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[LOG] 🌤️ Warming up the AsyncWebCrawler\n",
|
||||
"[LOG] 🌞 AsyncWebCrawler is ready to crawl\n",
|
||||
"[LOG] 🕸️ Crawling https://openai.com/api/pricing/ using AsyncPlaywrightCrawlerStrategy...\n",
|
||||
"[LOG] ✅ Crawled https://openai.com/api/pricing/ successfully!\n",
|
||||
"[LOG] 🚀 Crawling done for https://openai.com/api/pricing/, success: True, time taken: 3.77 seconds\n",
|
||||
"[LOG] 🚀 Content extracted for https://openai.com/api/pricing/, success: True, time taken: 0.21 seconds\n",
|
||||
"[LOG] 🔥 Extracting semantic blocks for https://openai.com/api/pricing/, Strategy: AsyncWebCrawler\n",
|
||||
"[LOG] Call LLM for https://openai.com/api/pricing/ - block index: 0\n",
|
||||
"[LOG] Call LLM for https://openai.com/api/pricing/ - block index: 1\n",
|
||||
"[LOG] Call LLM for https://openai.com/api/pricing/ - block index: 2\n",
|
||||
"[LOG] Call LLM for https://openai.com/api/pricing/ - block index: 3\n",
|
||||
"[LOG] Extracted 4 blocks from URL: https://openai.com/api/pricing/ block index: 3\n",
|
||||
"[LOG] Call LLM for https://openai.com/api/pricing/ - block index: 4\n",
|
||||
"[LOG] Extracted 5 blocks from URL: https://openai.com/api/pricing/ block index: 0\n",
|
||||
"[LOG] Extracted 1 blocks from URL: https://openai.com/api/pricing/ block index: 4\n",
|
||||
"[LOG] Extracted 8 blocks from URL: https://openai.com/api/pricing/ block index: 1\n",
|
||||
"[LOG] Extracted 12 blocks from URL: https://openai.com/api/pricing/ block index: 2\n",
|
||||
"[LOG] 🚀 Extraction done for https://openai.com/api/pricing/, time taken: 8.55 seconds.\n",
|
||||
"5029\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"from google.colab import userdata\n",
|
||||
"os.environ['OPENAI_API_KEY'] = userdata.get('OPENAI_API_KEY')\n",
|
||||
"\n",
|
||||
"class OpenAIModelFee(BaseModel):\n",
|
||||
" model_name: str = Field(..., description=\"Name of the OpenAI model.\")\n",
|
||||
" input_fee: str = Field(..., description=\"Fee for input token for the OpenAI model.\")\n",
|
||||
" output_fee: str = Field(..., description=\"Fee for output token for the OpenAI model.\")\n",
|
||||
"\n",
|
||||
"async def extract_openai_fees():\n",
|
||||
" async with AsyncWebCrawler(verbose=True) as crawler:\n",
|
||||
" result = await crawler.arun(\n",
|
||||
" url='https://openai.com/api/pricing/',\n",
|
||||
" word_count_threshold=1,\n",
|
||||
" extraction_strategy=LLMExtractionStrategy(\n",
|
||||
" provider=\"openai/gpt-4o\", api_token=os.getenv('OPENAI_API_KEY'),\n",
|
||||
" schema=OpenAIModelFee.schema(),\n",
|
||||
" extraction_type=\"schema\",\n",
|
||||
" instruction=\"\"\"From the crawled content, extract all mentioned model names along with their fees for input and output tokens.\n",
|
||||
" Do not miss any models in the entire content. One extracted model JSON format should look like this:\n",
|
||||
" {\"model_name\": \"GPT-4\", \"input_fee\": \"US$10.00 / 1M tokens\", \"output_fee\": \"US$30.00 / 1M tokens\"}.\"\"\"\n",
|
||||
" ),\n",
|
||||
" bypass_cache=True,\n",
|
||||
" )\n",
|
||||
" print(len(result.extracted_content))\n",
|
||||
"\n",
|
||||
"# Uncomment the following line to run the OpenAI extraction example\n",
|
||||
"await extract_openai_fees()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "BypA5YxEyZQN"
|
||||
},
|
||||
"source": [
|
||||
"### Advanced Multi-Page Crawling with JavaScript Execution"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "tfkcVQ0b7mw-"
|
||||
},
|
||||
"source": [
|
||||
"## Advanced Multi-Page Crawling with JavaScript Execution\n",
|
||||
"\n",
|
||||
"This example demonstrates Crawl4AI's ability to handle complex crawling scenarios, specifically extracting commits from multiple pages of a GitHub repository. The challenge here is that clicking the \"Next\" button doesn't load a new page, but instead uses asynchronous JavaScript to update the content. This is a common hurdle in modern web crawling.\n",
|
||||
"\n",
|
||||
"To overcome this, we use Crawl4AI's custom JavaScript execution to simulate clicking the \"Next\" button, and implement a custom hook to detect when new data has loaded. Our strategy involves comparing the first commit's text before and after \"clicking\" Next, waiting until it changes to confirm new data has rendered. This showcases Crawl4AI's flexibility in handling dynamic content and its ability to implement custom logic for even the most challenging crawling tasks."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/"
|
||||
},
|
||||
"id": "qUBKGpn3yZQN",
|
||||
"outputId": "3e555b6a-ed33-42f4-cce9-499a923fbe17"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[LOG] 🌤️ Warming up the AsyncWebCrawler\n",
|
||||
"[LOG] 🌞 AsyncWebCrawler is ready to crawl\n",
|
||||
"[LOG] 🕸️ Crawling https://github.com/microsoft/TypeScript/commits/main using AsyncPlaywrightCrawlerStrategy...\n",
|
||||
"[LOG] ✅ Crawled https://github.com/microsoft/TypeScript/commits/main successfully!\n",
|
||||
"[LOG] 🚀 Crawling done for https://github.com/microsoft/TypeScript/commits/main, success: True, time taken: 5.16 seconds\n",
|
||||
"[LOG] 🚀 Content extracted for https://github.com/microsoft/TypeScript/commits/main, success: True, time taken: 0.28 seconds\n",
|
||||
"[LOG] 🔥 Extracting semantic blocks for https://github.com/microsoft/TypeScript/commits/main, Strategy: AsyncWebCrawler\n",
|
||||
"[LOG] 🚀 Extraction done for https://github.com/microsoft/TypeScript/commits/main, time taken: 0.28 seconds.\n",
|
||||
"Page 1: Found 35 commits\n",
|
||||
"[LOG] 🕸️ Crawling https://github.com/microsoft/TypeScript/commits/main using AsyncPlaywrightCrawlerStrategy...\n",
|
||||
"[LOG] ✅ Crawled https://github.com/microsoft/TypeScript/commits/main successfully!\n",
|
||||
"[LOG] 🚀 Crawling done for https://github.com/microsoft/TypeScript/commits/main, success: True, time taken: 0.78 seconds\n",
|
||||
"[LOG] 🚀 Content extracted for https://github.com/microsoft/TypeScript/commits/main, success: True, time taken: 0.90 seconds\n",
|
||||
"[LOG] 🔥 Extracting semantic blocks for https://github.com/microsoft/TypeScript/commits/main, Strategy: AsyncWebCrawler\n",
|
||||
"[LOG] 🚀 Extraction done for https://github.com/microsoft/TypeScript/commits/main, time taken: 0.90 seconds.\n",
|
||||
"Page 2: Found 35 commits\n",
|
||||
"[LOG] 🕸️ Crawling https://github.com/microsoft/TypeScript/commits/main using AsyncPlaywrightCrawlerStrategy...\n",
|
||||
"[LOG] ✅ Crawled https://github.com/microsoft/TypeScript/commits/main successfully!\n",
|
||||
"[LOG] 🚀 Crawling done for https://github.com/microsoft/TypeScript/commits/main, success: True, time taken: 2.00 seconds\n",
|
||||
"[LOG] 🚀 Content extracted for https://github.com/microsoft/TypeScript/commits/main, success: True, time taken: 0.74 seconds\n",
|
||||
"[LOG] 🔥 Extracting semantic blocks for https://github.com/microsoft/TypeScript/commits/main, Strategy: AsyncWebCrawler\n",
|
||||
"[LOG] 🚀 Extraction done for https://github.com/microsoft/TypeScript/commits/main, time taken: 0.75 seconds.\n",
|
||||
"Page 3: Found 35 commits\n",
|
||||
"Successfully crawled 105 commits across 3 pages\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import re\n",
|
||||
"from bs4 import BeautifulSoup\n",
|
||||
"\n",
|
||||
"async def crawl_typescript_commits():\n",
|
||||
" first_commit = \"\"\n",
|
||||
" async def on_execution_started(page):\n",
|
||||
" nonlocal first_commit\n",
|
||||
" try:\n",
|
||||
" while True:\n",
|
||||
" await page.wait_for_selector('li.Box-sc-g0xbh4-0 h4')\n",
|
||||
" commit = await page.query_selector('li.Box-sc-g0xbh4-0 h4')\n",
|
||||
" commit = await commit.evaluate('(element) => element.textContent')\n",
|
||||
" commit = re.sub(r'\\s+', '', commit)\n",
|
||||
" if commit and commit != first_commit:\n",
|
||||
" first_commit = commit\n",
|
||||
" break\n",
|
||||
" await asyncio.sleep(0.5)\n",
|
||||
" except Exception as e:\n",
|
||||
" print(f\"Warning: New content didn't appear after JavaScript execution: {e}\")\n",
|
||||
"\n",
|
||||
" async with AsyncWebCrawler(verbose=True) as crawler:\n",
|
||||
" crawler.crawler_strategy.set_hook('on_execution_started', on_execution_started)\n",
|
||||
"\n",
|
||||
" url = \"https://github.com/microsoft/TypeScript/commits/main\"\n",
|
||||
" session_id = \"typescript_commits_session\"\n",
|
||||
" all_commits = []\n",
|
||||
"\n",
|
||||
" js_next_page = \"\"\"\n",
|
||||
" const button = document.querySelector('a[data-testid=\"pagination-next-button\"]');\n",
|
||||
" if (button) button.click();\n",
|
||||
" \"\"\"\n",
|
||||
"\n",
|
||||
" for page in range(3): # Crawl 3 pages\n",
|
||||
" result = await crawler.arun(\n",
|
||||
" url=url,\n",
|
||||
" session_id=session_id,\n",
|
||||
" css_selector=\"li.Box-sc-g0xbh4-0\",\n",
|
||||
" js=js_next_page if page > 0 else None,\n",
|
||||
" bypass_cache=True,\n",
|
||||
" js_only=page > 0\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" assert result.success, f\"Failed to crawl page {page + 1}\"\n",
|
||||
"\n",
|
||||
" soup = BeautifulSoup(result.cleaned_html, 'html.parser')\n",
|
||||
" commits = soup.select(\"li\")\n",
|
||||
" all_commits.extend(commits)\n",
|
||||
"\n",
|
||||
" print(f\"Page {page + 1}: Found {len(commits)} commits\")\n",
|
||||
"\n",
|
||||
" await crawler.crawler_strategy.kill_session(session_id)\n",
|
||||
" print(f\"Successfully crawled {len(all_commits)} commits across 3 pages\")\n",
|
||||
"\n",
|
||||
"await crawl_typescript_commits()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "EJRnYsp6yZQN"
|
||||
},
|
||||
"source": [
|
||||
"### Using JsonCssExtractionStrategy for Fast Structured Output"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "1ZMqIzB_8SYp"
|
||||
},
|
||||
"source": [
|
||||
"The JsonCssExtractionStrategy is a powerful feature of Crawl4AI that allows for precise, structured data extraction from web pages. Here's how it works:\n",
|
||||
"\n",
|
||||
"1. You define a schema that describes the pattern of data you're interested in extracting.\n",
|
||||
"2. The schema includes a base selector that identifies repeating elements on the page.\n",
|
||||
"3. Within the schema, you define fields, each with its own selector and type.\n",
|
||||
"4. These field selectors are applied within the context of each base selector element.\n",
|
||||
"5. The strategy supports nested structures, lists within lists, and various data types.\n",
|
||||
"6. You can even include computed fields for more complex data manipulation.\n",
|
||||
"\n",
|
||||
"This approach allows for highly flexible and precise data extraction, transforming semi-structured web content into clean, structured JSON data. It's particularly useful for extracting consistent data patterns from pages like product listings, news articles, or search results.\n",
|
||||
"\n",
|
||||
"For more details and advanced usage, check out the full documentation on the Crawl4AI website."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/"
|
||||
},
|
||||
"id": "trCMR2T9yZQN",
|
||||
"outputId": "718d36f4-cccf-40f4-8d8c-c3ba73524d16"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[LOG] 🌤️ Warming up the AsyncWebCrawler\n",
|
||||
"[LOG] 🌞 AsyncWebCrawler is ready to crawl\n",
|
||||
"[LOG] 🕸️ Crawling https://www.nbcnews.com/business using AsyncPlaywrightCrawlerStrategy...\n",
|
||||
"[LOG] ✅ Crawled https://www.nbcnews.com/business successfully!\n",
|
||||
"[LOG] 🚀 Crawling done for https://www.nbcnews.com/business, success: True, time taken: 7.00 seconds\n",
|
||||
"[LOG] 🚀 Content extracted for https://www.nbcnews.com/business, success: True, time taken: 0.32 seconds\n",
|
||||
"[LOG] 🔥 Extracting semantic blocks for https://www.nbcnews.com/business, Strategy: AsyncWebCrawler\n",
|
||||
"[LOG] 🚀 Extraction done for https://www.nbcnews.com/business, time taken: 0.48 seconds.\n",
|
||||
"Successfully extracted 11 news teasers\n",
|
||||
"{\n",
|
||||
" \"category\": \"Business News\",\n",
|
||||
" \"headline\": \"NBC ripped up its Olympics playbook for 2024 \\u2014 so far, the new strategy paid off\",\n",
|
||||
" \"summary\": \"The Olympics have long been key to NBCUniversal. Paris marked the 18th Olympic Games broadcast by NBC in the U.S.\",\n",
|
||||
" \"time\": \"13h ago\",\n",
|
||||
" \"image\": {\n",
|
||||
" \"src\": \"https://media-cldnry.s-nbcnews.com/image/upload/t_focal-200x100,f_auto,q_auto:best/rockcms/2024-09/240903-nbc-olympics-ch-1344-c7a486.jpg\",\n",
|
||||
" \"alt\": \"Mike Tirico.\"\n",
|
||||
" },\n",
|
||||
" \"link\": \"https://www.nbcnews.com/business\"\n",
|
||||
"}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"async def extract_news_teasers():\n",
|
||||
" schema = {\n",
|
||||
" \"name\": \"News Teaser Extractor\",\n",
|
||||
" \"baseSelector\": \".wide-tease-item__wrapper\",\n",
|
||||
" \"fields\": [\n",
|
||||
" {\n",
|
||||
" \"name\": \"category\",\n",
|
||||
" \"selector\": \".unibrow span[data-testid='unibrow-text']\",\n",
|
||||
" \"type\": \"text\",\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"name\": \"headline\",\n",
|
||||
" \"selector\": \".wide-tease-item__headline\",\n",
|
||||
" \"type\": \"text\",\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"name\": \"summary\",\n",
|
||||
" \"selector\": \".wide-tease-item__description\",\n",
|
||||
" \"type\": \"text\",\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"name\": \"time\",\n",
|
||||
" \"selector\": \"[data-testid='wide-tease-date']\",\n",
|
||||
" \"type\": \"text\",\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"name\": \"image\",\n",
|
||||
" \"type\": \"nested\",\n",
|
||||
" \"selector\": \"picture.teasePicture img\",\n",
|
||||
" \"fields\": [\n",
|
||||
" {\"name\": \"src\", \"type\": \"attribute\", \"attribute\": \"src\"},\n",
|
||||
" {\"name\": \"alt\", \"type\": \"attribute\", \"attribute\": \"alt\"},\n",
|
||||
" ],\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"name\": \"link\",\n",
|
||||
" \"selector\": \"a[href]\",\n",
|
||||
" \"type\": \"attribute\",\n",
|
||||
" \"attribute\": \"href\",\n",
|
||||
" },\n",
|
||||
" ],\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" extraction_strategy = JsonCssExtractionStrategy(schema, verbose=True)\n",
|
||||
"\n",
|
||||
" async with AsyncWebCrawler(verbose=True) as crawler:\n",
|
||||
" result = await crawler.arun(\n",
|
||||
" url=\"https://www.nbcnews.com/business\",\n",
|
||||
" extraction_strategy=extraction_strategy,\n",
|
||||
" bypass_cache=True,\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" assert result.success, \"Failed to crawl the page\"\n",
|
||||
"\n",
|
||||
" news_teasers = json.loads(result.extracted_content)\n",
|
||||
" print(f\"Successfully extracted {len(news_teasers)} news teasers\")\n",
|
||||
" print(json.dumps(news_teasers[0], indent=2))\n",
|
||||
"\n",
|
||||
"await extract_news_teasers()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "FnyVhJaByZQN"
|
||||
},
|
||||
"source": [
|
||||
"## Speed Comparison\n",
|
||||
"\n",
|
||||
"Let's compare the speed of Crawl4AI with Firecrawl, a paid service. Note that we can't run Firecrawl in this Colab environment, so we'll simulate its performance based on previously recorded data."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "agDD186f3wig"
|
||||
},
|
||||
"source": [
|
||||
"💡 **Note on Speed Comparison:**\n",
|
||||
"\n",
|
||||
"The speed test conducted here is running on Google Colab, where the internet speed and performance can vary and may not reflect optimal conditions. When we call Firecrawl's API, we're seeing its best performance, while Crawl4AI's performance is limited by Colab's network speed.\n",
|
||||
"\n",
|
||||
"For a more accurate comparison, it's recommended to run these tests on your own servers or computers with a stable and fast internet connection. Despite these limitations, Crawl4AI still demonstrates faster performance in this environment.\n",
|
||||
"\n",
|
||||
"If you run these tests locally, you may observe an even more significant speed advantage for Crawl4AI compared to other services."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "F7KwHv8G1LbY"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install firecrawl"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/"
|
||||
},
|
||||
"id": "91813zILyZQN",
|
||||
"outputId": "663223db-ab89-4976-b233-05ceca62b19b"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Firecrawl (simulated):\n",
|
||||
"Time taken: 4.38 seconds\n",
|
||||
"Content length: 41967 characters\n",
|
||||
"Images found: 49\n",
|
||||
"\n",
|
||||
"Crawl4AI (simple crawl):\n",
|
||||
"Time taken: 4.22 seconds\n",
|
||||
"Content length: 18221 characters\n",
|
||||
"Images found: 49\n",
|
||||
"\n",
|
||||
"Crawl4AI (with JavaScript execution):\n",
|
||||
"Time taken: 9.13 seconds\n",
|
||||
"Content length: 34243 characters\n",
|
||||
"Images found: 89\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"from google.colab import userdata\n",
|
||||
"os.environ['FIRECRAWL_API_KEY'] = userdata.get('FIRECRAWL_API_KEY')\n",
|
||||
"import time\n",
|
||||
"from firecrawl import FirecrawlApp\n",
|
||||
"\n",
|
||||
"async def speed_comparison():\n",
|
||||
" # Simulated Firecrawl performance\n",
|
||||
" app = FirecrawlApp(api_key=os.environ['FIRECRAWL_API_KEY'])\n",
|
||||
" start = time.time()\n",
|
||||
" scrape_status = app.scrape_url(\n",
|
||||
" 'https://www.nbcnews.com/business',\n",
|
||||
" params={'formats': ['markdown', 'html']}\n",
|
||||
" )\n",
|
||||
" end = time.time()\n",
|
||||
" print(\"Firecrawl (simulated):\")\n",
|
||||
" print(f\"Time taken: {end - start:.2f} seconds\")\n",
|
||||
" print(f\"Content length: {len(scrape_status['markdown'])} characters\")\n",
|
||||
" print(f\"Images found: {scrape_status['markdown'].count('cldnry.s-nbcnews.com')}\")\n",
|
||||
" print()\n",
|
||||
"\n",
|
||||
" async with AsyncWebCrawler() as crawler:\n",
|
||||
" # Crawl4AI simple crawl\n",
|
||||
" start = time.time()\n",
|
||||
" result = await crawler.arun(\n",
|
||||
" url=\"https://www.nbcnews.com/business\",\n",
|
||||
" word_count_threshold=0,\n",
|
||||
" bypass_cache=True,\n",
|
||||
" verbose=False\n",
|
||||
" )\n",
|
||||
" end = time.time()\n",
|
||||
" print(\"Crawl4AI (simple crawl):\")\n",
|
||||
" print(f\"Time taken: {end - start:.2f} seconds\")\n",
|
||||
" print(f\"Content length: {len(result.markdown)} characters\")\n",
|
||||
" print(f\"Images found: {result.markdown.count('cldnry.s-nbcnews.com')}\")\n",
|
||||
" print()\n",
|
||||
"\n",
|
||||
" # Crawl4AI with JavaScript execution\n",
|
||||
" start = time.time()\n",
|
||||
" result = await crawler.arun(\n",
|
||||
" url=\"https://www.nbcnews.com/business\",\n",
|
||||
" js_code=[\"const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More')); loadMoreButton && loadMoreButton.click();\"],\n",
|
||||
" word_count_threshold=0,\n",
|
||||
" bypass_cache=True,\n",
|
||||
" verbose=False\n",
|
||||
" )\n",
|
||||
" end = time.time()\n",
|
||||
" print(\"Crawl4AI (with JavaScript execution):\")\n",
|
||||
" print(f\"Time taken: {end - start:.2f} seconds\")\n",
|
||||
" print(f\"Content length: {len(result.markdown)} characters\")\n",
|
||||
" print(f\"Images found: {result.markdown.count('cldnry.s-nbcnews.com')}\")\n",
|
||||
"\n",
|
||||
"await speed_comparison()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "OBFFYVJIyZQN"
|
||||
},
|
||||
"source": [
|
||||
"If you run on a local machine with a proper internet speed:\n",
|
||||
"- Simple crawl: Crawl4AI is typically over 3-4 times faster than Firecrawl.\n",
|
||||
"- With JavaScript execution: Even when executing JavaScript to load more content (potentially doubling the number of images found), Crawl4AI is still faster than Firecrawl's simple crawl.\n",
|
||||
"\n",
|
||||
"Please note that actual performance may vary depending on network conditions and the specific content being crawled."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "A6_1RK1_yZQO"
|
||||
},
|
||||
"source": [
|
||||
"## Conclusion\n",
|
||||
"\n",
|
||||
"In this notebook, we've explored the powerful features of Crawl4AI, including:\n",
|
||||
"\n",
|
||||
"1. Basic crawling\n",
|
||||
"2. JavaScript execution and CSS selector usage\n",
|
||||
"3. Proxy support\n",
|
||||
"4. Structured data extraction with OpenAI\n",
|
||||
"5. Advanced multi-page crawling with JavaScript execution\n",
|
||||
"6. Fast structured output using JsonCssExtractionStrategy\n",
|
||||
"7. Speed comparison with other services\n",
|
||||
"\n",
|
||||
"Crawl4AI offers a fast, flexible, and powerful solution for web crawling and data extraction tasks. Its asynchronous architecture and advanced features make it suitable for a wide range of applications, from simple web scraping to complex, multi-page data extraction scenarios.\n",
|
||||
"\n",
|
||||
"For more information and advanced usage, please visit the [Crawl4AI documentation](https://docs.crawl4ai.com/).\n",
|
||||
"\n",
|
||||
"Happy crawling!"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": []
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.13"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
}
|
||||
143
docs/examples/regex_extraction_quickstart.py
Normal file
@@ -0,0 +1,143 @@
|
||||
# == File: regex_extraction_quickstart.py ==
|
||||
"""
|
||||
Mini–quick-start for RegexExtractionStrategy
|
||||
────────────────────────────────────────────
|
||||
3 bite-sized demos that parallel the style of *quickstart_examples_set_1.py*:
|
||||
|
||||
1. **Default catalog** – scrape a page and pull out e-mails / phones / URLs, etc.
|
||||
2. **Custom pattern** – add your own regex at instantiation time.
|
||||
3. **LLM-assisted schema** – ask the model to write a pattern, cache it, then
|
||||
run extraction _without_ further LLM calls.
|
||||
|
||||
Run the whole thing with::
|
||||
|
||||
python regex_extraction_quickstart.py
|
||||
"""
|
||||
|
||||
import os, json, asyncio
|
||||
from pathlib import Path
|
||||
from typing import List
|
||||
|
||||
from crawl4ai import (
|
||||
AsyncWebCrawler,
|
||||
CrawlerRunConfig,
|
||||
CrawlResult,
|
||||
RegexExtractionStrategy,
|
||||
LLMConfig,
|
||||
)
|
||||
|
||||
# ────────────────────────────────────────────────────────────────────────────
|
||||
# 1. Default-catalog extraction
|
||||
# ────────────────────────────────────────────────────────────────────────────
|
||||
async def demo_regex_default() -> None:
|
||||
print("\n=== 1. Regex extraction – default patterns ===")
|
||||
|
||||
url = "https://www.iana.org/domains/example" # has e-mail + URLs
|
||||
strategy = RegexExtractionStrategy(
|
||||
pattern=RegexExtractionStrategy.Url | RegexExtractionStrategy.Currency
|
||||
)
|
||||
config = CrawlerRunConfig(extraction_strategy=strategy)
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result: CrawlResult = await crawler.arun(url, config=config)
|
||||
|
||||
print(f"Fetched {url} - success={result.success}")
|
||||
if result.success:
|
||||
data = json.loads(result.extracted_content)
|
||||
for d in data[:10]:
|
||||
print(f" {d['label']:<12} {d['value']}")
|
||||
print(f"... total matches: {len(data)}")
|
||||
else:
|
||||
print(" !!! crawl failed")
|
||||
|
||||
|
||||
# ────────────────────────────────────────────────────────────────────────────
|
||||
# 2. Custom pattern override / extension
|
||||
# ────────────────────────────────────────────────────────────────────────────
|
||||
async def demo_regex_custom() -> None:
|
||||
print("\n=== 2. Regex extraction – custom price pattern ===")
|
||||
|
||||
url = "https://www.apple.com/shop/buy-mac/macbook-pro"
|
||||
price_pattern = {"usd_price": r"\$\s?\d{1,3}(?:,\d{3})*(?:\.\d{2})?"}
|
||||
|
||||
strategy = RegexExtractionStrategy(custom=price_pattern)
|
||||
config = CrawlerRunConfig(extraction_strategy=strategy)
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result: CrawlResult = await crawler.arun(url, config=config)
|
||||
|
||||
if result.success:
|
||||
data = json.loads(result.extracted_content)
|
||||
for d in data:
|
||||
print(f" {d['value']}")
|
||||
if not data:
|
||||
print(" (No prices found - page layout may have changed)")
|
||||
else:
|
||||
print(" !!! crawl failed")
|
||||
|
||||
|
||||
# ────────────────────────────────────────────────────────────────────────────
|
||||
# 3. One-shot LLM pattern generation, then fast extraction
|
||||
# ────────────────────────────────────────────────────────────────────────────
|
||||
async def demo_regex_generate_pattern() -> None:
|
||||
print("\n=== 3. generate_pattern → regex extraction ===")
|
||||
|
||||
cache_dir = Path(__file__).parent / "tmp"
|
||||
cache_dir.mkdir(exist_ok=True)
|
||||
pattern_file = cache_dir / "price_pattern.json"
|
||||
|
||||
url = "https://www.lazada.sg/tag/smartphone/"
|
||||
|
||||
# ── 3-A. build or load the cached pattern
|
||||
if pattern_file.exists():
|
||||
pattern = json.load(pattern_file.open(encoding="utf-8"))
|
||||
print("Loaded cached pattern:", pattern)
|
||||
else:
|
||||
print("Generating pattern via LLM…")
|
||||
|
||||
llm_cfg = LLMConfig(
|
||||
provider="openai/gpt-4o-mini",
|
||||
api_token="env:OPENAI_API_KEY",
|
||||
)
|
||||
|
||||
# pull one sample page as HTML context
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
html = (await crawler.arun(url)).fit_html
|
||||
|
||||
pattern = RegexExtractionStrategy.generate_pattern(
|
||||
label="price",
|
||||
html=html,
|
||||
query="Prices in Malaysian Ringgit (e.g. RM1,299.00 or RM200)",
|
||||
llm_config=llm_cfg,
|
||||
)
|
||||
|
||||
json.dump(pattern, pattern_file.open("w", encoding="utf-8"), indent=2)
|
||||
print("Saved pattern:", pattern_file)
|
||||
|
||||
# ── 3-B. extraction pass – zero LLM calls
|
||||
strategy = RegexExtractionStrategy(custom=pattern)
|
||||
config = CrawlerRunConfig(extraction_strategy=strategy, delay_before_return_html=3)
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result: CrawlResult = await crawler.arun(url, config=config)
|
||||
|
||||
if result.success:
|
||||
data = json.loads(result.extracted_content)
|
||||
for d in data[:15]:
|
||||
print(f" {d['value']}")
|
||||
print(f"... total matches: {len(data)}")
|
||||
else:
|
||||
print(" !!! crawl failed")
|
||||
|
||||
|
||||
# ────────────────────────────────────────────────────────────────────────────
|
||||
# Entrypoint
|
||||
# ────────────────────────────────────────────────────────────────────────────
|
||||
async def main() -> None:
|
||||
# await demo_regex_default()
|
||||
# await demo_regex_custom()
|
||||
await demo_regex_generate_pattern()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
38
docs/examples/session_id_example.py
Normal file
@@ -0,0 +1,38 @@
|
||||
import asyncio
|
||||
from crawl4ai import (
|
||||
AsyncWebCrawler,
|
||||
BrowserConfig,
|
||||
CrawlerRunConfig,
|
||||
DefaultMarkdownGenerator,
|
||||
PruningContentFilter,
|
||||
CrawlResult
|
||||
)
|
||||
|
||||
|
||||
|
||||
async def main():
|
||||
browser_config = BrowserConfig(
|
||||
headless=False,
|
||||
verbose=True,
|
||||
)
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
crawler_config = CrawlerRunConfig(
|
||||
session_id= "hello_world", # This help us to use the same page
|
||||
)
|
||||
result: CrawlResult = await crawler.arun(
|
||||
url="https://www.helloworld.org", config=crawler_config
|
||||
)
|
||||
# Add a breakpoint here, and you will see that the page stays open and the browser is not closed
|
||||
print(result.markdown.raw_markdown[:500])
|
||||
|
||||
new_config = crawler_config.clone(js_code=["(() => ({'data':'hello'}))()"], js_only=True)
|
||||
result: CrawlResult = await crawler.arun(  # No new fetch this time; this only executes JS in the already open page
|
||||
url="https://www.helloworld.org", config= new_config
|
||||
)
|
||||
print(result.js_execution_result) # You should see {'data':'hello'} in the console
|
||||
|
||||
# Get direct access to the Playwright page object. This works only if you reuse the same session_id and pass the same config
|
||||
page, context = crawler.crawler_strategy.get_page(new_config)
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
@@ -13,11 +13,11 @@ from crawl4ai.deep_crawling import (
|
||||
)
|
||||
from crawl4ai.deep_crawling.scorers import KeywordRelevanceScorer
|
||||
from crawl4ai.async_crawler_strategy import AsyncHTTPCrawlerStrategy
|
||||
from crawl4ai.configs import ProxyConfig
|
||||
from crawl4ai import ProxyConfig
|
||||
from crawl4ai import RoundRobinProxyStrategy
|
||||
from crawl4ai.content_filter_strategy import LLMContentFilter
|
||||
from crawl4ai import DefaultMarkdownGenerator
|
||||
from crawl4ai.async_configs import LlmConfig
|
||||
from crawl4ai import LLMConfig
|
||||
from crawl4ai.extraction_strategy import JsonCssExtractionStrategy
|
||||
from crawl4ai.processors.pdf import PDFCrawlerStrategy, PDFContentScrapingStrategy
|
||||
from pprint import pprint
|
||||
@@ -284,9 +284,9 @@ async def llm_content_filter():
|
||||
PART 5: LLM Content Filter
|
||||
|
||||
This function demonstrates:
|
||||
- Configuring LLM providers via LlmConfig
|
||||
- Configuring LLM providers via LLMConfig
|
||||
- Using LLM to generate focused markdown
|
||||
- LlmConfig for configuration
|
||||
- LLMConfig for configuration
|
||||
|
||||
Note: Requires a valid API key for the chosen LLM provider
|
||||
"""
|
||||
@@ -296,7 +296,7 @@ async def llm_content_filter():
|
||||
|
||||
# Create LLM configuration
|
||||
# Replace with your actual API key or set as environment variable
|
||||
llm_config = LlmConfig(
|
||||
llm_config = LLMConfig(
|
||||
provider="gemini/gemini-1.5-pro",
|
||||
api_token="env:GEMINI_API_KEY" # Will read from GEMINI_API_KEY environment variable
|
||||
)
|
||||
@@ -309,7 +309,7 @@ async def llm_content_filter():
|
||||
# Create markdown generator with LLM filter
|
||||
markdown_generator = DefaultMarkdownGenerator(
|
||||
content_filter=LLMContentFilter(
|
||||
llmConfig=llm_config,
|
||||
llm_config=llm_config,
|
||||
instruction="Extract key concepts and summaries"
|
||||
)
|
||||
)
|
||||
@@ -381,7 +381,7 @@ async def llm_schema_generation():
|
||||
PART 7: LLM Schema Generation
|
||||
|
||||
This function demonstrates:
|
||||
- Configuring LLM providers via LlmConfig
|
||||
- Configuring LLM providers via LLMConfig
|
||||
- Using LLM to generate extraction schemas
|
||||
- JsonCssExtractionStrategy
|
||||
|
||||
@@ -406,9 +406,9 @@ async def llm_schema_generation():
|
||||
<div class="rating">4.7/5</div>
|
||||
</div>
|
||||
"""
|
||||
print("\n📊 Setting up LlmConfig...")
|
||||
print("\n📊 Setting up LLMConfig...")
|
||||
# Create LLM configuration
|
||||
llm_config = LlmConfig(
|
||||
llm_config = LLMConfig(
|
||||
provider="gemini/gemini-1.5-pro",
|
||||
api_token="env:GEMINI_API_KEY"
|
||||
)
|
||||
@@ -416,7 +416,7 @@ async def llm_schema_generation():
|
||||
print(" This would use the LLM to analyze HTML and create an extraction schema")
|
||||
schema = JsonCssExtractionStrategy.generate_schema(
|
||||
html=sample_html,
|
||||
llmConfig = llm_config,
|
||||
llm_config = llm_config,
|
||||
query="Extract product name and price"
|
||||
)
|
||||
print("\n✅ Generated Schema:")
|
||||
|
||||
70
docs/examples/use_geo_location.py
Normal file
@@ -0,0 +1,70 @@
|
||||
# use_geo_location.py
|
||||
"""
|
||||
Example: override locale, timezone, and geolocation using Crawl4ai patterns.
|
||||
|
||||
This demo uses `AsyncWebCrawler.arun()` to fetch a page with
|
||||
browser context primed for specific locale, timezone, and GPS,
|
||||
and saves a screenshot for visual verification.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import base64
|
||||
from pathlib import Path
|
||||
from typing import List
|
||||
from crawl4ai import (
|
||||
AsyncWebCrawler,
|
||||
CrawlerRunConfig,
|
||||
BrowserConfig,
|
||||
GeolocationConfig,
|
||||
CrawlResult,
|
||||
)
|
||||
|
||||
async def demo_geo_override():
|
||||
"""Demo: Crawl a geolocation-test page with overrides and screenshot."""
|
||||
print("\n=== Geo-Override Crawl ===")
|
||||
|
||||
# 1) Browser setup: use Playwright-managed contexts
|
||||
browser_cfg = BrowserConfig(
|
||||
headless=False,
|
||||
viewport_width=1280,
|
||||
viewport_height=720,
|
||||
use_managed_browser=False,
|
||||
)
|
||||
|
||||
# 2) Run config: include locale, timezone_id, geolocation, and screenshot
|
||||
run_cfg = CrawlerRunConfig(
|
||||
url="https://browserleaks.com/geo", # test page that shows your location
|
||||
locale="en-US", # Accept-Language & UI locale
|
||||
timezone_id="America/Los_Angeles", # JS Date()/Intl timezone
|
||||
geolocation=GeolocationConfig( # override GPS coords
|
||||
latitude=34.0522,
|
||||
longitude=-118.2437,
|
||||
accuracy=10.0,
|
||||
),
|
||||
screenshot=True, # capture screenshot after load
|
||||
session_id="geo_test", # reuse context if rerunning
|
||||
delay_before_return_html=5
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler(config=browser_cfg) as crawler:
|
||||
# 3) Run crawl (returns list even for single URL)
|
||||
results: List[CrawlResult] = await crawler.arun(
|
||||
url=run_cfg.url,
|
||||
config=run_cfg,
|
||||
)
|
||||
result = results[0]
|
||||
|
||||
# 4) Save screenshot and report path
|
||||
if result.screenshot:
|
||||
__current_dir = Path(__file__).parent
|
||||
out_dir = __current_dir / "tmp"
|
||||
out_dir.mkdir(exist_ok=True)
|
||||
shot_path = out_dir / "geo_test.png"
|
||||
with open(shot_path, "wb") as f:
|
||||
f.write(base64.b64decode(result.screenshot))
|
||||
print(f"Saved screenshot to {shot_path}")
|
||||
else:
|
||||
print("No screenshot captured, check configuration.")
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(demo_geo_override())
|
||||
@@ -263,7 +263,102 @@ See the full example in `docs/examples/identity_based_browsing.py` for a complet
|
||||
|
||||
---
|
||||
|
||||
## 7. Summary
|
||||
## 7. Locale, Timezone, and Geolocation Control
|
||||
|
||||
In addition to using persistent profiles, Crawl4AI supports customizing your browser's locale, timezone, and geolocation settings. These features enhance your identity-based browsing experience by allowing you to control how websites perceive your location and regional settings.
|
||||
|
||||
### Setting Locale and Timezone
|
||||
|
||||
You can set the browser's locale and timezone through `CrawlerRunConfig`:
|
||||
|
||||
```python
|
||||
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://example.com",
|
||||
config=CrawlerRunConfig(
|
||||
# Set browser locale (language and region formatting)
|
||||
locale="fr-FR", # French (France)
|
||||
|
||||
# Set browser timezone
|
||||
timezone_id="Europe/Paris",
|
||||
|
||||
# Other normal options...
|
||||
magic=True,
|
||||
page_timeout=60000
|
||||
)
|
||||
)
|
||||
```
|
||||
|
||||
**How it works:**
|
||||
- `locale` affects language preferences, date formats, number formats, etc.
|
||||
- `timezone_id` affects JavaScript's Date object and time-related functionality
|
||||
- These settings are applied when creating the browser context and maintained throughout the session
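
If you want to confirm the overrides actually took effect, one quick sanity check (a sketch, not an official recipe) is to read the values back from the page's JavaScript environment. This assumes the return value of a `js_code` snippet is surfaced in `result.js_execution_result`, as in the session example included in this change set; the URL and function name below are placeholders:

```python
import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig

async def verify_locale_and_timezone():
    config = CrawlerRunConfig(
        locale="fr-FR",
        timezone_id="Europe/Paris",
        # Ask the page what its JavaScript environment actually reports
        js_code=[
            "(() => ({lang: navigator.language, tz: Intl.DateTimeFormat().resolvedOptions().timeZone}))()"
        ],
    )
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(url="https://example.com", config=config)
        # Expected to reflect fr-FR / Europe/Paris if the overrides were applied,
        # assuming js_execution_result is populated for js_code return values
        print(result.js_execution_result)

if __name__ == "__main__":
    asyncio.run(verify_locale_and_timezone())
```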
|
||||
|
||||
### Configuring Geolocation
|
||||
|
||||
Control the GPS coordinates reported by the browser's geolocation API:
|
||||
|
||||
```python
|
||||
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, GeolocationConfig
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://maps.google.com", # Or any location-aware site
|
||||
config=CrawlerRunConfig(
|
||||
# Configure precise GPS coordinates
|
||||
geolocation=GeolocationConfig(
|
||||
latitude=48.8566, # Paris coordinates
|
||||
longitude=2.3522,
|
||||
accuracy=100 # Accuracy in meters (optional)
|
||||
),
|
||||
|
||||
# This site will see you as being in Paris
|
||||
page_timeout=60000
|
||||
)
|
||||
)
|
||||
```
|
||||
|
||||
**Important notes:**
|
||||
- When `geolocation` is specified, the browser is automatically granted permission to access location
|
||||
- Websites using the Geolocation API will receive the exact coordinates you specify
|
||||
- This affects map services, store locators, delivery services, etc.
|
||||
- Combined with the appropriate `locale` and `timezone_id`, you can create a fully consistent location profile
|
||||
|
||||
### Combining with Managed Browsers
|
||||
|
||||
These settings work perfectly with managed browsers for a complete identity solution:
|
||||
|
||||
```python
|
||||
from crawl4ai import (
|
||||
AsyncWebCrawler, BrowserConfig, CrawlerRunConfig,
|
||||
GeolocationConfig
|
||||
)
|
||||
|
||||
browser_config = BrowserConfig(
|
||||
use_managed_browser=True,
|
||||
user_data_dir="/path/to/my-profile",
|
||||
browser_type="chromium"
|
||||
)
|
||||
|
||||
crawl_config = CrawlerRunConfig(
|
||||
# Location settings
|
||||
locale="es-MX", # Spanish (Mexico)
|
||||
timezone_id="America/Mexico_City",
|
||||
geolocation=GeolocationConfig(
|
||||
latitude=19.4326, # Mexico City
|
||||
longitude=-99.1332
|
||||
)
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
result = await crawler.arun(url="https://example.com", config=crawl_config)
|
||||
```
|
||||
|
||||
Combining persistent profiles with precise geolocation and region settings gives you complete control over your digital identity.
|
||||
|
||||
## 8. Summary
|
||||
|
||||
- **Create** your user-data directory either:
|
||||
- By launching Chrome/Chromium externally with `--user-data-dir=/some/path`
|
||||
@@ -271,6 +366,7 @@ See the full example in `docs/examples/identity_based_browsing.py` for a complet
|
||||
- Or through the interactive interface with `profiler.interactive_manager()`
|
||||
- **Log in** or configure sites as needed, then close the browser
|
||||
- **Reference** that folder in `BrowserConfig(user_data_dir="...")` + `use_managed_browser=True`
|
||||
- **Customize** identity aspects with `locale`, `timezone_id`, and `geolocation`
|
||||
- **List and reuse** profiles with `BrowserProfiler.list_profiles()`
|
||||
- **Manage** your profiles with the dedicated `BrowserProfiler` class
|
||||
- Enjoy **persistent** sessions that reflect your real identity
|
||||
|
||||
205
docs/md_v2/advanced/network-console-capture.md
Normal file
@@ -0,0 +1,205 @@
|
||||
# Network Requests & Console Message Capturing
|
||||
|
||||
Crawl4AI can capture all network requests and browser console messages during a crawl, which is invaluable for debugging, security analysis, or understanding page behavior.
|
||||
|
||||
## Configuration
|
||||
|
||||
To enable network and console capturing, use these configuration options:
|
||||
|
||||
```python
|
||||
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
|
||||
|
||||
# Enable both network request capture and console message capture
|
||||
config = CrawlerRunConfig(
|
||||
capture_network_requests=True, # Capture all network requests and responses
|
||||
capture_console_messages=True # Capture all browser console output
|
||||
)
|
||||
```
|
||||
|
||||
## Example Usage
|
||||
|
||||
```python
|
||||
import asyncio
|
||||
import json
|
||||
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
|
||||
|
||||
async def main():
|
||||
# Enable both network request capture and console message capture
|
||||
config = CrawlerRunConfig(
|
||||
capture_network_requests=True,
|
||||
capture_console_messages=True
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result = await crawler.arun(
|
||||
url="https://example.com",
|
||||
config=config
|
||||
)
|
||||
|
||||
if result.success:
|
||||
# Analyze network requests
|
||||
if result.network_requests:
|
||||
print(f"Captured {len(result.network_requests)} network events")
|
||||
|
||||
# Count request types
|
||||
request_count = len([r for r in result.network_requests if r.get("event_type") == "request"])
|
||||
response_count = len([r for r in result.network_requests if r.get("event_type") == "response"])
|
||||
failed_count = len([r for r in result.network_requests if r.get("event_type") == "request_failed"])
|
||||
|
||||
print(f"Requests: {request_count}, Responses: {response_count}, Failed: {failed_count}")
|
||||
|
||||
# Find API calls
|
||||
api_calls = [r for r in result.network_requests
|
||||
if r.get("event_type") == "request" and "api" in r.get("url", "")]
|
||||
if api_calls:
|
||||
print(f"Detected {len(api_calls)} API calls:")
|
||||
for call in api_calls[:3]: # Show first 3
|
||||
print(f" - {call.get('method')} {call.get('url')}")
|
||||
|
||||
# Analyze console messages
|
||||
if result.console_messages:
|
||||
print(f"Captured {len(result.console_messages)} console messages")
|
||||
|
||||
# Group by type
|
||||
message_types = {}
|
||||
for msg in result.console_messages:
|
||||
msg_type = msg.get("type", "unknown")
|
||||
message_types[msg_type] = message_types.get(msg_type, 0) + 1
|
||||
|
||||
print("Message types:", message_types)
|
||||
|
||||
# Show errors (often the most important)
|
||||
errors = [msg for msg in result.console_messages if msg.get("type") == "error"]
|
||||
if errors:
|
||||
print(f"Found {len(errors)} console errors:")
|
||||
for err in errors[:2]: # Show first 2
|
||||
print(f" - {err.get('text', '')[:100]}")
|
||||
|
||||
# Export all captured data to a file for detailed analysis
|
||||
with open("network_capture.json", "w") as f:
|
||||
json.dump({
|
||||
"url": result.url,
|
||||
"network_requests": result.network_requests or [],
|
||||
"console_messages": result.console_messages or []
|
||||
}, f, indent=2)
|
||||
|
||||
print("Exported detailed capture data to network_capture.json")
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
```
|
||||
|
||||
## Captured Data Structure
|
||||
|
||||
### Network Requests
|
||||
|
||||
The `result.network_requests` field contains a list of dictionaries, each representing a network event with these common fields:
|
||||
|
||||
| Field | Description |
|
||||
|-------|-------------|
|
||||
| `event_type` | Type of event: `"request"`, `"response"`, or `"request_failed"` |
|
||||
| `url` | The URL of the request |
|
||||
| `timestamp` | Unix timestamp when the event was captured |
|
||||
|
||||
#### Request Event Fields
|
||||
|
||||
```json
|
||||
{
|
||||
"event_type": "request",
|
||||
"url": "https://example.com/api/data.json",
|
||||
"method": "GET",
|
||||
"headers": {"User-Agent": "...", "Accept": "..."},
|
||||
"post_data": "key=value&otherkey=value",
|
||||
"resource_type": "fetch",
|
||||
"is_navigation_request": false,
|
||||
"timestamp": 1633456789.123
|
||||
}
|
||||
```
|
||||
|
||||
#### Response Event Fields
|
||||
|
||||
```json
|
||||
{
|
||||
"event_type": "response",
|
||||
"url": "https://example.com/api/data.json",
|
||||
"status": 200,
|
||||
"status_text": "OK",
|
||||
"headers": {"Content-Type": "application/json", "Cache-Control": "..."},
|
||||
"from_service_worker": false,
|
||||
"request_timing": {"requestTime": 1234.56, "receiveHeadersEnd": 1234.78},
|
||||
"timestamp": 1633456789.456
|
||||
}
|
||||
```
|
||||
|
||||
#### Failed Request Event Fields
|
||||
|
||||
```json
|
||||
{
|
||||
"event_type": "request_failed",
|
||||
"url": "https://example.com/missing.png",
|
||||
"method": "GET",
|
||||
"resource_type": "image",
|
||||
"failure_text": "net::ERR_ABORTED 404",
|
||||
"timestamp": 1633456789.789
|
||||
}
|
||||
```
|
||||
|
||||
### Console Messages
|
||||
|
||||
The `result.console_messages` field contains a list of dictionaries, each representing a console message with these common fields:
|
||||
|
||||
| Field | Description |
|
||||
|-------|-------------|
|
||||
| `type` | Message type: `"log"`, `"error"`, `"warning"`, `"info"`, etc. |
|
||||
| `text` | The message text |
|
||||
| `timestamp` | Unix timestamp when the message was captured |
|
||||
|
||||
#### Console Message Example
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "error",
|
||||
"text": "Uncaught TypeError: Cannot read property 'length' of undefined",
|
||||
"location": "https://example.com/script.js:123:45",
|
||||
"timestamp": 1633456790.123
|
||||
}
|
||||
```
|
||||
|
||||
## Key Benefits
|
||||
|
||||
- **Full Request Visibility**: Capture all network activity including:
|
||||
- Requests (URLs, methods, headers, post data)
|
||||
- Responses (status codes, headers, timing)
|
||||
- Failed requests (with error messages)
|
||||
|
||||
- **Console Message Access**: View all JavaScript console output:
|
||||
- Log messages
|
||||
- Warnings
|
||||
- Errors with stack traces
|
||||
- Developer debugging information
|
||||
|
||||
- **Debugging Power**: Identify issues such as:
|
||||
- Failed API calls or resource loading
|
||||
- JavaScript errors affecting page functionality
|
||||
- CORS or other security issues
|
||||
- Hidden API endpoints and data flows
|
||||
|
||||
- **Security Analysis**: Detect:
|
||||
- Unexpected third-party requests
|
||||
- Data leakage in request payloads
|
||||
- Suspicious script behavior
|
||||
|
||||
- **Performance Insights** (a rough sketch follows this list): Analyze:
|
||||
- Request timing data
|
||||
- Resource loading patterns
|
||||
- Potential bottlenecks
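
For the performance angle, here is a rough sketch that pairs request and response events by URL using only the documented `event_type`, `url`, and `timestamp` fields (repeated requests to the same URL will blur the numbers):

```python
def estimate_latencies(network_requests):
    """Very rough per-URL latency estimate from captured event timestamps."""
    starts, latencies = {}, {}
    for event in network_requests or []:
        url = event.get("url")
        if event.get("event_type") == "request":
            starts.setdefault(url, event.get("timestamp"))
        elif event.get("event_type") == "response" and url in starts:
            latencies[url] = event.get("timestamp") - starts[url]
    return latencies

# Example: show the five slowest resources
# slowest = sorted(estimate_latencies(result.network_requests).items(),
#                  key=lambda kv: kv[1], reverse=True)[:5]
```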
|
||||
|
||||
## Use Cases
|
||||
|
||||
1. **API Discovery**: Identify hidden endpoints and data flows in single-page applications
|
||||
2. **Debugging**: Track down JavaScript errors affecting page functionality
|
||||
3. **Security Auditing**: Detect unwanted third-party requests or data leakage
|
||||
4. **Performance Analysis**: Identify slow-loading resources
|
||||
5. **Ad/Tracker Analysis**: Detect and catalog advertising or tracking calls
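
For the security-audit and ad/tracker use cases, a naive sketch that lists every third-party host contacted during the crawl (simple hostname comparison, so subdomains of the crawled site also show up):

```python
from urllib.parse import urlparse

def third_party_hosts(page_url, network_requests):
    """Collect hostnames of captured requests that differ from the crawled page's host."""
    page_host = urlparse(page_url).netloc
    hosts = set()
    for event in network_requests or []:
        host = urlparse(event.get("url", "")).netloc
        if host and host != page_host:
            hosts.add(host)
    return hosts

# Example: third_party_hosts(result.url, result.network_requests)
```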
|
||||
|
||||
This capability is especially valuable for complex, JavaScript-heavy sites and single-page applications, or whenever you need to understand the exact communication happening between the browser and remote servers.
|
||||
@@ -10,11 +10,13 @@ class CrawlResult(BaseModel):
|
||||
html: str
|
||||
success: bool
|
||||
cleaned_html: Optional[str] = None
|
||||
fit_html: Optional[str] = None # Preprocessed HTML optimized for extraction
|
||||
media: Dict[str, List[Dict]] = {}
|
||||
links: Dict[str, List[Dict]] = {}
|
||||
downloaded_files: Optional[List[str]] = None
|
||||
screenshot: Optional[str] = None
|
||||
pdf : Optional[bytes] = None
|
||||
mhtml: Optional[str] = None
|
||||
markdown: Optional[Union[str, MarkdownGenerationResult]] = None
|
||||
extracted_content: Optional[str] = None
|
||||
metadata: Optional[dict] = None
|
||||
@@ -49,7 +51,7 @@ if not result.success:
|
||||
```
|
||||
|
||||
### 1.3 **`status_code`** *(Optional[int])*
|
||||
**What**: The page’s HTTP status code (e.g., 200, 404).
|
||||
**What**: The page's HTTP status code (e.g., 200, 404).
|
||||
**Usage**:
|
||||
```python
|
||||
if result.status_code == 404:
|
||||
@@ -81,7 +83,7 @@ if result.response_headers:
|
||||
```
|
||||
|
||||
### 1.7 **`ssl_certificate`** *(Optional[SSLCertificate])*
|
||||
**What**: If `fetch_ssl_certificate=True` in your CrawlerRunConfig, **`result.ssl_certificate`** contains a [**`SSLCertificate`**](../advanced/ssl-certificate.md) object describing the site’s certificate. You can export the cert in multiple formats (PEM/DER/JSON) or access its properties like `issuer`,
|
||||
**What**: If `fetch_ssl_certificate=True` in your CrawlerRunConfig, **`result.ssl_certificate`** contains a [**`SSLCertificate`**](../advanced/ssl-certificate.md) object describing the site's certificate. You can export the cert in multiple formats (PEM/DER/JSON) or access its properties like `issuer`,
|
||||
`subject`, `valid_from`, `valid_until`, etc.
|
||||
**Usage**:
|
||||
```python
|
||||
@@ -108,14 +110,6 @@ print(len(result.html))
|
||||
print(result.cleaned_html[:500]) # Show a snippet
|
||||
```
|
||||
|
||||
### 2.3 **`fit_html`** *(Optional[str])*
|
||||
**What**: If a **content filter** or heuristic (e.g., Pruning/BM25) modifies the HTML, the “fit” or post-filter version.
|
||||
**When**: This is **only** present if your `markdown_generator` or `content_filter` produces it.
|
||||
**Usage**:
|
||||
```python
|
||||
if result.markdown.fit_html:
|
||||
print("High-value HTML content:", result.markdown.fit_html[:300])
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
@@ -134,7 +128,7 @@ Crawl4AI can convert HTML→Markdown, optionally including:
|
||||
- **`raw_markdown`** *(str)*: The full HTML→Markdown conversion.
|
||||
- **`markdown_with_citations`** *(str)*: Same markdown, but with link references as academic-style citations.
|
||||
- **`references_markdown`** *(str)*: The reference list or footnotes at the end.
|
||||
- **`fit_markdown`** *(Optional[str])*: If content filtering (Pruning/BM25) was applied, the filtered “fit” text.
|
||||
- **`fit_markdown`** *(Optional[str])*: If content filtering (Pruning/BM25) was applied, the filtered "fit" text.
|
||||
- **`fit_html`** *(Optional[str])*: The HTML that led to `fit_markdown`.
|
||||
|
||||
**Usage**:
|
||||
@@ -156,7 +150,7 @@ print(result.markdown.raw_markdown[:200])
|
||||
print(result.markdown.fit_markdown)
|
||||
print(result.markdown.fit_html)
|
||||
```
|
||||
**Important**: “Fit” content (in `fit_markdown`/`fit_html`) exists in result.markdown, only if you used a **filter** (like **PruningContentFilter** or **BM25ContentFilter**) within a `MarkdownGenerationStrategy`.
|
||||
**Important**: "Fit" content (in `fit_markdown`/`fit_html`) exists in result.markdown, only if you used a **filter** (like **PruningContentFilter** or **BM25ContentFilter**) within a `MarkdownGenerationStrategy`.
|
||||
|
||||
---
|
||||
|
||||
@@ -168,7 +162,7 @@ print(result.markdown.fit_html)
|
||||
|
||||
- `src` *(str)*: Media URL
|
||||
- `alt` or `title` *(str)*: Descriptive text
|
||||
- `score` *(float)*: Relevance score if the crawler’s heuristic found it “important”
|
||||
- `score` *(float)*: Relevance score if the crawler's heuristic found it "important"
|
||||
- `desc` or `description` *(Optional[str])*: Additional context extracted from surrounding text
|
||||
|
||||
**Usage**:
|
||||
@@ -236,7 +230,16 @@ if result.pdf:
|
||||
f.write(result.pdf)
|
||||
```
|
||||
|
||||
### 5.5 **`metadata`** *(Optional[dict])*
|
||||
### 5.5 **`mhtml`** *(Optional[str])*
|
||||
**What**: MHTML snapshot of the page if `capture_mhtml=True` in `CrawlerRunConfig`. MHTML (MIME HTML) format preserves the entire web page with all its resources (CSS, images, scripts, etc.) in a single file.
|
||||
**Usage**:
|
||||
```python
|
||||
if result.mhtml:
|
||||
with open("page.mhtml", "w", encoding="utf-8") as f:
|
||||
f.write(result.mhtml)
|
||||
```
|
||||
|
||||
### 5.6 **`metadata`** *(Optional[dict])*
|
||||
**What**: Page-level metadata if discovered (title, description, OG data, etc.).
|
||||
**Usage**:
|
||||
```python
|
||||
@@ -253,7 +256,7 @@ A `DispatchResult` object providing additional concurrency and resource usage in
|
||||
|
||||
- **`task_id`**: A unique identifier for the parallel task.
|
||||
- **`memory_usage`** (float): The memory (in MB) used at the time of completion.
|
||||
- **`peak_memory`** (float): The peak memory usage (in MB) recorded during the task’s execution.
|
||||
- **`peak_memory`** (float): The peak memory usage (in MB) recorded during the task's execution.
|
||||
- **`start_time`** / **`end_time`** (datetime): Time range for this crawling task.
|
||||
- **`error_message`** (str): Any dispatcher- or concurrency-related error encountered.
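
A hedged sketch of reading these fields after a batch crawl; it assumes the object is exposed as `result.dispatch_result` and that the dispatcher populated it (it may be `None` otherwise):

```python
import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig

async def main():
    urls = ["https://example.com", "https://example.org"]
    async with AsyncWebCrawler() as crawler:
        results = await crawler.arun_many(urls, config=CrawlerRunConfig())
        for result in results:
            dr = getattr(result, "dispatch_result", None)  # assumed attribute name; may be None
            if dr:
                duration = (dr.end_time - dr.start_time).total_seconds()
                print(f"{dr.task_id}: peak {dr.peak_memory:.1f} MB over {duration:.2f}s")
                if dr.error_message:
                    print("  dispatcher error:", dr.error_message)

if __name__ == "__main__":
    asyncio.run(main())
```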
|
||||
|
||||
@@ -271,7 +274,69 @@ for result in results:
|
||||
|
||||
---
|
||||
|
||||
## 7. Example: Accessing Everything
|
||||
## 7. Network Requests & Console Messages
|
||||
|
||||
When you enable network and console message capturing in `CrawlerRunConfig` using `capture_network_requests=True` and `capture_console_messages=True`, the `CrawlResult` will include these fields:
|
||||
|
||||
### 7.1 **`network_requests`** *(Optional[List[Dict[str, Any]]])*
|
||||
**What**: A list of dictionaries containing information about all network requests, responses, and failures captured during the crawl.
|
||||
**Structure**:
|
||||
- Each item has an `event_type` field that can be `"request"`, `"response"`, or `"request_failed"`.
|
||||
- Request events include `url`, `method`, `headers`, `post_data`, `resource_type`, and `is_navigation_request`.
|
||||
- Response events include `url`, `status`, `status_text`, `headers`, and `request_timing`.
|
||||
- Failed request events include `url`, `method`, `resource_type`, and `failure_text`.
|
||||
- All events include a `timestamp` field.
|
||||
|
||||
**Usage**:
|
||||
```python
|
||||
if result.network_requests:
|
||||
# Count different types of events
|
||||
requests = [r for r in result.network_requests if r.get("event_type") == "request"]
|
||||
responses = [r for r in result.network_requests if r.get("event_type") == "response"]
|
||||
failures = [r for r in result.network_requests if r.get("event_type") == "request_failed"]
|
||||
|
||||
print(f"Captured {len(requests)} requests, {len(responses)} responses, and {len(failures)} failures")
|
||||
|
||||
# Analyze API calls
|
||||
api_calls = [r for r in requests if "api" in r.get("url", "")]
|
||||
|
||||
# Identify failed resources
|
||||
for failure in failures:
|
||||
print(f"Failed to load: {failure.get('url')} - {failure.get('failure_text')}")
|
||||
```
|
||||
|
||||
### 7.2 **`console_messages`** *(Optional[List[Dict[str, Any]]])*
|
||||
**What**: A list of dictionaries containing all browser console messages captured during the crawl.
|
||||
**Structure**:
|
||||
- Each item has a `type` field indicating the message type (e.g., `"log"`, `"error"`, `"warning"`, etc.).
|
||||
- The `text` field contains the actual message text.
|
||||
- Some messages include `location` information (URL, line, column).
|
||||
- All messages include a `timestamp` field.
|
||||
|
||||
**Usage**:
|
||||
```python
|
||||
if result.console_messages:
|
||||
# Count messages by type
|
||||
message_types = {}
|
||||
for msg in result.console_messages:
|
||||
msg_type = msg.get("type", "unknown")
|
||||
message_types[msg_type] = message_types.get(msg_type, 0) + 1
|
||||
|
||||
print(f"Message type counts: {message_types}")
|
||||
|
||||
# Display errors (which are usually most important)
|
||||
for msg in result.console_messages:
|
||||
if msg.get("type") == "error":
|
||||
print(f"Error: {msg.get('text')}")
|
||||
```
|
||||
|
||||
These fields provide deep visibility into the page's network activity and browser console, which is invaluable for debugging, security analysis, and understanding complex web applications.
|
||||
|
||||
For more details on network and console capturing, see the [Network & Console Capture documentation](../advanced/network-console-capture.md).
|
||||
|
||||
---
|
||||
|
||||
## 8. Example: Accessing Everything
|
||||
|
||||
```python
|
||||
async def handle_result(result: CrawlResult):
|
||||
@@ -286,7 +351,7 @@ async def handle_result(result: CrawlResult):
|
||||
# HTML
|
||||
print("Original HTML size:", len(result.html))
|
||||
print("Cleaned HTML size:", len(result.cleaned_html or ""))
|
||||
|
||||
|
||||
# Markdown output
|
||||
if result.markdown:
|
||||
print("Raw Markdown:", result.markdown.raw_markdown[:300])
|
||||
@@ -304,16 +369,36 @@ async def handle_result(result: CrawlResult):
|
||||
if result.extracted_content:
|
||||
print("Structured data:", result.extracted_content)
|
||||
|
||||
# Screenshot/PDF
|
||||
# Screenshot/PDF/MHTML
|
||||
if result.screenshot:
|
||||
print("Screenshot length:", len(result.screenshot))
|
||||
if result.pdf:
|
||||
print("PDF bytes length:", len(result.pdf))
|
||||
if result.mhtml:
|
||||
print("MHTML length:", len(result.mhtml))
|
||||
|
||||
# Network and console capturing
|
||||
if result.network_requests:
|
||||
print(f"Network requests captured: {len(result.network_requests)}")
|
||||
# Analyze request types
|
||||
req_types = {}
|
||||
for req in result.network_requests:
|
||||
if "resource_type" in req:
|
||||
req_types[req["resource_type"]] = req_types.get(req["resource_type"], 0) + 1
|
||||
print(f"Resource types: {req_types}")
|
||||
|
||||
if result.console_messages:
|
||||
print(f"Console messages captured: {len(result.console_messages)}")
|
||||
# Count by message type
|
||||
msg_types = {}
|
||||
for msg in result.console_messages:
|
||||
msg_types[msg.get("type", "unknown")] = msg_types.get(msg.get("type", "unknown"), 0) + 1
|
||||
print(f"Message types: {msg_types}")
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 8. Key Points & Future
|
||||
## 9. Key Points & Future
|
||||
|
||||
1. **Deprecated legacy properties of CrawlResult**
|
||||
- `markdown_v2` - Deprecated in v0.5. Just use `markdown`. It holds the `MarkdownGenerationResult` now!
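
In practice the migration is just a field rename, e.g.:

```python
# result.markdown now holds the MarkdownGenerationResult directly
md = result.markdown
print(md.raw_markdown[:200])
if md.fit_markdown:  # only populated when a content filter was used
    print(md.fit_markdown[:200])
```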
|
||||
|
||||
@@ -70,8 +70,9 @@ We group them by category.
|
||||
|------------------------------|--------------------------------------|-------------------------------------------------------------------------------------------------|
|
||||
| **`word_count_threshold`** | `int` (default: ~200) | Skips text blocks below X words. Helps ignore trivial sections. |
|
||||
| **`extraction_strategy`** | `ExtractionStrategy` (default: None) | If set, extracts structured data (CSS-based, LLM-based, etc.). |
|
||||
| **`markdown_generator`** | `MarkdownGenerationStrategy` (None) | If you want specialized markdown output (citations, filtering, chunking, etc.). |
|
||||
| **`css_selector`** | `str` (None) | Retains only the part of the page matching this selector. |
|
||||
| **`markdown_generator`**     | `MarkdownGenerationStrategy` (None)  | If you want specialized markdown output (citations, filtering, chunking, etc.). Can be customized with options such as the `content_source` parameter to select the HTML input source ('cleaned_html', 'raw_html', or 'fit_html'). |
|
||||
| **`css_selector`** | `str` (None) | Retains only the part of the page matching this selector. Affects the entire extraction process. |
|
||||
| **`target_elements`** | `List[str]` (None) | List of CSS selectors for elements to focus on for markdown generation and data extraction, while still processing the entire page for links, media, etc. Provides more flexibility than `css_selector`. |
|
||||
| **`excluded_tags`** | `list` (None) | Removes entire tags (e.g. `["script", "style"]`). |
|
||||
| **`excluded_selector`** | `str` (None) | Like `css_selector` but to exclude. E.g. `"#ads, .tracker"`. |
|
||||
| **`only_text`** | `bool` (False) | If `True`, tries to extract text-only content. |
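
A hedged sketch of how a few of these options combine in practice (the selector values are illustrative):

```python
from crawl4ai import CrawlerRunConfig

config = CrawlerRunConfig(
    word_count_threshold=10,                           # keep shorter text blocks than the default
    target_elements=["article.main", "div.comments"],  # focus markdown/extraction on these elements
    excluded_tags=["script", "style"],                 # drop these tags entirely
    excluded_selector="#ads, .tracker",                # and anything matching this selector
)
```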
|
||||
@@ -139,6 +140,7 @@ If your page is a single-page app with repeated JS updates, set `js_only=True` i
|
||||
| **`screenshot_wait_for`** | `float or None` | Extra wait time before the screenshot. |
|
||||
| **`screenshot_height_threshold`** | `int` (~20000) | If the page is taller than this, alternate screenshot strategies are used. |
|
||||
| **`pdf`** | `bool` (False) | If `True`, returns a PDF in `result.pdf`. |
|
||||
| **`capture_mhtml`** | `bool` (False) | If `True`, captures an MHTML snapshot of the page in `result.mhtml`. MHTML includes all page resources (CSS, images, etc.) in a single file. |
|
||||
| **`image_description_min_word_threshold`** | `int` (~50) | Minimum words for an image’s alt text or description to be considered valid. |
|
||||
| **`image_score_threshold`** | `int` (~3) | Filter out low-scoring images. The crawler scores images by relevance (size, context, etc.). |
|
||||
| **`exclude_external_images`** | `bool` (False) | Exclude images from other domains. |
|
||||
@@ -230,6 +232,7 @@ async def main():
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
```
|
||||
|
||||
## 2.4 Compliance & Ethics
|
||||
|
||||
@@ -245,8 +248,8 @@ run_config = CrawlerRunConfig(
|
||||
)
|
||||
```
|
||||
|
||||
# 3. **LlmConfig** - Setting up LLM providers
|
||||
LlmConfig is useful to pass LLM provider config to strategies and functions that rely on LLMs to do extraction, filtering, schema generation etc. Currently it can be used in the following -
|
||||
# 3. **LLMConfig** - Setting up LLM providers
|
||||
LLMConfig is useful to pass LLM provider config to strategies and functions that rely on LLMs to do extraction, filtering, schema generation etc. Currently it can be used in the following -
|
||||
|
||||
1. LLMExtractionStrategy
|
||||
2. LLMContentFilter
|
||||
@@ -262,7 +265,7 @@ LlmConfig is useful to pass LLM provider config to strategies and functions that
|
||||
|
||||
## 3.2 Example Usage
|
||||
```python
|
||||
llmConfig = LlmConfig(provider="openai/gpt-4o-mini", api_token=os.getenv("OPENAI_API_KEY"))
|
||||
llm_config = LLMConfig(provider="openai/gpt-4o-mini", api_token=os.getenv("OPENAI_API_KEY"))
|
||||
```
|
||||
|
||||
## 4. Putting It All Together
|
||||
@@ -270,7 +273,7 @@ llmConfig = LlmConfig(provider="openai/gpt-4o-mini", api_token=os.getenv("OPENAI
|
||||
- **Use** `BrowserConfig` for **global** browser settings: engine, headless, proxy, user agent.
|
||||
- **Use** `CrawlerRunConfig` for each crawl’s **context**: how to filter content, handle caching, wait for dynamic elements, or run JS.
|
||||
- **Pass** both configs to `AsyncWebCrawler` (the `BrowserConfig`) and then to `arun()` (the `CrawlerRunConfig`).
|
||||
- **Use** `LlmConfig` for LLM provider configurations that can be used across all extraction, filtering, schema generation tasks. Can be used in - `LLMExtractionStrategy`, `LLMContentFilter`, `JsonCssExtractionStrategy.generate_schema` & `JsonXPathExtractionStrategy.generate_schema`
|
||||
- **Use** `LLMConfig` for LLM provider configurations that can be used across all extraction, filtering, schema generation tasks. Can be used in - `LLMExtractionStrategy`, `LLMContentFilter`, `JsonCssExtractionStrategy.generate_schema` & `JsonXPathExtractionStrategy.generate_schema`
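
A compact sketch of that wiring, with illustrative values (the extraction instruction and model choice are placeholders):

```python
import asyncio
import os
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, LLMConfig
from crawl4ai.extraction_strategy import LLMExtractionStrategy

browser_cfg = BrowserConfig(headless=True)                    # global browser settings
llm_cfg = LLMConfig(provider="openai/gpt-4o-mini",
                    api_token=os.getenv("OPENAI_API_KEY"))    # shared LLM provider config
run_cfg = CrawlerRunConfig(                                   # per-crawl context
    word_count_threshold=10,
    extraction_strategy=LLMExtractionStrategy(
        llm_config=llm_cfg,
        instruction="Summarize the page in one sentence",
    ),
)

async def main():
    async with AsyncWebCrawler(config=browser_cfg) as crawler:
        result = await crawler.arun(url="https://example.com", config=run_cfg)
        print(result.extracted_content)

if __name__ == "__main__":
    asyncio.run(main())
```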
|
||||
|
||||
```python
|
||||
# Create a modified copy with the clone() method
|
||||
|
||||
@@ -36,6 +36,45 @@ LLMExtractionStrategy(
|
||||
)
|
||||
```
|
||||
|
||||
### RegexExtractionStrategy
|
||||
|
||||
Used for fast pattern-based extraction of common entities using regular expressions.
|
||||
|
||||
```python
|
||||
RegexExtractionStrategy(
|
||||
# Pattern Configuration
|
||||
pattern: IntFlag = RegexExtractionStrategy.Nothing, # Bit flags of built-in patterns to use
|
||||
custom: Optional[Dict[str, str]] = None, # Custom pattern dictionary {label: regex}
|
||||
|
||||
# Input Format
|
||||
input_format: str = "fit_html", # "html", "markdown", "text" or "fit_html"
|
||||
)
|
||||
|
||||
# Built-in Patterns as Bit Flags
|
||||
RegexExtractionStrategy.Email # Email addresses
|
||||
RegexExtractionStrategy.PhoneIntl # International phone numbers
|
||||
RegexExtractionStrategy.PhoneUS # US-format phone numbers
|
||||
RegexExtractionStrategy.Url # HTTP/HTTPS URLs
|
||||
RegexExtractionStrategy.IPv4 # IPv4 addresses
|
||||
RegexExtractionStrategy.IPv6 # IPv6 addresses
|
||||
RegexExtractionStrategy.Uuid # UUIDs
|
||||
RegexExtractionStrategy.Currency # Currency values (USD, EUR, etc)
|
||||
RegexExtractionStrategy.Percentage # Percentage values
|
||||
RegexExtractionStrategy.Number # Numeric values
|
||||
RegexExtractionStrategy.DateIso # ISO format dates
|
||||
RegexExtractionStrategy.DateUS # US format dates
|
||||
RegexExtractionStrategy.Time24h # 24-hour format times
|
||||
RegexExtractionStrategy.PostalUS # US postal codes
|
||||
RegexExtractionStrategy.PostalUK # UK postal codes
|
||||
RegexExtractionStrategy.HexColor # HTML hex color codes
|
||||
RegexExtractionStrategy.TwitterHandle # Twitter handles
|
||||
RegexExtractionStrategy.Hashtag # Hashtags
|
||||
RegexExtractionStrategy.MacAddr # MAC addresses
|
||||
RegexExtractionStrategy.Iban # International bank account numbers
|
||||
RegexExtractionStrategy.CreditCard # Credit card numbers
|
||||
RegexExtractionStrategy.All # All available patterns
|
||||
```
|
||||
|
||||
### CosineStrategy
|
||||
|
||||
Used for content similarity-based extraction and clustering.
|
||||
@@ -131,7 +170,7 @@ OverlappingWindowChunking(
|
||||
```python
|
||||
from pydantic import BaseModel
|
||||
from crawl4ai.extraction_strategy import LLMExtractionStrategy
|
||||
from crawl4ai.async_configs import LlmConfig
|
||||
from crawl4ai import LLMConfig
|
||||
|
||||
# Define schema
|
||||
class Article(BaseModel):
|
||||
@@ -141,7 +180,7 @@ class Article(BaseModel):
|
||||
|
||||
# Create strategy
|
||||
strategy = LLMExtractionStrategy(
|
||||
llmConfig = LlmConfig(provider="ollama/llama2"),
|
||||
llm_config = LLMConfig(provider="ollama/llama2"),
|
||||
schema=Article.schema(),
|
||||
instruction="Extract article details"
|
||||
)
|
||||
@@ -156,6 +195,55 @@ result = await crawler.arun(
|
||||
data = json.loads(result.extracted_content)
|
||||
```
|
||||
|
||||
### Regex Extraction
|
||||
|
||||
```python
|
||||
import json
|
||||
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, RegexExtractionStrategy
|
||||
|
||||
# Method 1: Use built-in patterns
|
||||
strategy = RegexExtractionStrategy(
|
||||
pattern = RegexExtractionStrategy.Email | RegexExtractionStrategy.Url
|
||||
)
|
||||
|
||||
# Method 2: Use custom patterns
|
||||
price_pattern = {"usd_price": r"\$\s?\d{1,3}(?:,\d{3})*(?:\.\d{2})?"}
|
||||
strategy = RegexExtractionStrategy(custom=price_pattern)
|
||||
|
||||
# Method 3: Generate pattern with LLM assistance (one-time)
|
||||
from crawl4ai import LLMConfig
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
# Get sample HTML first
|
||||
sample_result = await crawler.arun("https://example.com/products")
|
||||
html = sample_result.fit_html
|
||||
|
||||
# Generate regex pattern once
|
||||
pattern = RegexExtractionStrategy.generate_pattern(
|
||||
label="price",
|
||||
html=html,
|
||||
query="Product prices in USD format",
|
||||
llm_config=LLMConfig(provider="openai/gpt-4o-mini")
|
||||
)
|
||||
|
||||
# Save pattern for reuse
|
||||
import json
|
||||
with open("price_pattern.json", "w") as f:
|
||||
json.dump(pattern, f)
|
||||
|
||||
# Use pattern for extraction (no LLM calls)
|
||||
strategy = RegexExtractionStrategy(custom=pattern)
|
||||
result = await crawler.arun(
|
||||
url="https://example.com/products",
|
||||
config=CrawlerRunConfig(extraction_strategy=strategy)
|
||||
)
|
||||
|
||||
# Process results
|
||||
data = json.loads(result.extracted_content)
|
||||
for item in data:
|
||||
print(f"{item['label']}: {item['value']}")
|
||||
```
|
||||
|
||||
### CSS Extraction
|
||||
|
||||
```python
|
||||
@@ -198,7 +286,7 @@ result = await crawler.arun(
|
||||
|
||||
```python
|
||||
from crawl4ai.chunking_strategy import OverlappingWindowChunking
|
||||
from crawl4ai.async_configs import LlmConfig
|
||||
from crawl4ai import LLMConfig
|
||||
|
||||
# Create chunking strategy
|
||||
chunker = OverlappingWindowChunking(
|
||||
@@ -208,7 +296,7 @@ chunker = OverlappingWindowChunking(
|
||||
|
||||
# Use with extraction strategy
|
||||
strategy = LLMExtractionStrategy(
|
||||
llmConfig = LlmConfig(provider="ollama/llama2"),
|
||||
llm_config = LLMConfig(provider="ollama/llama2"),
|
||||
chunking_strategy=chunker
|
||||
)
|
||||
|
||||
@@ -220,12 +308,28 @@ result = await crawler.arun(
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Choose the Right Strategy**
|
||||
- Use `LLMExtractionStrategy` for complex, unstructured content
|
||||
- Use `JsonCssExtractionStrategy` for well-structured HTML
|
||||
1. **Choose the Right Strategy**
|
||||
- Use `RegexExtractionStrategy` for common data types like emails, phones, URLs, dates
|
||||
- Use `JsonCssExtractionStrategy` for well-structured HTML with consistent patterns
|
||||
- Use `LLMExtractionStrategy` for complex, unstructured content requiring reasoning
|
||||
- Use `CosineStrategy` for content similarity and clustering
|
||||
|
||||
2. **Optimize Chunking**
|
||||
2. **Strategy Selection Guide**
|
||||
```
|
||||
Is the target data a common type (email/phone/date/URL)?
|
||||
→ RegexExtractionStrategy
|
||||
|
||||
Does the page have consistent HTML structure?
|
||||
→ JsonCssExtractionStrategy or JsonXPathExtractionStrategy
|
||||
|
||||
Is the data semantically complex or unstructured?
|
||||
→ LLMExtractionStrategy
|
||||
|
||||
Need to find content similar to a specific topic?
|
||||
→ CosineStrategy
|
||||
```
|
||||
|
||||
3. **Optimize Chunking**
|
||||
```python
|
||||
# For long documents
|
||||
strategy = LLMExtractionStrategy(
|
||||
@@ -234,7 +338,26 @@ result = await crawler.arun(
|
||||
)
|
||||
```
|
||||
|
||||
3. **Handle Errors**
|
||||
4. **Combine Strategies for Best Performance**
|
||||
```python
|
||||
# First pass: Extract structure with CSS
|
||||
css_strategy = JsonCssExtractionStrategy(product_schema)
|
||||
css_result = await crawler.arun(url, config=CrawlerRunConfig(extraction_strategy=css_strategy))
|
||||
product_data = json.loads(css_result.extracted_content)
|
||||
|
||||
# Second pass: Extract specific fields with regex
|
||||
descriptions = [product["description"] for product in product_data]
|
||||
regex_strategy = RegexExtractionStrategy(
|
||||
pattern=RegexExtractionStrategy.Email | RegexExtractionStrategy.PhoneUS,
|
||||
custom={"dimension": r"\d+x\d+x\d+ (?:cm|in)"}
|
||||
)
|
||||
|
||||
# Process descriptions with regex
|
||||
for text in descriptions:
|
||||
matches = regex_strategy.extract("", text) # Direct extraction
|
||||
```
|
||||
|
||||
5. **Handle Errors**
|
||||
```python
|
||||
try:
|
||||
result = await crawler.arun(
|
||||
@@ -247,11 +370,31 @@ result = await crawler.arun(
|
||||
print(f"Extraction failed: {e}")
|
||||
```
|
||||
|
||||
4. **Monitor Performance**
|
||||
6. **Monitor Performance**
|
||||
```python
|
||||
strategy = CosineStrategy(
|
||||
verbose=True, # Enable logging
|
||||
word_count_threshold=20, # Filter short content
|
||||
top_k=5 # Limit results
|
||||
)
|
||||
```
|
||||
|
||||
7. **Cache Generated Patterns**
|
||||
```python
|
||||
# For RegexExtractionStrategy pattern generation
|
||||
import json
|
||||
from pathlib import Path
|
||||
|
||||
cache_dir = Path("./pattern_cache")
|
||||
cache_dir.mkdir(exist_ok=True)
|
||||
pattern_file = cache_dir / "product_pattern.json"
|
||||
|
||||
if pattern_file.exists():
|
||||
with open(pattern_file) as f:
|
||||
pattern = json.load(f)
|
||||
else:
|
||||
# Generate once with LLM
|
||||
pattern = RegexExtractionStrategy.generate_pattern(...)
|
||||
with open(pattern_file, "w") as f:
|
||||
json.dump(pattern, f)
|
||||
```
|
||||
docs/md_v2/ask_ai/ask-ai.css (new file, 444 lines)
@@ -0,0 +1,444 @@
|
||||
/* ==== File: docs/ask_ai/ask_ai.css ==== */
|
||||
|
||||
/* --- Basic Reset & Font --- */
|
||||
body {
|
||||
/* Attempt to inherit variables from parent window (iframe context) */
|
||||
/* Fallback values if variables are not inherited */
|
||||
--fallback-bg: #070708;
|
||||
--fallback-font: #e8e9ed;
|
||||
--fallback-secondary: #a3abba;
|
||||
--fallback-primary: #50ffff;
|
||||
--fallback-primary-dimmed: #09b5a5;
|
||||
--fallback-border: #1d1d20;
|
||||
--fallback-code-bg: #1e1e1e;
|
||||
--fallback-invert-font: #222225;
|
||||
--font-stack: dm, Monaco, Courier New, monospace, serif;
|
||||
|
||||
font-family: var(--font-stack, "Courier New", monospace); /* Use theme font stack */
|
||||
background-color: var(--background-color, var(--fallback-bg));
|
||||
color: var(--font-color, var(--fallback-font));
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
font-size: 14px; /* Match global font size */
|
||||
line-height: 1.5em; /* Match global line height */
|
||||
height: 100vh; /* Ensure body takes full height */
|
||||
overflow: hidden; /* Prevent body scrollbars, panels handle scroll */
|
||||
display: flex; /* Use flex for the main container */
|
||||
}
|
||||
|
||||
a {
|
||||
color: var(--secondary-color, var(--fallback-secondary));
|
||||
text-decoration: none;
|
||||
transition: color 0.2s;
|
||||
}
|
||||
a:hover {
|
||||
color: var(--primary-color, var(--fallback-primary));
|
||||
}
|
||||
|
||||
/* --- Main Container Layout --- */
|
||||
.ai-assistant-container {
|
||||
display: flex;
|
||||
width: 100%;
|
||||
height: 100%;
|
||||
background-color: var(--background-color, var(--fallback-bg));
|
||||
}
|
||||
|
||||
/* --- Sidebar Styling --- */
|
||||
.sidebar {
|
||||
flex-shrink: 0; /* Prevent sidebars from shrinking */
|
||||
height: 100%;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
/* background-color: var(--code-bg-color, var(--fallback-code-bg)); */
|
||||
overflow-y: hidden; /* Header fixed, list scrolls */
|
||||
}
|
||||
|
||||
.left-sidebar {
|
||||
flex-basis: 240px; /* Width of history panel */
|
||||
border-right: 1px solid var(--progress-bar-background, var(--fallback-border));
|
||||
}
|
||||
|
||||
.right-sidebar {
|
||||
flex-basis: 280px; /* Width of citations panel */
|
||||
border-left: 1px solid var(--progress-bar-background, var(--fallback-border));
|
||||
}
|
||||
|
||||
.sidebar header {
|
||||
padding: 0.6em 1em;
|
||||
border-bottom: 1px solid var(--progress-bar-background, var(--fallback-border));
|
||||
flex-shrink: 0;
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
align-items: center;
|
||||
}
|
||||
|
||||
.sidebar header h3 {
|
||||
margin: 0;
|
||||
font-size: 1.1em;
|
||||
color: var(--font-color, var(--fallback-font));
|
||||
}
|
||||
|
||||
.sidebar ul {
|
||||
list-style: none;
|
||||
padding: 0;
|
||||
margin: 0;
|
||||
overflow-y: auto; /* Enable scrolling for the list */
|
||||
flex-grow: 1; /* Allow list to take remaining space */
|
||||
padding: 0.5em 0;
|
||||
}
|
||||
|
||||
.sidebar ul li {
|
||||
padding: 0.3em 1em;
|
||||
}
|
||||
.sidebar ul li.no-citations,
|
||||
.sidebar ul li.no-history {
|
||||
color: var(--secondary-color, var(--fallback-secondary));
|
||||
font-style: italic;
|
||||
font-size: 0.9em;
|
||||
padding-left: 1em;
|
||||
}
|
||||
|
||||
.sidebar ul li a {
|
||||
color: var(--secondary-color, var(--fallback-secondary));
|
||||
text-decoration: none;
|
||||
display: block;
|
||||
padding: 0.2em 0.5em;
|
||||
border-radius: 3px;
|
||||
transition: background-color 0.2s, color 0.2s;
|
||||
}
|
||||
|
||||
.sidebar ul li a:hover {
|
||||
color: var(--primary-color, var(--fallback-primary));
|
||||
background-color: rgba(80, 255, 255, 0.08); /* Use primary color with alpha */
|
||||
}
|
||||
/* Style for active history item */
|
||||
#history-list li.active a {
|
||||
color: var(--primary-dimmed-color, var(--fallback-primary-dimmed));
|
||||
font-weight: bold;
|
||||
background-color: rgba(80, 255, 255, 0.12);
|
||||
}
|
||||
|
||||
/* --- Chat Panel Styling --- */
|
||||
#chat-panel {
|
||||
flex-grow: 1; /* Take remaining space */
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
height: 100%;
|
||||
overflow: hidden; /* Prevent overflow, internal elements handle scroll */
|
||||
}
|
||||
|
||||
#chat-messages {
|
||||
flex-grow: 1;
|
||||
overflow-y: auto; /* Scrollable chat history */
|
||||
padding: 1em 1.5em;
|
||||
border-bottom: 1px solid var(--progress-bar-background, var(--fallback-border));
|
||||
}
|
||||
|
||||
.message {
|
||||
margin-bottom: 1em;
|
||||
padding: 0.8em 1.2em;
|
||||
border-radius: 8px;
|
||||
max-width: 90%; /* Slightly wider */
|
||||
line-height: 1.6;
|
||||
/* Apply pre-wrap for better handling of spaces/newlines AND wrapping */
|
||||
white-space: pre-wrap;
|
||||
word-wrap: break-word; /* Ensure long words break */
|
||||
}
|
||||
|
||||
.user-message {
|
||||
background-color: var(--progress-bar-background, var(--fallback-border)); /* User message background */
|
||||
color: var(--font-color, var(--fallback-font));
|
||||
margin-left: auto; /* Align user messages to the right */
|
||||
text-align: left;
|
||||
}
|
||||
|
||||
.ai-message {
|
||||
background-color: var(--code-bg-color, var(--fallback-code-bg)); /* AI message background */
|
||||
color: var(--font-color, var(--fallback-font));
|
||||
margin-right: auto; /* Align AI messages to the left */
|
||||
border: 1px solid var(--progress-bar-background, var(--fallback-border));
|
||||
}
|
||||
.ai-message.welcome-message {
|
||||
border: none;
|
||||
background-color: transparent;
|
||||
max-width: 100%;
|
||||
text-align: center;
|
||||
color: var(--secondary-color, var(--fallback-secondary));
|
||||
white-space: normal;
|
||||
}
|
||||
|
||||
/* Styles for code within messages */
|
||||
.ai-message code {
|
||||
background-color: var(--invert-font-color, var(--fallback-invert-font)) !important; /* Use light bg for code */
|
||||
/* color: var(--background-color, var(--fallback-bg)) !important; Dark text */
|
||||
padding: 0.1em 0.4em;
|
||||
border-radius: 4px;
|
||||
font-size: 0.9em;
|
||||
}
|
||||
.ai-message pre {
|
||||
background-color: var(--invert-font-color, var(--fallback-invert-font)) !important;
|
||||
color: var(--background-color, var(--fallback-bg)) !important;
|
||||
padding: 1em;
|
||||
border-radius: 5px;
|
||||
overflow-x: auto;
|
||||
margin: 0.8em 0;
|
||||
white-space: pre;
|
||||
}
|
||||
.ai-message pre code {
|
||||
background-color: transparent !important;
|
||||
padding: 0;
|
||||
font-size: inherit;
|
||||
}
|
||||
|
||||
/* Override white-space for specific elements generated by Markdown */
|
||||
.ai-message p,
|
||||
.ai-message ul,
|
||||
.ai-message ol,
|
||||
.ai-message blockquote {
|
||||
white-space: normal; /* Allow standard wrapping for block elements */
|
||||
}
|
||||
|
||||
/* --- Markdown Element Styling within Messages --- */
|
||||
.message p {
|
||||
margin-top: 0;
|
||||
margin-bottom: 0.5em;
|
||||
}
|
||||
.message p:last-child {
|
||||
margin-bottom: 0;
|
||||
}
|
||||
.message ul,
|
||||
.message ol {
|
||||
margin: 0.5em 0 0.5em 1.5em;
|
||||
padding: 0;
|
||||
}
|
||||
.message li {
|
||||
margin-bottom: 0.2em;
|
||||
}
|
||||
|
||||
/* Code block styling (adjusts previous rules slightly) */
|
||||
.message code {
|
||||
/* Inline code */
|
||||
background-color: var(--invert-font-color, var(--fallback-invert-font)) !important;
|
||||
color: var(--font-color);
|
||||
padding: 0.1em 0.4em;
|
||||
border-radius: 4px;
|
||||
font-size: 0.9em;
|
||||
/* Ensure inline code breaks nicely */
|
||||
word-break: break-all;
|
||||
white-space: normal; /* Allow inline code to wrap if needed */
|
||||
}
|
||||
.message pre {
|
||||
/* Code block container */
|
||||
background-color: var(--invert-font-color, var(--fallback-invert-font)) !important;
|
||||
color: var(--background-color, var(--fallback-bg)) !important;
|
||||
padding: 1em;
|
||||
border-radius: 5px;
|
||||
overflow-x: auto;
|
||||
margin: 0.8em 0;
|
||||
font-size: 0.9em; /* Slightly smaller code blocks */
|
||||
}
|
||||
.message pre code {
|
||||
/* Code within code block */
|
||||
background-color: transparent !important;
|
||||
padding: 0;
|
||||
font-size: inherit;
|
||||
word-break: normal; /* Don't break words in code blocks */
|
||||
white-space: pre; /* Preserve whitespace strictly in code blocks */
|
||||
}
|
||||
|
||||
/* Thinking indicator */
|
||||
.message-thinking {
|
||||
display: inline-block;
|
||||
width: 5px;
|
||||
height: 5px;
|
||||
background-color: var(--primary-color, var(--fallback-primary));
|
||||
border-radius: 50%;
|
||||
margin-left: 8px;
|
||||
vertical-align: middle;
|
||||
animation: thinking 1s infinite ease-in-out;
|
||||
}
|
||||
@keyframes thinking {
|
||||
0%,
|
||||
100% {
|
||||
opacity: 0.5;
|
||||
transform: scale(0.8);
|
||||
}
|
||||
50% {
|
||||
opacity: 1;
|
||||
transform: scale(1.2);
|
||||
}
|
||||
}
|
||||
|
||||
/* --- Thinking Indicator (Blinking Cursor Style) --- */
|
||||
.thinking-indicator-cursor {
|
||||
display: inline-block;
|
||||
width: 10px; /* Width of the cursor */
|
||||
height: 1.1em; /* Match line height */
|
||||
background-color: var(--primary-color, var(--fallback-primary));
|
||||
margin-left: 5px;
|
||||
vertical-align: text-bottom; /* Align with text baseline */
|
||||
animation: blink-cursor 1s step-end infinite;
|
||||
}
|
||||
|
||||
@keyframes blink-cursor {
|
||||
from,
|
||||
to {
|
||||
background-color: transparent;
|
||||
}
|
||||
50% {
|
||||
background-color: var(--primary-color, var(--fallback-primary));
|
||||
}
|
||||
}
|
||||
|
||||
#chat-input-area {
|
||||
flex-shrink: 0; /* Prevent input area from shrinking */
|
||||
padding: 1em 1.5em;
|
||||
display: flex;
|
||||
align-items: flex-end; /* Align items to bottom */
|
||||
gap: 10px;
|
||||
background-color: var(--code-bg-color, var(--fallback-code-bg)); /* Match sidebars */
|
||||
}
|
||||
|
||||
#chat-input-area textarea {
|
||||
flex-grow: 1;
|
||||
padding: 0.8em 1em;
|
||||
border: 1px solid var(--progress-bar-background, var(--fallback-border));
|
||||
background-color: var(--background-color, var(--fallback-bg));
|
||||
color: var(--font-color, var(--fallback-font));
|
||||
border-radius: 5px;
|
||||
resize: none; /* Disable manual resize */
|
||||
font-family: inherit;
|
||||
font-size: 1em;
|
||||
line-height: 1.4;
|
||||
max-height: 150px; /* Limit excessive height */
|
||||
overflow-y: auto;
|
||||
/* rows: 2; */
|
||||
}
|
||||
|
||||
#chat-input-area button {
|
||||
/* Basic button styling - maybe inherit from main theme? */
|
||||
padding: 0.6em 1.2em;
|
||||
border: 1px solid var(--primary-dimmed-color, var(--fallback-primary-dimmed));
|
||||
background-color: var(--primary-dimmed-color, var(--fallback-primary-dimmed));
|
||||
color: var(--background-color, var(--fallback-bg));
|
||||
border-radius: 5px;
|
||||
cursor: pointer;
|
||||
font-size: 0.9em;
|
||||
transition: background-color 0.2s, border-color 0.2s;
|
||||
height: min-content; /* Align with bottom of textarea */
|
||||
}
|
||||
|
||||
#chat-input-area button:hover {
|
||||
background-color: var(--primary-color, var(--fallback-primary));
|
||||
border-color: var(--primary-color, var(--fallback-primary));
|
||||
}
|
||||
#chat-input-area button:disabled {
|
||||
opacity: 0.6;
|
||||
cursor: not-allowed;
|
||||
}
|
||||
|
||||
.loading-indicator {
|
||||
font-size: 0.9em;
|
||||
color: var(--secondary-color, var(--fallback-secondary));
|
||||
margin-right: 10px;
|
||||
align-self: center;
|
||||
}
|
||||
|
||||
/* --- Buttons --- */
|
||||
/* Inherit some button styles if possible */
|
||||
.btn.btn-sm {
|
||||
color: var(--font-color, var(--fallback-font));
|
||||
padding: 0.2em 0.5em;
|
||||
font-size: 0.8em;
|
||||
border: 1px solid var(--secondary-color, var(--fallback-secondary));
|
||||
background: none;
|
||||
border-radius: 3px;
|
||||
cursor: pointer;
|
||||
}
|
||||
.btn.btn-sm:hover {
|
||||
border-color: var(--font-color, var(--fallback-font));
|
||||
background-color: var(--progress-bar-background, var(--fallback-border));
|
||||
}
|
||||
|
||||
/* --- Basic Responsiveness --- */
|
||||
@media screen and (max-width: 900px) {
|
||||
.left-sidebar {
|
||||
flex-basis: 200px; /* Shrink history */
|
||||
}
|
||||
.right-sidebar {
|
||||
flex-basis: 240px; /* Shrink citations */
|
||||
}
|
||||
}
|
||||
|
||||
@media screen and (max-width: 768px) {
|
||||
/* Stack layout on mobile? Or hide sidebars? Hiding for now */
|
||||
.sidebar {
|
||||
display: none; /* Hide sidebars on small screens */
|
||||
}
|
||||
/* Could add toggle buttons later */
|
||||
}
|
||||
|
||||
|
||||
/* ==== File: docs/ask_ai/ask-ai.css (Updates V4 - Delete Button) ==== */
|
||||
|
||||
|
||||
.sidebar ul li {
|
||||
/* Use flexbox to align link and delete button */
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
align-items: center;
|
||||
padding: 0; /* Remove padding from li, add to link/button */
|
||||
margin: 0.1em 0; /* Small vertical margin */
|
||||
}
|
||||
|
||||
.sidebar ul li a {
|
||||
/* Link takes most space */
|
||||
flex-grow: 1;
|
||||
padding: 0.3em 0.5em 0.3em 1em; /* Adjust padding */
|
||||
/* Make ellipsis work for long titles */
|
||||
white-space: nowrap;
|
||||
overflow: hidden;
|
||||
text-overflow: ellipsis;
|
||||
/* Keep existing link styles */
|
||||
color: var(--secondary-color, var(--fallback-secondary));
|
||||
text-decoration: none;
|
||||
display: block;
|
||||
border-radius: 3px;
|
||||
transition: background-color 0.2s, color 0.2s;
|
||||
}
|
||||
.sidebar ul li a:hover {
|
||||
color: var(--primary-color, var(--fallback-primary));
|
||||
background-color: rgba(80, 255, 255, 0.08);
|
||||
}
|
||||
|
||||
/* Style for active history item's link */
|
||||
#history-list li.active a {
|
||||
color: var(--primary-dimmed-color, var(--fallback-primary-dimmed));
|
||||
font-weight: bold;
|
||||
background-color: rgba(80, 255, 255, 0.12);
|
||||
}
|
||||
|
||||
/* --- Delete Chat Button --- */
|
||||
.delete-chat-btn {
|
||||
flex-shrink: 0; /* Don't shrink */
|
||||
background: none;
|
||||
border: none;
|
||||
color: var(--secondary-color, var(--fallback-secondary));
|
||||
cursor: pointer;
|
||||
padding: 0.4em 0.8em; /* Padding around icon */
|
||||
font-size: 0.9em;
|
||||
opacity: 0.5; /* Dimmed by default */
|
||||
transition: opacity 0.2s, color 0.2s;
|
||||
margin-left: 5px; /* Space between link and button */
|
||||
border-radius: 3px;
|
||||
}
|
||||
|
||||
.sidebar ul li:hover .delete-chat-btn,
|
||||
.delete-chat-btn:hover {
|
||||
opacity: 1; /* Show fully on hover */
|
||||
color: var(--error-color, #ff3c74); /* Use error color on hover */
|
||||
}
|
||||
.delete-chat-btn:focus {
|
||||
outline: 1px dashed var(--error-color, #ff3c74); /* Accessibility */
|
||||
opacity: 1;
|
||||
}
|
||||
docs/md_v2/ask_ai/ask-ai.js (new file, 607 lines)
@@ -0,0 +1,607 @@
|
||||
// ==== File: docs/ask_ai/ask-ai.js (Marked, Streaming, History) ====
|
||||
|
||||
document.addEventListener("DOMContentLoaded", () => {
|
||||
console.log("AI Assistant JS V2 Loaded");
|
||||
|
||||
// --- DOM Element Selectors ---
|
||||
const historyList = document.getElementById("history-list");
|
||||
const newChatButton = document.getElementById("new-chat-button");
|
||||
const chatMessages = document.getElementById("chat-messages");
|
||||
const chatInput = document.getElementById("chat-input");
|
||||
const sendButton = document.getElementById("send-button");
|
||||
const citationsList = document.getElementById("citations-list");
|
||||
|
||||
// --- Constants ---
|
||||
const CHAT_INDEX_KEY = "aiAssistantChatIndex_v1";
|
||||
const CHAT_PREFIX = "aiAssistantChat_v1_";
|
||||
|
||||
// --- State ---
|
||||
let currentChatId = null;
|
||||
let conversationHistory = []; // Holds message objects { sender: 'user'/'ai', text: '...' }
|
||||
let isThinking = false;
|
||||
let streamInterval = null; // To control the streaming interval
|
||||
|
||||
// --- Event Listeners ---
|
||||
sendButton.addEventListener("click", handleSendMessage);
|
||||
chatInput.addEventListener("keydown", handleInputKeydown);
|
||||
newChatButton.addEventListener("click", handleNewChat);
|
||||
chatInput.addEventListener("input", autoGrowTextarea);
|
||||
|
||||
// --- Initialization ---
|
||||
loadChatHistoryIndex(); // Load history list on startup
|
||||
const initialQuery = checkForInitialQuery(window.parent.location); // Check for query param
|
||||
if (!initialQuery) {
|
||||
loadInitialChat(); // Load normally if no query
|
||||
}
|
||||
|
||||
// --- Core Functions ---
|
||||
|
||||
function handleSendMessage() {
|
||||
const userMessageText = chatInput.value.trim();
|
||||
if (!userMessageText || isThinking) return;
|
||||
|
||||
setThinking(true); // Start thinking state
|
||||
|
||||
// Add user message to state and UI
|
||||
const userMessage = { sender: "user", text: userMessageText };
|
||||
conversationHistory.push(userMessage);
|
||||
addMessageToChat(userMessage, false); // Add user message without parsing markdown
|
||||
|
||||
chatInput.value = "";
|
||||
autoGrowTextarea(); // Reset textarea height
|
||||
|
||||
// Prepare for AI response (create empty div)
|
||||
const aiMessageDiv = addMessageToChat({ sender: "ai", text: "" }, true); // Add empty div with thinking indicator
|
||||
|
||||
// TODO: Generate fingerprint/JWT here
|
||||
|
||||
// TODO: Send `conversationHistory` + JWT to backend API
|
||||
// Replace placeholder below with actual API call
|
||||
// The backend should ideally return a stream of text tokens
|
||||
|
||||
// --- Placeholder Streaming Simulation ---
|
||||
  const simulatedFullResponse = `Okay, here's a minimal Python script that creates an AsyncWebCrawler, fetches a webpage, and prints the first 300 characters of its Markdown output:
|
||||
|
||||
\`\`\`python
|
||||
import asyncio
|
||||
from crawl4ai import AsyncWebCrawler
|
||||
|
||||
async def main():
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result = await crawler.arun("https://example.com")
|
||||
print(result.markdown[:300]) # Print first 300 chars
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
\`\`\`
|
||||
|
||||
A code snippet: \`crawler.run()\`. Check the [quickstart](/core/quickstart).`;
|
||||
|
||||
// Simulate receiving the response stream
|
||||
streamSimulatedResponse(aiMessageDiv, simulatedFullResponse);
|
||||
|
||||
// // Simulate receiving citations *after* stream starts (or with first chunk)
|
||||
// setTimeout(() => {
|
||||
// addCitations([
|
||||
// { title: "Simulated Doc 1", url: "#sim1" },
|
||||
// { title: "Another Concept", url: "#sim2" },
|
||||
// ]);
|
||||
// }, 500); // Citations appear shortly after thinking starts
|
||||
}
|
||||
|
||||
function handleInputKeydown(event) {
|
||||
if (event.key === "Enter" && !event.shiftKey) {
|
||||
event.preventDefault();
|
||||
handleSendMessage();
|
||||
}
|
||||
}
|
||||
|
||||
function addMessageToChat(message, addThinkingIndicator = false) {
|
||||
const messageDiv = document.createElement("div");
|
||||
messageDiv.classList.add("message", `${message.sender}-message`);
|
||||
|
||||
// Parse markdown and set HTML
|
||||
messageDiv.innerHTML = message.text ? marked.parse(message.text) : "";
|
||||
|
||||
if (message.sender === "ai") {
|
||||
// Apply Syntax Highlighting AFTER setting innerHTML
|
||||
messageDiv.querySelectorAll("pre code:not(.hljs)").forEach((block) => {
|
||||
if (typeof hljs !== "undefined") {
|
||||
// Check if already highlighted to prevent double-highlighting issues
|
||||
if (!block.classList.contains("hljs")) {
|
||||
hljs.highlightElement(block);
|
||||
}
|
||||
} else {
|
||||
console.warn("highlight.js (hljs) not found for syntax highlighting.");
|
||||
}
|
||||
});
|
||||
|
||||
// Add thinking indicator if needed (and not already present)
|
||||
if (addThinkingIndicator && !message.text && !messageDiv.querySelector(".thinking-indicator-cursor")) {
|
||||
const thinkingDiv = document.createElement("div");
|
||||
thinkingDiv.className = "thinking-indicator-cursor";
|
||||
messageDiv.appendChild(thinkingDiv);
|
||||
}
|
||||
} else {
|
||||
// User messages remain plain text
|
||||
// messageDiv.textContent = message.text;
|
||||
}
|
||||
|
||||
// wrap each pre in a div.terminal
|
||||
messageDiv.querySelectorAll("pre").forEach((block) => {
|
||||
const wrapper = document.createElement("div");
|
||||
wrapper.className = "terminal";
|
||||
block.parentNode.insertBefore(wrapper, block);
|
||||
wrapper.appendChild(block);
|
||||
});
|
||||
|
||||
chatMessages.appendChild(messageDiv);
|
||||
// Scroll only if user is near the bottom? (More advanced)
|
||||
// Simple scroll for now:
|
||||
scrollToBottom();
|
||||
return messageDiv; // Return the created element
|
||||
}
|
||||
|
||||
function streamSimulatedResponse(messageDiv, fullText) {
|
||||
const thinkingIndicator = messageDiv.querySelector(".thinking-indicator-cursor");
|
||||
if (thinkingIndicator) thinkingIndicator.remove();
|
||||
|
||||
const tokens = fullText.split(/(\s+)/);
|
||||
let currentText = "";
|
||||
let tokenIndex = 0;
|
||||
// Clear previous interval just in case
|
||||
if (streamInterval) clearInterval(streamInterval);
|
||||
|
||||
streamInterval = setInterval(() => {
|
||||
const cursorSpan = '<span class="thinking-indicator-cursor"></span>'; // Cursor for streaming
|
||||
if (tokenIndex < tokens.length) {
|
||||
currentText += tokens[tokenIndex];
|
||||
// Render intermediate markdown + cursor
|
||||
messageDiv.innerHTML = marked.parse(currentText + cursorSpan);
|
||||
// Re-highlight code blocks on each stream update - might be slightly inefficient
|
||||
// but ensures partial code blocks look okay. Highlight only final on completion.
|
||||
// messageDiv.querySelectorAll('pre code:not(.hljs)').forEach((block) => {
|
||||
// hljs.highlightElement(block);
|
||||
// });
|
||||
scrollToBottom(); // Keep scrolling as content streams
|
||||
tokenIndex++;
|
||||
} else {
|
||||
// Streaming finished
|
||||
clearInterval(streamInterval);
|
||||
streamInterval = null;
|
||||
|
||||
// Final render without cursor
|
||||
messageDiv.innerHTML = marked.parse(currentText);
|
||||
|
||||
// === Final Syntax Highlighting ===
|
||||
messageDiv.querySelectorAll("pre code:not(.hljs)").forEach((block) => {
|
||||
if (typeof hljs !== "undefined" && !block.classList.contains("hljs")) {
|
||||
hljs.highlightElement(block);
|
||||
}
|
||||
});
|
||||
|
||||
// === Extract Citations ===
|
||||
const citations = extractMarkdownLinks(currentText);
|
||||
|
||||
// Wrap each pre in a div.terminal
|
||||
messageDiv.querySelectorAll("pre").forEach((block) => {
|
||||
const wrapper = document.createElement("div");
|
||||
wrapper.className = "terminal";
|
||||
block.parentNode.insertBefore(wrapper, block);
|
||||
wrapper.appendChild(block);
|
||||
});
|
||||
|
||||
const aiMessage = { sender: "ai", text: currentText, citations: citations };
|
||||
conversationHistory.push(aiMessage);
|
||||
updateCitationsDisplay();
|
||||
saveCurrentChat();
|
||||
setThinking(false);
|
||||
}
|
||||
}, 50); // Adjust speed
|
||||
}
|
||||
|
||||
// === NEW Function to Extract Links ===
|
||||
function extractMarkdownLinks(markdownText) {
|
||||
const regex = /\[([^\]]+)\]\(([^)]+)\)/g; // [text](url)
|
||||
const citations = [];
|
||||
let match;
|
||||
while ((match = regex.exec(markdownText)) !== null) {
|
||||
// Avoid adding self-links from within the citations list if AI includes them
|
||||
if (!match[2].startsWith("#citation-")) {
|
||||
citations.push({
|
||||
title: match[1].trim(),
|
||||
url: match[2].trim(),
|
||||
});
|
||||
}
|
||||
}
|
||||
// Optional: Deduplicate links based on URL
|
||||
const uniqueCitations = citations.filter(
|
||||
(citation, index, self) => index === self.findIndex((c) => c.url === citation.url)
|
||||
);
|
||||
return uniqueCitations;
|
||||
}
|
||||
|
||||
  // === REVISED Function to Display Citations ===
  function updateCitationsDisplay() {
    let lastCitations = null;
    // Find the most recent AI message with citations
    for (let i = conversationHistory.length - 1; i >= 0; i--) {
      if (
        conversationHistory[i].sender === "ai" &&
        conversationHistory[i].citations &&
        conversationHistory[i].citations.length > 0
      ) {
        lastCitations = conversationHistory[i].citations;
        break; // Found the latest citations
      }
    }

    citationsList.innerHTML = ""; // Clear previous
    if (!lastCitations) {
      citationsList.innerHTML = '<li class="no-citations">No citations available.</li>';
      return;
    }

    lastCitations.forEach((citation, index) => {
      const li = document.createElement("li");
      const a = document.createElement("a");
      // Generate a unique ID for potential internal linking if needed
      // a.id = `citation-${index}`;
      a.href = citation.url || "#";
      a.textContent = citation.title;
      a.target = "_top"; // Open in main window
      li.appendChild(a);
      citationsList.appendChild(li);
    });
  }
  function addCitations(citations) {
    citationsList.innerHTML = ""; // Clear
    if (!citations || citations.length === 0) {
      citationsList.innerHTML = '<li class="no-citations">No citations available.</li>';
      return;
    }
    citations.forEach((citation) => {
      const li = document.createElement("li");
      const a = document.createElement("a");
      a.href = citation.url || "#";
      a.textContent = citation.title;
      a.target = "_top"; // Open in main window
      li.appendChild(a);
      citationsList.appendChild(li);
    });
  }
  function setThinking(thinking) {
    isThinking = thinking;
    sendButton.disabled = thinking;
    chatInput.disabled = thinking;
    chatInput.placeholder = thinking ? "AI is responding..." : "Ask about Crawl4AI...";
    // Stop any existing stream if we start thinking again (e.g., rapid resend)
    if (thinking && streamInterval) {
      clearInterval(streamInterval);
      streamInterval = null;
    }
  }

  function autoGrowTextarea() {
    chatInput.style.height = "auto";
    chatInput.style.height = `${chatInput.scrollHeight}px`;
  }

  function scrollToBottom() {
    chatMessages.scrollTop = chatMessages.scrollHeight;
  }
  // --- Query Parameter Handling ---
  function checkForInitialQuery(locationToCheck) {
    // <-- Receives the location object to inspect
    if (!locationToCheck) {
      console.warn("Ask AI: Could not access parent window location.");
      return false;
    }
    const urlParams = new URLSearchParams(locationToCheck.search); // <-- Use passed location's search string
    const encodedQuery = urlParams.get("qq"); // <-- Use 'qq'

    if (encodedQuery) {
      console.log("Initial query found (qq):", encodedQuery);
      try {
        const decodedText = decodeURIComponent(escape(atob(encodedQuery)));
        console.log("Decoded query:", decodedText);

        // Start new chat immediately
        handleNewChat(true);

        // Delay setting input and sending message slightly
        setTimeout(() => {
          chatInput.value = decodedText;
          autoGrowTextarea();
          handleSendMessage();

          // Clean the PARENT window's URL
          try {
            const cleanUrl = locationToCheck.pathname;
            // Use parent's history object
            window.parent.history.replaceState({}, window.parent.document.title, cleanUrl);
          } catch (e) {
            console.warn("Ask AI: Could not clean parent URL using replaceState.", e);
            // This might fail due to cross-origin restrictions if served differently,
            // but should work fine with mkdocs serve on the same origin.
          }
        }, 100);

        return true; // Query processed
      } catch (e) {
        console.error("Error decoding initial query (qq):", e);
        // Clean the PARENT window's URL even on error
        try {
          const cleanUrl = locationToCheck.pathname;
          window.parent.history.replaceState({}, window.parent.document.title, cleanUrl);
        } catch (cleanError) {
          console.warn("Ask AI: Could not clean parent URL after decode error.", cleanError);
        }
        return false;
      }
    }
    return false; // No 'qq' query found
  }
  // --- History Management ---

  function handleNewChat(isFromQuery = false) {
    if (isThinking) return; // Don't allow new chat while responding

    // Only save if NOT triggered immediately by a query parameter load
    if (!isFromQuery) {
      saveCurrentChat();
    }

    currentChatId = `chat_${Date.now()}`;
    conversationHistory = []; // Clear message history state
    chatMessages.innerHTML = ""; // Start with a clean slate for the query
    if (!isFromQuery) {
      // Show welcome only if manually started
      // chatMessages.innerHTML =
      //   '<div class="message ai-message welcome-message">Started a new chat! Ask me anything about Crawl4AI.</div>';
      chatMessages.innerHTML =
        '<div class="message ai-message welcome-message">We will launch this feature very soon.</div>';
    }
    addCitations([]); // Clear citations
    updateCitationsDisplay(); // Clear UI

    // Add to index and save
    let index = loadChatIndex();
    // Generate a generic title initially, update later
    const newTitle = isFromQuery ? "Chat from Selection" : `Chat ${new Date().toLocaleString()}`;
    // index.unshift({ id: currentChatId, title: `Chat ${new Date().toLocaleString()}` }); // Add to start
    index.unshift({ id: currentChatId, title: newTitle });
    saveChatIndex(index);

    renderHistoryList(index); // Update UI
    setActiveHistoryItem(currentChatId);
    saveCurrentChat(); // Save the empty new chat state
  }
  function loadChat(chatId) {
    if (isThinking || chatId === currentChatId) return;

    // Check if chat data actually exists before proceeding
    const storedChat = localStorage.getItem(CHAT_PREFIX + chatId);
    if (storedChat === null) {
      console.warn(`Attempted to load non-existent chat: ${chatId}. Removing from index.`);
      deleteChatData(chatId); // Clean up index
      loadChatHistoryIndex(); // Reload history list
      loadInitialChat(); // Load next available chat
      return;
    }

    console.log(`Loading chat: ${chatId}`);
    saveCurrentChat(); // Save current before switching

    try {
      conversationHistory = JSON.parse(storedChat);
      currentChatId = chatId;
      renderChatMessages(conversationHistory);
      updateCitationsDisplay();
      setActiveHistoryItem(chatId);
    } catch (e) {
      console.error("Error loading chat:", chatId, e);
      alert("Failed to load chat data.");
      conversationHistory = [];
      renderChatMessages(conversationHistory);
      updateCitationsDisplay();
    }
  }
  function saveCurrentChat() {
    if (currentChatId && conversationHistory.length > 0) {
      try {
        localStorage.setItem(CHAT_PREFIX + currentChatId, JSON.stringify(conversationHistory));
        console.log(`Chat ${currentChatId} saved.`);

        // Update title in index (e.g., use first user message)
        let index = loadChatIndex();
        const currentItem = index.find((item) => item.id === currentChatId);
        if (
          currentItem &&
          conversationHistory[0]?.sender === "user" &&
          !currentItem.title.startsWith("Chat about:")
        ) {
          currentItem.title = `Chat about: ${conversationHistory[0].text.substring(0, 30)}...`;
          saveChatIndex(index);
          // Re-render the history list so the updated title shows (a full re-render; could be optimized)
          renderHistoryList(index);
          setActiveHistoryItem(currentChatId); // Re-set active after re-render
        }
      } catch (e) {
        console.error("Error saving chat:", currentChatId, e);
        // Handle potential storage full errors
        if (e.name === "QuotaExceededError") {
          alert("Local storage is full. Cannot save chat history.");
          // Consider implementing history pruning logic here
        }
      }
    } else if (currentChatId) {
      // Save empty state for newly created chats if needed, or remove?
      localStorage.setItem(CHAT_PREFIX + currentChatId, JSON.stringify([]));
    }
  }
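  // Illustrative pruning sketch (not part of the committed file): on
  // QuotaExceededError the oldest chats could be evicted before retrying, e.g.
  //   let idx = loadChatIndex();
  //   while (idx.length > 1) {
  //     const oldest = idx.pop(); // index is kept newest-first by handleNewChat()
  //     localStorage.removeItem(CHAT_PREFIX + oldest.id);
  //     try {
  //       localStorage.setItem(CHAT_PREFIX + currentChatId, JSON.stringify(conversationHistory));
  //       saveChatIndex(idx);
  //       break;
  //     } catch (_) { /* storage still full, keep pruning */ }
  //   }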
  function loadChatIndex() {
    try {
      const storedIndex = localStorage.getItem(CHAT_INDEX_KEY);
      return storedIndex ? JSON.parse(storedIndex) : [];
    } catch (e) {
      console.error("Error loading chat index:", e);
      return []; // Return empty array on error
    }
  }

  function saveChatIndex(indexArray) {
    try {
      localStorage.setItem(CHAT_INDEX_KEY, JSON.stringify(indexArray));
    } catch (e) {
      console.error("Error saving chat index:", e);
    }
  }
  function renderHistoryList(indexArray) {
    historyList.innerHTML = ""; // Clear existing
    if (!indexArray || indexArray.length === 0) {
      historyList.innerHTML = '<li class="no-history">No past chats found.</li>';
      return;
    }
    indexArray.forEach((item) => {
      const li = document.createElement("li");
      li.dataset.chatId = item.id; // Add ID to li for easier selection

      const a = document.createElement("a");
      a.href = "#";
      a.dataset.chatId = item.id;
      a.textContent = item.title || `Chat ${item.id.split("_")[1] || item.id}`;
      a.title = a.textContent; // Tooltip for potentially long titles
      a.addEventListener("click", (e) => {
        e.preventDefault();
        loadChat(item.id);
      });

      // === Add Delete Button ===
      const deleteBtn = document.createElement("button");
      deleteBtn.className = "delete-chat-btn";
      deleteBtn.innerHTML = "✕"; // "✕" close icon (or swap in text/SVG/FontAwesome)
      deleteBtn.title = "Delete Chat";
      deleteBtn.dataset.chatId = item.id; // Store ID on button too
      deleteBtn.addEventListener("click", handleDeleteChat);

      li.appendChild(a);
      li.appendChild(deleteBtn); // Append button to the list item
      historyList.appendChild(li);
    });
  }
  function renderChatMessages(messages) {
    chatMessages.innerHTML = ""; // Clear existing messages
    messages.forEach((message) => {
      // Ensure highlighting is applied when loading from history
      addMessageToChat(message, false);
    });
    if (messages.length === 0) {
      // chatMessages.innerHTML =
      //   '<div class="message ai-message welcome-message">Chat history loaded. Ask a question!</div>';
      chatMessages.innerHTML =
        '<div class="message ai-message welcome-message">We will launch this feature very soon.</div>';
    }
    // Scroll to bottom after loading messages
    scrollToBottom();
  }
  function setActiveHistoryItem(chatId) {
    document.querySelectorAll("#history-list li").forEach((li) => li.classList.remove("active"));
    // Select the LI element directly now
    const activeLi = document.querySelector(`#history-list li[data-chat-id="${chatId}"]`);
    if (activeLi) {
      activeLi.classList.add("active");
    }
  }

  function loadInitialChat() {
    const index = loadChatIndex();
    if (index.length > 0) {
      loadChat(index[0].id);
    } else {
      // Check if handleNewChat wasn't already called by the query handler
      if (!currentChatId) {
        handleNewChat();
      }
    }
  }

  function loadChatHistoryIndex() {
    const index = loadChatIndex();
    renderHistoryList(index);
    if (currentChatId) setActiveHistoryItem(currentChatId);
  }
  // === NEW Function to Handle Delete Click ===
  function handleDeleteChat(event) {
    event.stopPropagation(); // Prevent triggering loadChat on the link behind it
    const button = event.currentTarget;
    const chatIdToDelete = button.dataset.chatId;

    if (!chatIdToDelete) return;

    // Confirmation dialog
    if (
      window.confirm(
        `Are you sure you want to delete this chat session?\n"${
          button.previousElementSibling?.textContent || "Chat " + chatIdToDelete
        }"`
      )
    ) {
      console.log(`Deleting chat: ${chatIdToDelete}`);

      // Perform deletion
      const updatedIndex = deleteChatData(chatIdToDelete);

      // If the deleted chat was the currently active one, load another chat
      if (currentChatId === chatIdToDelete) {
        currentChatId = null; // Reset current ID
        conversationHistory = []; // Clear state
        if (updatedIndex.length > 0) {
          // Load the new top chat (most recent remaining)
          loadChat(updatedIndex[0].id);
        } else {
          // No chats left, start a new one
          handleNewChat();
        }
      } else {
        // If a different chat was deleted, just re-render the list
        renderHistoryList(updatedIndex);
        // Re-apply active state in case IDs shifted (though they shouldn't)
        setActiveHistoryItem(currentChatId);
      }
    }
  }
  // === NEW Function to Delete Chat Data ===
  function deleteChatData(chatId) {
    // Remove chat data
    localStorage.removeItem(CHAT_PREFIX + chatId);

    // Update index
    let index = loadChatIndex();
    index = index.filter((item) => item.id !== chatId);
    saveChatIndex(index);

    console.log(`Chat ${chatId} data and index entry removed.`);
    return index; // Return the updated index
  }
  // --- Virtual Scrolling Placeholder ---
  // NOTE: Virtual scrolling is complex. For now, we do direct rendering.
  // If performance becomes an issue with very long chats/history,
  // investigate libraries like 'simple-virtual-scroll' or 'virtual-scroller'.
  // You would replace parts of `renderChatMessages` and `renderHistoryList`
  // to work with the chosen library's API (providing data and item renderers).
  console.warn("Virtual scrolling not implemented. Performance may degrade with very long chat histories.");
});
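The `qq` handling in `checkForInitialQuery` assumes the parent docs page supplies a base64-encoded, UTF-8-safe query string. A minimal sketch of that encoding side follows; the helper name and trigger are assumptions for illustration only and are not part of this diff.

// Hypothetical helper on a docs page (illustrative only)
function askAiAbout(text) {
  // Mirror of decodeURIComponent(escape(atob(qq))): UTF-8 text -> base64
  const qq = btoa(unescape(encodeURIComponent(text)));
  const url = new URL(window.location.href);
  url.searchParams.set("qq", qq);
  // Reloading with ?qq=... lets the embedded Ask AI frame read it via window.parent.location
  window.location.href = url.toString();
}

// e.g. askAiAbout(window.getSelection().toString());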
64
docs/md_v2/ask_ai/index.html
Normal file
@@ -0,0 +1,64 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Crawl4AI Assistant</title>
    <!-- Link main styles first for variable access -->
    <link rel="stylesheet" href="../assets/layout.css">
    <link rel="stylesheet" href="../assets/styles.css">
    <!-- Link specific AI styles -->
    <link rel="stylesheet" href="../assets/highlight.css">
    <link rel="stylesheet" href="ask-ai.css">
</head>
<body>
    <div class="ai-assistant-container">

        <!-- Left Sidebar: Conversation History -->
        <aside id="history-panel" class="sidebar left-sidebar">
            <header>
                <h3>History</h3>
                <button id="new-chat-button" class="btn btn-sm">New Chat</button>
            </header>
            <ul id="history-list">
                <!-- History items populated by JS -->
            </ul>
        </aside>

        <!-- Main Area: Chat Interface -->
        <main id="chat-panel">
            <div id="chat-messages">
                <!-- Chat messages populated by JS -->
                <div class="message ai-message welcome-message">
                    Welcome to the Crawl4AI Assistant! How can I help you today?
                </div>
            </div>
            <div id="chat-input-area">
                <!-- Loading indicator for general waiting (optional) -->
                <!-- <div class="loading-indicator" style="display: none;">Thinking...</div> -->
                <textarea id="chat-input" placeholder="We will roll out this feature very soon." rows="2" disabled></textarea>
                <button id="send-button">Send</button>
            </div>
        </main>

        <!-- Right Sidebar: Citations / Context -->
        <aside id="citations-panel" class="sidebar right-sidebar">
            <header>
                <h3>Citations</h3>
            </header>
            <ul id="citations-list">
                <!-- Citations populated by JS -->
                <li class="no-citations">No citations for this response yet.</li>
            </ul>
        </aside>

    </div>

    <!-- Include Marked.js library -->
    <script src="https://cdn.jsdelivr.net/npm/marked/marked.min.js"></script>
    <script src="../assets/highlight.min.js"></script>

    <!-- Your AI Assistant Logic -->
    <script src="ask-ai.js"></script>
</body>
</html>
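Since the assistant script reads `window.parent.location` and citation links use `target="_top"`, this page appears intended to run inside an iframe on the docs site. A minimal embedding sketch is shown below; the container id and path are assumptions, not part of this diff.

// Hypothetical embedding on a docs page (illustrative only)
const frame = document.createElement("iframe");
frame.src = "ask_ai/index.html"; // adjust to the site's actual layout
frame.title = "Crawl4AI Assistant";
frame.style.cssText = "width:100%;height:600px;border:none;";
document.getElementById("ask-ai-container")?.appendChild(frame);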
62
docs/md_v2/assets/copy_code.js
Normal file
@@ -0,0 +1,62 @@
// ==== File: docs/md_v2/assets/copy_code.js ====

document.addEventListener('DOMContentLoaded', () => {
    // Target specifically code blocks within the main content area
    const codeBlocks = document.querySelectorAll('#terminal-mkdocs-main-content pre > code');

    codeBlocks.forEach((codeElement) => {
        const preElement = codeElement.parentElement; // The <pre> tag

        // Ensure the <pre> tag can contain a positioned button
        if (window.getComputedStyle(preElement).position === 'static') {
            preElement.style.position = 'relative';
        }

        // Create the button
        const copyButton = document.createElement('button');
        copyButton.className = 'copy-code-button';
        copyButton.type = 'button';
        copyButton.setAttribute('aria-label', 'Copy code to clipboard');
        copyButton.title = 'Copy code to clipboard';
        copyButton.innerHTML = 'Copy'; // Or use an icon like an SVG or FontAwesome class

        // Append the button to the <pre> element
        preElement.appendChild(copyButton);

        // Add click event listener
        copyButton.addEventListener('click', () => {
            copyCodeToClipboard(codeElement, copyButton);
        });
    });

    async function copyCodeToClipboard(codeElement, button) {
        // Use innerText to get the rendered text content, preserving line breaks
        const textToCopy = codeElement.innerText;

        try {
            await navigator.clipboard.writeText(textToCopy);

            // Visual feedback
            button.innerHTML = 'Copied!';
            button.classList.add('copied');
            button.disabled = true; // Temporarily disable

            // Revert button state after a short delay
            setTimeout(() => {
                button.innerHTML = 'Copy';
                button.classList.remove('copied');
                button.disabled = false;
            }, 2000); // Show "Copied!" for 2 seconds

        } catch (err) {
            console.error('Failed to copy code: ', err);
            // Optional: Provide error feedback on the button
            button.innerHTML = 'Error';
            setTimeout(() => {
                button.innerHTML = 'Copy';
            }, 2000);
        }
    }

    console.log("Copy Code Button script loaded.");
});
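One caveat worth noting: `navigator.clipboard.writeText` is only exposed in secure contexts (HTTPS or localhost). If the docs were ever served over plain HTTP, a guarded fallback along these lines could be added; this is a sketch only, not part of this diff.

// Fallback sketch (illustrative only)
function copyTextCompat(text) {
    if (window.isSecureContext && navigator.clipboard) {
        return navigator.clipboard.writeText(text);
    }
    // Legacy path: temporary textarea + execCommand('copy') (deprecated but widely supported)
    const ta = document.createElement('textarea');
    ta.value = text;
    ta.style.position = 'fixed';
    ta.style.opacity = '0';
    document.body.appendChild(ta);
    ta.select();
    document.execCommand('copy');
    document.body.removeChild(ta);
    return Promise.resolve();
}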
Some files were not shown because too many files have changed in this diff.