Compare commits: fix/linkPr ... docker/add

15 Commits:

- a3b02be5c3
- 00e9904609
- 3877335d89
- 74eeff4c51
- 674d0741da
- aebf5a3694
- 8cca9704eb
- 201843a204
- f00e8cbf35
- 5dc34dd210
- a599db8f7b
- 1a8e0236af
- a62cfeebd9
- bb3b29042f
- 1ea021b721
.github/workflows/docker-release.yml (vendored, 81 changed lines)
@@ -1,81 +0,0 @@
name: Docker Release

on:
  release:
    types: [published]
  push:
    tags:
      - 'docker-rebuild-v*' # Allow manual Docker rebuilds via tags

jobs:
  docker:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Extract version from release or tag
        id: get_version
        run: |
          if [ "${{ github.event_name }}" == "release" ]; then
            # Triggered by release event
            VERSION="${{ github.event.release.tag_name }}"
            VERSION=${VERSION#v} # Remove 'v' prefix
          else
            # Triggered by docker-rebuild-v* tag
            VERSION=${GITHUB_REF#refs/tags/docker-rebuild-v}
          fi
          echo "VERSION=$VERSION" >> $GITHUB_OUTPUT
          echo "Building Docker images for version: $VERSION"

      - name: Extract major and minor versions
        id: versions
        run: |
          VERSION=${{ steps.get_version.outputs.VERSION }}
          MAJOR=$(echo $VERSION | cut -d. -f1)
          MINOR=$(echo $VERSION | cut -d. -f1-2)
          echo "MAJOR=$MAJOR" >> $GITHUB_OUTPUT
          echo "MINOR=$MINOR" >> $GITHUB_OUTPUT
          echo "Semantic versions - Major: $MAJOR, Minor: $MINOR"

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Log in to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_TOKEN }}

      - name: Build and push Docker images
        uses: docker/build-push-action@v5
        with:
          context: .
          push: true
          tags: |
            unclecode/crawl4ai:${{ steps.get_version.outputs.VERSION }}
            unclecode/crawl4ai:${{ steps.versions.outputs.MINOR }}
            unclecode/crawl4ai:${{ steps.versions.outputs.MAJOR }}
            unclecode/crawl4ai:latest
          platforms: linux/amd64,linux/arm64
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Summary
        run: |
          echo "## 🐳 Docker Release Complete!" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### Published Images" >> $GITHUB_STEP_SUMMARY
          echo "- \`unclecode/crawl4ai:${{ steps.get_version.outputs.VERSION }}\`" >> $GITHUB_STEP_SUMMARY
          echo "- \`unclecode/crawl4ai:${{ steps.versions.outputs.MINOR }}\`" >> $GITHUB_STEP_SUMMARY
          echo "- \`unclecode/crawl4ai:${{ steps.versions.outputs.MAJOR }}\`" >> $GITHUB_STEP_SUMMARY
          echo "- \`unclecode/crawl4ai:latest\`" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### Platforms" >> $GITHUB_STEP_SUMMARY
          echo "- linux/amd64" >> $GITHUB_STEP_SUMMARY
          echo "- linux/arm64" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### 🚀 Pull Command" >> $GITHUB_STEP_SUMMARY
          echo "\`\`\`bash" >> $GITHUB_STEP_SUMMARY
          echo "docker pull unclecode/crawl4ai:${{ steps.get_version.outputs.VERSION }}" >> $GITHUB_STEP_SUMMARY
          echo "\`\`\`" >> $GITHUB_STEP_SUMMARY
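Beyond the automatic `release.published` trigger, the workflow removed above also allowed Docker images to be rebuilt manually by pushing a `docker-rebuild-v*` tag. A minimal sketch of that trigger (version number illustrative), matching the commands documented in the deleted WORKFLOW_REFERENCE.md later in this diff:

```bash
# Rebuild and republish the 1.2.3 Docker images without cutting a new PyPI release
git tag docker-rebuild-v1.2.3
git push origin docker-rebuild-v1.2.3
```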
.github/workflows/docs/ARCHITECTURE.md (vendored, 917 changed lines)
@@ -1,917 +0,0 @@
|
||||
# Workflow Architecture Documentation
|
||||
|
||||
## Overview
|
||||
|
||||
This document describes the technical architecture of the split release pipeline for Crawl4AI.
|
||||
|
||||
---
|
||||
|
||||
## Architecture Diagram
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ Developer │
|
||||
│ │ │
|
||||
│ ▼ │
|
||||
│ git tag v1.2.3 │
|
||||
│ git push --tags │
|
||||
└──────────────────────────────┬──────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ GitHub Repository │
|
||||
│ │
|
||||
│ ┌────────────────────────────────────────────────────────┐ │
|
||||
│ │ Tag Event: v1.2.3 │ │
|
||||
│ └────────────────────────────────────────────────────────┘ │
|
||||
│ │ │
|
||||
│ ▼ │
|
||||
│ ┌────────────────────────────────────────────────────────┐ │
|
||||
│ │ release.yml (Release Pipeline) │ │
|
||||
│ │ ┌──────────────────────────────────────────────┐ │ │
|
||||
│ │ │ 1. Extract Version │ │ │
|
||||
│ │ │ v1.2.3 → 1.2.3 │ │ │
|
||||
│ │ └──────────────────────────────────────────────┘ │ │
|
||||
│ │ ┌──────────────────────────────────────────────┐ │ │
|
||||
│ │ │ 2. Validate Version │ │ │
|
||||
│ │ │ Tag == __version__.py │ │ │
|
||||
│ │ └──────────────────────────────────────────────┘ │ │
|
||||
│ │ ┌──────────────────────────────────────────────┐ │ │
|
||||
│ │ │ 3. Build Python Package │ │ │
|
||||
│ │ │ - Source dist (.tar.gz) │ │ │
|
||||
│ │ │ - Wheel (.whl) │ │ │
|
||||
│ │ └──────────────────────────────────────────────┘ │ │
|
||||
│ │ ┌──────────────────────────────────────────────┐ │ │
|
||||
│ │ │ 4. Upload to PyPI │ │ │
|
||||
│ │ │ - Authenticate with token │ │ │
|
||||
│ │ │ - Upload dist/* │ │ │
|
||||
│ │ └──────────────────────────────────────────────┘ │ │
|
||||
│ │ ┌──────────────────────────────────────────────┐ │ │
|
||||
│ │ │ 5. Create GitHub Release │ │ │
|
||||
│ │ │ - Tag: v1.2.3 │ │ │
|
||||
│ │ │ - Body: Install instructions │ │ │
|
||||
│ │ │ - Status: Published │ │ │
|
||||
│ │ └──────────────────────────────────────────────┘ │ │
|
||||
│ └────────────────────────────────────────────────────────┘ │
|
||||
│ │ │
|
||||
│ ▼ │
|
||||
│ ┌────────────────────────────────────────────────────────┐ │
|
||||
│ │ Release Event: published (v1.2.3) │ │
|
||||
│ └────────────────────────────────────────────────────────┘ │
|
||||
│ │ │
|
||||
│ ▼ │
|
||||
│ ┌────────────────────────────────────────────────────────┐ │
|
||||
│ │ docker-release.yml (Docker Pipeline) │ │
|
||||
│ │ ┌──────────────────────────────────────────────┐ │ │
|
||||
│ │ │ 1. Extract Version from Release │ │ │
|
||||
│ │ │ github.event.release.tag_name → 1.2.3 │ │ │
|
||||
│ │ └──────────────────────────────────────────────┘ │ │
|
||||
│ │ ┌──────────────────────────────────────────────┐ │ │
|
||||
│ │ │ 2. Parse Semantic Versions │ │ │
|
||||
│ │ │ 1.2.3 → Major: 1, Minor: 1.2 │ │ │
|
||||
│ │ └──────────────────────────────────────────────┘ │ │
|
||||
│ │ ┌──────────────────────────────────────────────┐ │ │
|
||||
│ │ │ 3. Setup Multi-Arch Build │ │ │
|
||||
│ │ │ - Docker Buildx │ │ │
|
||||
│ │ │ - QEMU emulation │ │ │
|
||||
│ │ └──────────────────────────────────────────────┘ │ │
|
||||
│ │ ┌──────────────────────────────────────────────┐ │ │
|
||||
│ │ │ 4. Authenticate Docker Hub │ │ │
|
||||
│ │ │ - Username: DOCKER_USERNAME │ │ │
|
||||
│ │ │ - Token: DOCKER_TOKEN │ │ │
|
||||
│ │ └──────────────────────────────────────────────┘ │ │
|
||||
│ │ ┌──────────────────────────────────────────────┐ │ │
|
||||
│ │ │ 5. Build Multi-Arch Images │ │ │
|
||||
│ │ │ ┌────────────────┬────────────────┐ │ │ │
|
||||
│ │ │ │ linux/amd64 │ linux/arm64 │ │ │ │
|
||||
│ │ │ └────────────────┴────────────────┘ │ │ │
|
||||
│ │ │ Cache: GitHub Actions (type=gha) │ │ │
|
||||
│ │ └──────────────────────────────────────────────┘ │ │
|
||||
│ │ ┌──────────────────────────────────────────────┐ │ │
|
||||
│ │ │ 6. Push to Docker Hub │ │ │
|
||||
│ │ │ Tags: │ │ │
|
||||
│ │ │ - unclecode/crawl4ai:1.2.3 │ │ │
|
||||
│ │ │ - unclecode/crawl4ai:1.2 │ │ │
|
||||
│ │ │ - unclecode/crawl4ai:1 │ │ │
|
||||
│ │ │ - unclecode/crawl4ai:latest │ │ │
|
||||
│ │ └──────────────────────────────────────────────┘ │ │
|
||||
│ └────────────────────────────────────────────────────────┘ │
|
||||
└─────────────────────────────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ External Services │
|
||||
│ │
|
||||
│ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ │
|
||||
│ │ PyPI │ │ Docker Hub │ │ GitHub │ │
|
||||
│ │ │ │ │ │ │ │
|
||||
│ │ crawl4ai │ │ unclecode/ │ │ Releases │ │
|
||||
│ │ 1.2.3 │ │ crawl4ai │ │ v1.2.3 │ │
|
||||
│ └──────────────┘ └──────────────┘ └──────────────┘ │
|
||||
└─────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Component Details
|
||||
|
||||
### 1. Release Pipeline (release.yml)
|
||||
|
||||
#### Purpose
|
||||
Fast publication of Python package and GitHub release.
|
||||
|
||||
#### Input
|
||||
- **Trigger**: Git tag matching `v*` (excluding `test-v*`)
|
||||
- **Example**: `v1.2.3`
|
||||
|
||||
#### Processing Stages
|
||||
|
||||
##### Stage 1: Version Extraction
|
||||
```bash
|
||||
Input: refs/tags/v1.2.3
|
||||
Output: VERSION=1.2.3
|
||||
```
|
||||
|
||||
**Implementation**:
|
||||
```bash
|
||||
TAG_VERSION=${GITHUB_REF#refs/tags/v} # Remove 'refs/tags/v' prefix
|
||||
echo "VERSION=$TAG_VERSION" >> $GITHUB_OUTPUT
|
||||
```
|
||||
|
||||
##### Stage 2: Version Validation
|
||||
```bash
|
||||
Input: TAG_VERSION=1.2.3
|
||||
Check: crawl4ai/__version__.py contains __version__ = "1.2.3"
|
||||
Output: Pass/Fail
|
||||
```
|
||||
|
||||
**Implementation**:
|
||||
```bash
|
||||
PACKAGE_VERSION=$(python -c "from crawl4ai.__version__ import __version__; print(__version__)")
|
||||
if [ "$TAG_VERSION" != "$PACKAGE_VERSION" ]; then
|
||||
exit 1
|
||||
fi
|
||||
```
|
||||
|
||||
##### Stage 3: Package Build
|
||||
```bash
|
||||
Input: Source code + pyproject.toml
|
||||
Output: dist/crawl4ai-1.2.3.tar.gz
|
||||
dist/crawl4ai-1.2.3-py3-none-any.whl
|
||||
```
|
||||
|
||||
**Implementation**:
|
||||
```bash
|
||||
python -m build
|
||||
# Uses build backend defined in pyproject.toml
|
||||
```
|
||||
|
||||
##### Stage 4: PyPI Upload
|
||||
```bash
|
||||
Input: dist/*.{tar.gz,whl}
|
||||
Auth: PYPI_TOKEN
|
||||
Output: Package published to PyPI
|
||||
```
|
||||
|
||||
**Implementation**:
|
||||
```bash
|
||||
twine upload dist/*
|
||||
# Environment:
|
||||
# TWINE_USERNAME: __token__
|
||||
# TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }}
|
||||
```
|
||||
|
||||
##### Stage 5: GitHub Release Creation
|
||||
```bash
|
||||
Input: Tag: v1.2.3
|
||||
Body: Markdown content
|
||||
Output: Published GitHub release
|
||||
```
|
||||
|
||||
**Implementation**:
|
||||
```yaml
|
||||
uses: softprops/action-gh-release@v2
|
||||
with:
|
||||
tag_name: v1.2.3
|
||||
name: Release v1.2.3
|
||||
body: |
|
||||
Installation instructions and changelog
|
||||
draft: false
|
||||
prerelease: false
|
||||
```
|
||||
|
||||
#### Output
|
||||
- **PyPI Package**: https://pypi.org/project/crawl4ai/1.2.3/
|
||||
- **GitHub Release**: Published release on repository
|
||||
- **Event**: `release.published` (triggers Docker workflow)
|
||||
|
||||
#### Timeline
|
||||
```
|
||||
0:00 - Tag pushed
|
||||
0:01 - Checkout + Python setup
|
||||
0:02 - Version validation
|
||||
0:03 - Package build
|
||||
0:04 - PyPI upload starts
|
||||
0:06 - PyPI upload complete
|
||||
0:07 - GitHub release created
|
||||
0:08 - Workflow complete
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 2. Docker Release Pipeline (docker-release.yml)
|
||||
|
||||
#### Purpose
|
||||
Build and publish multi-architecture Docker images.
|
||||
|
||||
#### Inputs
|
||||
|
||||
##### Input 1: Release Event (Automatic)
|
||||
```yaml
|
||||
Event: release.published
|
||||
Data: github.event.release.tag_name = "v1.2.3"
|
||||
```
|
||||
|
||||
##### Input 2: Docker Rebuild Tag (Manual)
|
||||
```yaml
|
||||
Tag: docker-rebuild-v1.2.3
|
||||
```
|
||||
|
||||
#### Processing Stages
|
||||
|
||||
##### Stage 1: Version Detection
|
||||
```bash
|
||||
# From release event:
|
||||
VERSION = github.event.release.tag_name.strip("v")
|
||||
# Result: "1.2.3"
|
||||
|
||||
# From rebuild tag:
|
||||
VERSION = GITHUB_REF.replace("refs/tags/docker-rebuild-v", "")
|
||||
# Result: "1.2.3"
|
||||
```
|
||||
|
||||
##### Stage 2: Semantic Version Parsing
|
||||
```bash
|
||||
Input: VERSION=1.2.3
|
||||
Output: MAJOR=1
|
||||
MINOR=1.2
|
||||
PATCH=3 (implicit)
|
||||
```
|
||||
|
||||
**Implementation**:
|
||||
```bash
|
||||
MAJOR=$(echo $VERSION | cut -d. -f1) # Extract first component
|
||||
MINOR=$(echo $VERSION | cut -d. -f1-2) # Extract first two components
|
||||
```
|
||||
|
||||
##### Stage 3: Multi-Architecture Setup
|
||||
```yaml
|
||||
Setup:
|
||||
- Docker Buildx (multi-platform builder)
|
||||
- QEMU (ARM emulation on x86)
|
||||
|
||||
Platforms:
|
||||
- linux/amd64 (x86_64)
|
||||
- linux/arm64 (aarch64)
|
||||
```
|
||||
|
||||
**Architecture**:
|
||||
```
|
||||
GitHub Runner (linux/amd64)
|
||||
├─ Buildx Builder
|
||||
│ ├─ Native: Build linux/amd64 image
|
||||
│ └─ QEMU: Emulate ARM to build linux/arm64 image
|
||||
└─ Generate manifest list (points to both images)
|
||||
```
|
||||
|
||||
##### Stage 4: Docker Hub Authentication
|
||||
```bash
|
||||
Input: DOCKER_USERNAME
|
||||
DOCKER_TOKEN
|
||||
Output: Authenticated Docker client
|
||||
```
|
||||
|
||||
##### Stage 5: Build with Cache
|
||||
```yaml
|
||||
Cache Configuration:
|
||||
cache-from: type=gha # Read from GitHub Actions cache
|
||||
cache-to: type=gha,mode=max # Write all layers
|
||||
|
||||
Cache Key Components:
|
||||
- Workflow file path
|
||||
- Branch name
|
||||
- Architecture (amd64/arm64)
|
||||
```
|
||||
|
||||
**Cache Hierarchy**:
|
||||
```
|
||||
Cache Entry: main/docker-release.yml/linux-amd64
|
||||
├─ Layer: sha256:abc123... (FROM python:3.12)
|
||||
├─ Layer: sha256:def456... (RUN apt-get update)
|
||||
├─ Layer: sha256:ghi789... (COPY requirements.txt)
|
||||
├─ Layer: sha256:jkl012... (RUN pip install)
|
||||
└─ Layer: sha256:mno345... (COPY . /app)
|
||||
|
||||
Cache Hit/Miss Logic:
|
||||
- If layer input unchanged → cache hit → skip build
|
||||
- If layer input changed → cache miss → rebuild + all subsequent layers
|
||||
```
|
||||
|
||||
##### Stage 6: Tag Generation
|
||||
```bash
|
||||
Input: VERSION=1.2.3, MAJOR=1, MINOR=1.2
|
||||
|
||||
Output Tags:
|
||||
- unclecode/crawl4ai:1.2.3 (exact version)
|
||||
- unclecode/crawl4ai:1.2 (minor version)
|
||||
- unclecode/crawl4ai:1 (major version)
|
||||
- unclecode/crawl4ai:latest (latest stable)
|
||||
```
|
||||
|
||||
**Tag Strategy**:
|
||||
- All tags point to same image SHA
|
||||
- Users can pin to desired stability level
|
||||
- Pushing new version updates `1`, `1.2`, and `latest` automatically
|
||||
|
||||
##### Stage 7: Push to Registry
|
||||
```bash
|
||||
For each tag:
|
||||
For each platform (amd64, arm64):
|
||||
Push image to Docker Hub
|
||||
|
||||
Create manifest list:
|
||||
Manifest: unclecode/crawl4ai:1.2.3
|
||||
├─ linux/amd64: sha256:abc...
|
||||
└─ linux/arm64: sha256:def...
|
||||
|
||||
Docker CLI automatically selects correct platform on pull
|
||||
```
|
||||
|
||||
#### Output
|
||||
- **Docker Images**: 4 tags × 2 platforms = 8 image variants + 4 manifests
|
||||
- **Docker Hub**: https://hub.docker.com/r/unclecode/crawl4ai/tags
|
||||
|
||||
#### Timeline
|
||||
|
||||
**Cold Cache (First Build)**:
|
||||
```
|
||||
0:00 - Release event received
|
||||
0:01 - Checkout + Buildx setup
|
||||
0:02 - Docker Hub auth
|
||||
0:03 - Start build (amd64)
|
||||
0:08 - Complete amd64 build
|
||||
0:09 - Start build (arm64)
|
||||
0:14 - Complete arm64 build
|
||||
0:15 - Generate manifests
|
||||
0:16 - Push all tags
|
||||
0:17 - Workflow complete
|
||||
```
|
||||
|
||||
**Warm Cache (Code Change Only)**:
|
||||
```
|
||||
0:00 - Release event received
|
||||
0:01 - Checkout + Buildx setup
|
||||
0:02 - Docker Hub auth
|
||||
0:03 - Start build (amd64) - cache hit for layers 1-4
|
||||
0:04 - Complete amd64 build (only layer 5 rebuilt)
|
||||
0:05 - Start build (arm64) - cache hit for layers 1-4
|
||||
0:06 - Complete arm64 build (only layer 5 rebuilt)
|
||||
0:07 - Generate manifests
|
||||
0:08 - Push all tags
|
||||
0:09 - Workflow complete
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Data Flow
|
||||
|
||||
### Version Information Flow
|
||||
|
||||
```
|
||||
Developer
|
||||
│
|
||||
▼
|
||||
crawl4ai/__version__.py
|
||||
__version__ = "1.2.3"
|
||||
│
|
||||
├─► Git Tag
|
||||
│ v1.2.3
|
||||
│ │
|
||||
│ ▼
|
||||
│ release.yml
|
||||
│ │
|
||||
│ ├─► Validation
|
||||
│ │ ✓ Match
|
||||
│ │
|
||||
│ ├─► PyPI Package
|
||||
│ │ crawl4ai==1.2.3
|
||||
│ │
|
||||
│ └─► GitHub Release
|
||||
│ v1.2.3
|
||||
│ │
|
||||
│ ▼
|
||||
│ docker-release.yml
|
||||
│ │
|
||||
│ └─► Docker Tags
|
||||
│ 1.2.3, 1.2, 1, latest
|
||||
│
|
||||
└─► Package Metadata
|
||||
pyproject.toml
|
||||
version = "1.2.3"
|
||||
```
|
||||
|
||||
### Secrets Flow
|
||||
|
||||
```
|
||||
GitHub Secrets (Encrypted at Rest)
|
||||
│
|
||||
├─► PYPI_TOKEN
|
||||
│ │
|
||||
│ ▼
|
||||
│ release.yml
|
||||
│ │
|
||||
│ ▼
|
||||
│ TWINE_PASSWORD env var (masked in logs)
|
||||
│ │
|
||||
│ ▼
|
||||
│ PyPI API (HTTPS)
|
||||
│
|
||||
├─► DOCKER_USERNAME
|
||||
│ │
|
||||
│ ▼
|
||||
│ docker-release.yml
|
||||
│ │
|
||||
│ ▼
|
||||
│ docker/login-action (masked in logs)
|
||||
│ │
|
||||
│ ▼
|
||||
│ Docker Hub API (HTTPS)
|
||||
│
|
||||
└─► DOCKER_TOKEN
|
||||
│
|
||||
▼
|
||||
docker-release.yml
|
||||
│
|
||||
▼
|
||||
docker/login-action (masked in logs)
|
||||
│
|
||||
▼
|
||||
Docker Hub API (HTTPS)
|
||||
```
|
||||
|
||||
### Artifact Flow
|
||||
|
||||
```
|
||||
Source Code
|
||||
│
|
||||
├─► release.yml
|
||||
│ │
|
||||
│ ▼
|
||||
│ python -m build
|
||||
│ │
|
||||
│ ├─► crawl4ai-1.2.3.tar.gz
|
||||
│ │ │
|
||||
│ │ ▼
|
||||
│ │ PyPI Storage
|
||||
│ │ │
|
||||
│ │ ▼
|
||||
│ │ pip install crawl4ai
|
||||
│ │
|
||||
│ └─► crawl4ai-1.2.3-py3-none-any.whl
|
||||
│ │
|
||||
│ ▼
|
||||
│ PyPI Storage
|
||||
│ │
|
||||
│ ▼
|
||||
│ pip install crawl4ai
|
||||
│
|
||||
└─► docker-release.yml
|
||||
│
|
||||
▼
|
||||
docker build
|
||||
│
|
||||
├─► Image: linux/amd64
|
||||
│ │
|
||||
│ └─► Docker Hub
|
||||
│ unclecode/crawl4ai:1.2.3-amd64
|
||||
│
|
||||
└─► Image: linux/arm64
|
||||
│
|
||||
└─► Docker Hub
|
||||
unclecode/crawl4ai:1.2.3-arm64
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## State Machines
|
||||
|
||||
### Release Pipeline State Machine
|
||||
|
||||
```
|
||||
┌─────────┐
|
||||
│ START │
|
||||
└────┬────┘
|
||||
│
|
||||
▼
|
||||
┌──────────────┐
|
||||
│ Extract │
|
||||
│ Version │
|
||||
└──────┬───────┘
|
||||
│
|
||||
▼
|
||||
┌──────────────┐ ┌─────────┐
|
||||
│ Validate │─────►│ FAILED │
|
||||
│ Version │ No │ (Exit 1)│
|
||||
└──────┬───────┘ └─────────┘
|
||||
│ Yes
|
||||
▼
|
||||
┌──────────────┐
|
||||
│ Build │
|
||||
│ Package │
|
||||
└──────┬───────┘
|
||||
│
|
||||
▼
|
||||
┌──────────────┐ ┌─────────┐
|
||||
│ Upload │─────►│ FAILED │
|
||||
│ to PyPI │ Error│ (Exit 1)│
|
||||
└──────┬───────┘ └─────────┘
|
||||
│ Success
|
||||
▼
|
||||
┌──────────────┐
|
||||
│ Create │
|
||||
│ GH Release │
|
||||
└──────┬───────┘
|
||||
│
|
||||
▼
|
||||
┌──────────────┐
|
||||
│ SUCCESS │
|
||||
│ (Emit Event) │
|
||||
└──────────────┘
|
||||
```
|
||||
|
||||
### Docker Pipeline State Machine
|
||||
|
||||
```
|
||||
┌─────────┐
|
||||
│ START │
|
||||
│ (Event) │
|
||||
└────┬────┘
|
||||
│
|
||||
▼
|
||||
┌──────────────┐
|
||||
│ Detect │
|
||||
│ Version │
|
||||
│ Source │
|
||||
└──────┬───────┘
|
||||
│
|
||||
▼
|
||||
┌──────────────┐
|
||||
│ Parse │
|
||||
│ Semantic │
|
||||
│ Versions │
|
||||
└──────┬───────┘
|
||||
│
|
||||
▼
|
||||
┌──────────────┐ ┌─────────┐
|
||||
│ Authenticate │─────►│ FAILED │
|
||||
│ Docker Hub │ Error│ (Exit 1)│
|
||||
└──────┬───────┘ └─────────┘
|
||||
│ Success
|
||||
▼
|
||||
┌──────────────┐
|
||||
│ Build │
|
||||
│ amd64 │
|
||||
└──────┬───────┘
|
||||
│
|
||||
▼
|
||||
┌──────────────┐ ┌─────────┐
|
||||
│ Build │─────►│ FAILED │
|
||||
│ arm64 │ Error│ (Exit 1)│
|
||||
└──────┬───────┘ └─────────┘
|
||||
│ Success
|
||||
▼
|
||||
┌──────────────┐
|
||||
│ Push All │
|
||||
│ Tags │
|
||||
└──────┬───────┘
|
||||
│
|
||||
▼
|
||||
┌──────────────┐
|
||||
│ SUCCESS │
|
||||
└──────────────┘
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Security Architecture
|
||||
|
||||
### Threat Model
|
||||
|
||||
#### Threats Mitigated
|
||||
|
||||
1. **Secret Exposure**
|
||||
- Mitigation: GitHub Actions secret masking
|
||||
- Evidence: Secrets never appear in logs
|
||||
|
||||
2. **Unauthorized Package Upload**
|
||||
- Mitigation: Scoped PyPI tokens
|
||||
- Evidence: Token limited to `crawl4ai` project
|
||||
|
||||
3. **Man-in-the-Middle**
|
||||
- Mitigation: HTTPS for all API calls
|
||||
- Evidence: PyPI, Docker Hub, GitHub all use TLS
|
||||
|
||||
4. **Supply Chain Tampering**
|
||||
- Mitigation: Immutable artifacts, content checksums
|
||||
- Evidence: PyPI stores SHA256, Docker uses content-addressable storage
|
||||
|
||||
#### Trust Boundaries
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────┐
|
||||
│ Trusted Zone │
|
||||
│ ┌────────────────────────────────┐ │
|
||||
│ │ GitHub Actions Runner │ │
|
||||
│ │ - Ephemeral VM │ │
|
||||
│ │ - Isolated environment │ │
|
||||
│ │ - Access to secrets │ │
|
||||
│ └────────────────────────────────┘ │
|
||||
│ │ │
|
||||
│ │ HTTPS (TLS 1.2+) │
|
||||
│ ▼ │
|
||||
└─────────────────────────────────────────┘
|
||||
│
|
||||
┌────────────┼────────────┐
|
||||
│ │ │
|
||||
▼ ▼ ▼
|
||||
┌────────┐ ┌─────────┐ ┌──────────┐
|
||||
│ PyPI │ │ Docker │ │ GitHub │
|
||||
│ API │ │ Hub │ │ API │
|
||||
└────────┘ └─────────┘ └──────────┘
|
||||
External External External
|
||||
Service Service Service
|
||||
```
|
||||
|
||||
### Secret Management
|
||||
|
||||
#### Secret Lifecycle
|
||||
|
||||
```
|
||||
Creation (Developer)
|
||||
│
|
||||
├─► PyPI: Create API token (scoped to project)
|
||||
├─► Docker Hub: Create access token (read/write)
|
||||
│
|
||||
▼
|
||||
Storage (GitHub)
|
||||
│
|
||||
├─► Encrypted at rest (AES-256)
|
||||
├─► Access controlled (repo-scoped)
|
||||
│
|
||||
▼
|
||||
Usage (Workflow)
|
||||
│
|
||||
├─► Injected as env vars
|
||||
├─► Masked in logs (GitHub redacts on output)
|
||||
├─► Never persisted to disk (in-memory only)
|
||||
│
|
||||
▼
|
||||
Transmission (API Call)
|
||||
│
|
||||
├─► HTTPS only
|
||||
├─► TLS 1.2+ with strong ciphers
|
||||
│
|
||||
▼
|
||||
Rotation (Manual)
|
||||
│
|
||||
└─► Regenerate on PyPI/Docker Hub
|
||||
Update GitHub secret
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Performance Characteristics
|
||||
|
||||
### Release Pipeline Performance
|
||||
|
||||
| Metric | Value | Notes |
|
||||
|--------|-------|-------|
|
||||
| Cold start | ~2-3 min | First run on new runner |
|
||||
| Warm start | ~2-3 min | Minimal caching benefit |
|
||||
| PyPI upload | ~30-60 sec | Network-bound |
|
||||
| Package build | ~30 sec | CPU-bound |
|
||||
| Parallelization | None | Sequential by design |
|
||||
|
||||
### Docker Pipeline Performance
|
||||
|
||||
| Metric | Cold Cache | Warm Cache (code) | Warm Cache (deps) |
|
||||
|--------|-----------|-------------------|-------------------|
|
||||
| Total time | 10-15 min | 1-2 min | 3-5 min |
|
||||
| amd64 build | 5-7 min | 30-60 sec | 1-2 min |
|
||||
| arm64 build | 5-7 min | 30-60 sec | 1-2 min |
|
||||
| Push time | 1-2 min | 30 sec | 30 sec |
|
||||
| Cache hit rate | 0% | 85% | 60% |
|
||||
|
||||
### Cache Performance Model
|
||||
|
||||
```python
|
||||
def estimate_build_time(changes):
|
||||
base_time = 60 # seconds (setup + push)
|
||||
|
||||
if "Dockerfile" in changes:
|
||||
return base_time + (10 * 60) # Full rebuild: ~11 min
|
||||
elif "requirements.txt" in changes:
|
||||
return base_time + (3 * 60) # Deps rebuild: ~4 min
|
||||
elif any(f.endswith(".py") for f in changes):
|
||||
return base_time + 60 # Code only: ~2 min
|
||||
else:
|
||||
return base_time # No changes: ~1 min
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Scalability Considerations
|
||||
|
||||
### Current Limits
|
||||
|
||||
| Resource | Limit | Impact |
|
||||
|----------|-------|--------|
|
||||
| Workflow concurrency | 20 (default) | Max 20 releases in parallel |
|
||||
| Artifact storage | 500 MB/artifact | PyPI packages small (<10 MB) |
|
||||
| Cache storage | 10 GB/repo | Docker layers fit comfortably |
|
||||
| Workflow run time | 6 hours | Plenty of headroom |
|
||||
|
||||
### Scaling Strategies
|
||||
|
||||
#### Horizontal Scaling (Multiple Repos)
|
||||
```
|
||||
crawl4ai (main)
|
||||
├─ release.yml
|
||||
└─ docker-release.yml
|
||||
|
||||
crawl4ai-plugins (separate)
|
||||
├─ release.yml
|
||||
└─ docker-release.yml
|
||||
|
||||
Each repo has independent:
|
||||
- Secrets
|
||||
- Cache (10 GB each)
|
||||
- Concurrency limits (20 each)
|
||||
```
|
||||
|
||||
#### Vertical Scaling (Larger Runners)
|
||||
```yaml
|
||||
jobs:
|
||||
docker:
|
||||
runs-on: ubuntu-latest-8-cores # GitHub-hosted larger runner
|
||||
# 4x faster builds for CPU-bound layers
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Disaster Recovery
|
||||
|
||||
### Failure Scenarios
|
||||
|
||||
#### Scenario 1: Release Pipeline Fails
|
||||
|
||||
**Failure Point**: PyPI upload fails (network error)
|
||||
|
||||
**State**:
|
||||
- ✓ Version validated
|
||||
- ✓ Package built
|
||||
- ✗ PyPI upload
|
||||
- ✗ GitHub release
|
||||
|
||||
**Recovery**:
|
||||
```bash
|
||||
# Manual upload
|
||||
twine upload dist/*
|
||||
|
||||
# Retry workflow (re-run from GitHub Actions UI)
|
||||
```
|
||||
|
||||
**Prevention**: Add retry logic to PyPI upload
|
||||
|
||||
#### Scenario 2: Docker Pipeline Fails
|
||||
|
||||
**Failure Point**: ARM build fails (dependency issue)
|
||||
|
||||
**State**:
|
||||
- ✓ PyPI published
|
||||
- ✓ GitHub release created
|
||||
- ✓ amd64 image built
|
||||
- ✗ arm64 image build
|
||||
|
||||
**Recovery**:
|
||||
```bash
|
||||
# Fix Dockerfile
|
||||
git commit -am "fix: ARM build dependency"
|
||||
|
||||
# Trigger rebuild
|
||||
git tag docker-rebuild-v1.2.3
|
||||
git push origin docker-rebuild-v1.2.3
|
||||
```
|
||||
|
||||
**Impact**: PyPI package available, only Docker ARM users affected
|
||||
|
||||
#### Scenario 3: Partial Release
|
||||
|
||||
**Failure Point**: GitHub release creation fails
|
||||
|
||||
**State**:
|
||||
- ✓ PyPI published
|
||||
- ✗ GitHub release
|
||||
- ✗ Docker images
|
||||
|
||||
**Recovery**:
|
||||
```bash
|
||||
# Create release manually
|
||||
gh release create v1.2.3 \
|
||||
--title "Release v1.2.3" \
|
||||
--notes "..."
|
||||
|
||||
# This triggers docker-release.yml automatically
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Monitoring and Observability
|
||||
|
||||
### Metrics to Track
|
||||
|
||||
#### Release Pipeline
|
||||
- Success rate (target: >99%)
|
||||
- Duration (target: <3 min)
|
||||
- PyPI upload time (target: <60 sec)
|
||||
|
||||
#### Docker Pipeline
|
||||
- Success rate (target: >95%)
|
||||
- Duration (target: <15 min cold, <2 min warm)
|
||||
- Cache hit rate (target: >80% for code changes)
|
||||
|
||||
### Alerting
|
||||
|
||||
**Critical Alerts**:
|
||||
- Release pipeline failure (blocks release)
|
||||
- PyPI authentication failure (expired token)
|
||||
|
||||
**Warning Alerts**:
|
||||
- Docker build >15 min (performance degradation)
|
||||
- Cache hit rate <50% (cache issue)
|
||||
|
||||
### Logging
|
||||
|
||||
**GitHub Actions Logs**:
|
||||
- Retention: 90 days
|
||||
- Downloadable: Yes
|
||||
- Searchable: Limited
|
||||
|
||||
**Recommended External Logging**:
|
||||
```yaml
|
||||
- name: Send logs to external service
|
||||
if: failure()
|
||||
run: |
|
||||
curl -X POST https://logs.example.com/api/v1/logs \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "{\"workflow\": \"${{ github.workflow }}\", \"status\": \"failed\"}"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Future Enhancements
|
||||
|
||||
### Planned Improvements
|
||||
|
||||
1. **Automated Changelog Generation**
|
||||
- Use conventional commits
|
||||
- Generate CHANGELOG.md automatically
|
||||
|
||||
2. **Pre-release Testing**
|
||||
- Test builds on `test-v*` tags
|
||||
- Upload to TestPyPI
|
||||
|
||||
3. **Notification System**
|
||||
- Slack/Discord notifications on release
|
||||
- Email on failure
|
||||
|
||||
4. **Performance Optimization**
|
||||
- Parallel Docker builds (amd64 + arm64 simultaneously)
|
||||
- Persistent runners for better caching
|
||||
|
||||
5. **Enhanced Validation**
|
||||
- Smoke tests after PyPI upload
|
||||
- Container security scanning
|
||||
|
||||
---
|
||||
|
||||
## References
|
||||
|
||||
- [GitHub Actions Architecture](https://docs.github.com/en/actions/learn-github-actions/understanding-github-actions)
|
||||
- [Docker Build Cache](https://docs.docker.com/build/cache/)
|
||||
- [PyPI API Documentation](https://warehouse.pypa.io/api-reference/)
|
||||
|
||||
---
|
||||
|
||||
**Last Updated**: 2025-01-21
|
||||
**Version**: 2.0
|
||||
.github/workflows/docs/README.md (vendored, 1029 changed lines)
File diff suppressed because it is too large.
.github/workflows/docs/WORKFLOW_REFERENCE.md (vendored, 287 changed lines)
@@ -1,287 +0,0 @@
|
||||
# Workflow Quick Reference
|
||||
|
||||
## Quick Commands
|
||||
|
||||
### Standard Release
|
||||
```bash
|
||||
# 1. Update version
|
||||
vim crawl4ai/__version__.py # Set to "1.2.3"
|
||||
|
||||
# 2. Commit and tag
|
||||
git add crawl4ai/__version__.py
|
||||
git commit -m "chore: bump version to 1.2.3"
|
||||
git tag v1.2.3
|
||||
git push origin main
|
||||
git push origin v1.2.3
|
||||
|
||||
# 3. Monitor
|
||||
# - PyPI: ~2-3 minutes
|
||||
# - Docker: ~1-15 minutes
|
||||
```
|
||||
|
||||
### Docker Rebuild Only
|
||||
```bash
|
||||
git tag docker-rebuild-v1.2.3
|
||||
git push origin docker-rebuild-v1.2.3
|
||||
```
|
||||
|
||||
### Delete Tag (Undo Release)
|
||||
```bash
|
||||
# Local
|
||||
git tag -d v1.2.3
|
||||
|
||||
# Remote
|
||||
git push --delete origin v1.2.3
|
||||
|
||||
# GitHub Release
|
||||
gh release delete v1.2.3
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Workflow Triggers
|
||||
|
||||
### release.yml
|
||||
| Event | Pattern | Example |
|
||||
|-------|---------|---------|
|
||||
| Tag push | `v*` | `v1.2.3` |
|
||||
| Excludes | `test-v*` | `test-v1.2.3` |
|
||||
|
||||
### docker-release.yml
|
||||
| Event | Pattern | Example |
|
||||
|-------|---------|---------|
|
||||
| Release published | `release.published` | Automatic |
|
||||
| Tag push | `docker-rebuild-v*` | `docker-rebuild-v1.2.3` |
|
||||
|
||||
---
|
||||
|
||||
## Environment Variables
|
||||
|
||||
### release.yml
|
||||
| Variable | Source | Example |
|
||||
|----------|--------|---------|
|
||||
| `VERSION` | Git tag | `1.2.3` |
|
||||
| `TWINE_USERNAME` | Static | `__token__` |
|
||||
| `TWINE_PASSWORD` | Secret | `pypi-Ag...` |
|
||||
| `GITHUB_TOKEN` | Auto | `ghp_...` |
|
||||
|
||||
### docker-release.yml
|
||||
| Variable | Source | Example |
|
||||
|----------|--------|---------|
|
||||
| `VERSION` | Release/Tag | `1.2.3` |
|
||||
| `MAJOR` | Computed | `1` |
|
||||
| `MINOR` | Computed | `1.2` |
|
||||
| `DOCKER_USERNAME` | Secret | `unclecode` |
|
||||
| `DOCKER_TOKEN` | Secret | `dckr_pat_...` |
|
||||
|
||||
---
|
||||
|
||||
## Docker Tags Generated
|
||||
|
||||
| Version | Tags Created |
|
||||
|---------|-------------|
|
||||
| v1.0.0 | `1.0.0`, `1.0`, `1`, `latest` |
|
||||
| v1.1.0 | `1.1.0`, `1.1`, `1`, `latest` |
|
||||
| v1.2.3 | `1.2.3`, `1.2`, `1`, `latest` |
|
||||
| v2.0.0 | `2.0.0`, `2.0`, `2`, `latest` |
|
||||
|
||||
---
|
||||
|
||||
## Workflow Outputs
|
||||
|
||||
### release.yml
|
||||
| Output | Location | Time |
|
||||
|--------|----------|------|
|
||||
| PyPI Package | https://pypi.org/project/crawl4ai/ | ~2-3 min |
|
||||
| GitHub Release | Repository → Releases | ~2-3 min |
|
||||
| Workflow Summary | Actions → Run → Summary | Immediate |
|
||||
|
||||
### docker-release.yml
|
||||
| Output | Location | Time |
|
||||
|--------|----------|------|
|
||||
| Docker Images | https://hub.docker.com/r/unclecode/crawl4ai | ~1-15 min |
|
||||
| Workflow Summary | Actions → Run → Summary | Immediate |
|
||||
|
||||
---
|
||||
|
||||
## Common Issues
|
||||
|
||||
| Issue | Solution |
|
||||
|-------|----------|
|
||||
| Version mismatch | Update `crawl4ai/__version__.py` to match tag |
|
||||
| PyPI 403 Forbidden | Check `PYPI_TOKEN` secret |
|
||||
| PyPI 400 File exists | Version already published, increment version |
|
||||
| Docker auth failed | Regenerate `DOCKER_TOKEN` |
|
||||
| Docker build timeout | Check Dockerfile, review build logs |
|
||||
| Cache not working | First build on branch always cold |
|
||||
|
||||
---
|
||||
|
||||
## Secrets Checklist
|
||||
|
||||
- [ ] `PYPI_TOKEN` - PyPI API token (project or account scope)
|
||||
- [ ] `DOCKER_USERNAME` - Docker Hub username
|
||||
- [ ] `DOCKER_TOKEN` - Docker Hub access token (read/write)
|
||||
- [ ] `GITHUB_TOKEN` - Auto-provided (no action needed)
|
||||
|
||||
---
|
||||
|
||||
## Workflow Dependencies
|
||||
|
||||
### release.yml Dependencies
|
||||
```yaml
|
||||
Python: 3.12
|
||||
Actions:
|
||||
- actions/checkout@v4
|
||||
- actions/setup-python@v5
|
||||
- softprops/action-gh-release@v2
|
||||
PyPI Packages:
|
||||
- build
|
||||
- twine
|
||||
```
|
||||
|
||||
### docker-release.yml Dependencies
|
||||
```yaml
|
||||
Actions:
|
||||
- actions/checkout@v4
|
||||
- docker/setup-buildx-action@v3
|
||||
- docker/login-action@v3
|
||||
- docker/build-push-action@v5
|
||||
Docker:
|
||||
- Buildx
|
||||
- QEMU (for multi-arch)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Cache Information
|
||||
|
||||
### Type
|
||||
- GitHub Actions Cache (`type=gha`)
|
||||
|
||||
### Storage
|
||||
- **Limit**: 10GB per repository
|
||||
- **Retention**: 7 days for unused entries
|
||||
- **Cleanup**: Automatic LRU eviction
|
||||
|
||||
### Performance
|
||||
| Scenario | Cache Hit | Build Time |
|
||||
|----------|-----------|------------|
|
||||
| First build | 0% | 10-15 min |
|
||||
| Code change only | 85% | 1-2 min |
|
||||
| Dependency update | 60% | 3-5 min |
|
||||
| No changes | 100% | 30-60 sec |
|
||||
|
||||
---
|
||||
|
||||
## Build Platforms
|
||||
|
||||
| Platform | Architecture | Devices |
|
||||
|----------|--------------|---------|
|
||||
| linux/amd64 | x86_64 | Intel/AMD servers, AWS EC2, GCP |
|
||||
| linux/arm64 | aarch64 | Apple Silicon, AWS Graviton, Raspberry Pi |
|
||||
|
||||
---
|
||||
|
||||
## Version Validation
|
||||
|
||||
### Pre-Tag Checklist
|
||||
```bash
|
||||
# Check current version
|
||||
python -c "from crawl4ai.__version__ import __version__; print(__version__)"
|
||||
|
||||
# Verify it matches intended tag
|
||||
# If tag is v1.2.3, version should be "1.2.3"
|
||||
```
|
||||
|
||||
### Post-Release Verification
|
||||
```bash
|
||||
# PyPI
|
||||
pip install crawl4ai==1.2.3
|
||||
python -c "import crawl4ai; print(crawl4ai.__version__)"
|
||||
|
||||
# Docker
|
||||
docker pull unclecode/crawl4ai:1.2.3
|
||||
docker run unclecode/crawl4ai:1.2.3 python -c "import crawl4ai; print(crawl4ai.__version__)"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Monitoring URLs
|
||||
|
||||
| Service | URL |
|
||||
|---------|-----|
|
||||
| GitHub Actions | `https://github.com/{owner}/{repo}/actions` |
|
||||
| PyPI Project | `https://pypi.org/project/crawl4ai/` |
|
||||
| Docker Hub | `https://hub.docker.com/r/unclecode/crawl4ai` |
|
||||
| GitHub Releases | `https://github.com/{owner}/{repo}/releases` |
|
||||
|
||||
---
|
||||
|
||||
## Rollback Strategy
|
||||
|
||||
### PyPI (Cannot Delete)
|
||||
```bash
|
||||
# Increment patch version
|
||||
git tag v1.2.4
|
||||
git push origin v1.2.4
|
||||
```
|
||||
|
||||
### Docker (Can Overwrite)
|
||||
```bash
|
||||
# Rebuild with fix
|
||||
git tag docker-rebuild-v1.2.3
|
||||
git push origin docker-rebuild-v1.2.3
|
||||
```
|
||||
|
||||
### GitHub Release
|
||||
```bash
|
||||
# Delete release
|
||||
gh release delete v1.2.3
|
||||
|
||||
# Delete tag
|
||||
git push --delete origin v1.2.3
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Status Badge Markdown
|
||||
|
||||
```markdown
|
||||
[](https://github.com/{owner}/{repo}/actions/workflows/release.yml)
|
||||
|
||||
[](https://github.com/{owner}/{repo}/actions/workflows/docker-release.yml)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Timeline Example
|
||||
|
||||
```
|
||||
0:00 - Push tag v1.2.3
|
||||
0:01 - release.yml starts
|
||||
0:02 - Version validation passes
|
||||
0:03 - Package built
|
||||
0:04 - PyPI upload starts
|
||||
0:06 - PyPI upload complete ✓
|
||||
0:07 - GitHub release created ✓
|
||||
0:08 - release.yml complete
|
||||
0:08 - docker-release.yml triggered
|
||||
0:10 - Docker build starts
|
||||
0:12 - amd64 image built (cache hit)
|
||||
0:14 - arm64 image built (cache hit)
|
||||
0:15 - Images pushed to Docker Hub ✓
|
||||
0:16 - docker-release.yml complete
|
||||
|
||||
Total: ~16 minutes
|
||||
Critical path (PyPI + GitHub): ~8 minutes
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Contact
|
||||
|
||||
For workflow issues:
|
||||
1. Check Actions tab for logs
|
||||
2. Review this reference
|
||||
3. See [README.md](./README.md) for detailed docs
|
||||
.github/workflows/release.yml (vendored, 79 changed lines)
@@ -10,53 +10,53 @@ jobs:
    runs-on: ubuntu-latest
    permissions:
      contents: write # Required for creating releases

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.12'

      - name: Extract version from tag
        id: get_version
        run: |
          TAG_VERSION=${GITHUB_REF#refs/tags/v}
          echo "VERSION=$TAG_VERSION" >> $GITHUB_OUTPUT
          echo "Releasing version: $TAG_VERSION"

      - name: Install package dependencies
        run: |
          pip install -e .

      - name: Check version consistency
        run: |
          TAG_VERSION=${{ steps.get_version.outputs.VERSION }}
          PACKAGE_VERSION=$(python -c "from crawl4ai.__version__ import __version__; print(__version__)")

          echo "Tag version: $TAG_VERSION"
          echo "Package version: $PACKAGE_VERSION"

          if [ "$TAG_VERSION" != "$PACKAGE_VERSION" ]; then
            echo "❌ Version mismatch! Tag: $TAG_VERSION, Package: $PACKAGE_VERSION"
            echo "Please update crawl4ai/__version__.py to match the tag version"
            exit 1
          fi
          echo "✅ Version check passed: $TAG_VERSION"

      - name: Install build dependencies
        run: |
          python -m pip install --upgrade pip
          pip install build twine

      - name: Build package
        run: python -m build

      - name: Check package
        run: twine check dist/*

      - name: Upload to PyPI
        env:
          TWINE_USERNAME: __token__
@@ -65,7 +65,37 @@
          echo "📦 Uploading to PyPI..."
          twine upload dist/*
          echo "✅ Package uploaded to https://pypi.org/project/crawl4ai/"

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Log in to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_TOKEN }}

      - name: Extract major and minor versions
        id: versions
        run: |
          VERSION=${{ steps.get_version.outputs.VERSION }}
          MAJOR=$(echo $VERSION | cut -d. -f1)
          MINOR=$(echo $VERSION | cut -d. -f1-2)
          echo "MAJOR=$MAJOR" >> $GITHUB_OUTPUT
          echo "MINOR=$MINOR" >> $GITHUB_OUTPUT

      - name: Build and push Docker images
        uses: docker/build-push-action@v5
        with:
          context: .
          push: true
          tags: |
            unclecode/crawl4ai:${{ steps.get_version.outputs.VERSION }}
            unclecode/crawl4ai:${{ steps.versions.outputs.MINOR }}
            unclecode/crawl4ai:${{ steps.versions.outputs.MAJOR }}
            unclecode/crawl4ai:latest
          platforms: linux/amd64,linux/arm64

      - name: Create GitHub Release
        uses: softprops/action-gh-release@v2
        with:
@@ -73,29 +103,26 @@
          name: Release v${{ steps.get_version.outputs.VERSION }}
          body: |
            ## 🎉 Crawl4AI v${{ steps.get_version.outputs.VERSION }} Released!

            ### 📦 Installation

            **PyPI:**
            ```bash
            pip install crawl4ai==${{ steps.get_version.outputs.VERSION }}
            ```

            **Docker:**
            ```bash
            docker pull unclecode/crawl4ai:${{ steps.get_version.outputs.VERSION }}
            docker pull unclecode/crawl4ai:latest
            ```

            **Note:** Docker images are being built and will be available shortly.
            Check the [Docker Release workflow](https://github.com/${{ github.repository }}/actions/workflows/docker-release.yml) for build status.

            ### 📝 What's Changed
            See [CHANGELOG.md](https://github.com/${{ github.repository }}/blob/main/CHANGELOG.md) for details.
          draft: false
          prerelease: false
          token: ${{ secrets.GITHUB_TOKEN }}

      - name: Summary
        run: |
          echo "## 🚀 Release Complete!" >> $GITHUB_STEP_SUMMARY
@@ -105,9 +132,11 @@
          echo "- URL: https://pypi.org/project/crawl4ai/" >> $GITHUB_STEP_SUMMARY
          echo "- Install: \`pip install crawl4ai==${{ steps.get_version.outputs.VERSION }}\`" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### 📋 GitHub Release" >> $GITHUB_STEP_SUMMARY
          echo "- https://github.com/${{ github.repository }}/releases/tag/v${{ steps.get_version.outputs.VERSION }}" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### 🐳 Docker Images" >> $GITHUB_STEP_SUMMARY
          echo "Docker images are being built in a separate workflow." >> $GITHUB_STEP_SUMMARY
          echo "Check: https://github.com/${{ github.repository }}/actions/workflows/docker-release.yml" >> $GITHUB_STEP_SUMMARY
          echo "- \`unclecode/crawl4ai:${{ steps.get_version.outputs.VERSION }}\`" >> $GITHUB_STEP_SUMMARY
          echo "- \`unclecode/crawl4ai:${{ steps.versions.outputs.MINOR }}\`" >> $GITHUB_STEP_SUMMARY
          echo "- \`unclecode/crawl4ai:${{ steps.versions.outputs.MAJOR }}\`" >> $GITHUB_STEP_SUMMARY
          echo "- \`unclecode/crawl4ai:latest\`" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### 📋 GitHub Release" >> $GITHUB_STEP_SUMMARY
          echo "https://github.com/${{ github.repository }}/releases/tag/v${{ steps.get_version.outputs.VERSION }}" >> $GITHUB_STEP_SUMMARY
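For context on the consolidated release.yml above: a release is driven by bumping crawl4ai/__version__.py and pushing a matching `v*` tag; the version-consistency step aborts the run if the two differ. A minimal sketch of that flow (version number illustrative), mirroring the commands from the deleted WORKFLOW_REFERENCE.md:

```bash
# Bump the package version so it matches the tag the workflow will validate
# (set __version__ = "1.2.3" in crawl4ai/__version__.py)
git add crawl4ai/__version__.py
git commit -m "chore: bump version to 1.2.3"

# Tag and push; pushing the v* tag triggers release.yml
git tag v1.2.3
git push origin main
git push origin v1.2.3
```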
.github/workflows/release.yml.backup (vendored, 142 changed lines)
@@ -1,142 +0,0 @@
|
||||
name: Release Pipeline
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- 'v*'
|
||||
- '!test-v*' # Exclude test tags
|
||||
|
||||
jobs:
|
||||
release:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write # Required for creating releases
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: '3.12'
|
||||
|
||||
- name: Extract version from tag
|
||||
id: get_version
|
||||
run: |
|
||||
TAG_VERSION=${GITHUB_REF#refs/tags/v}
|
||||
echo "VERSION=$TAG_VERSION" >> $GITHUB_OUTPUT
|
||||
echo "Releasing version: $TAG_VERSION"
|
||||
|
||||
- name: Install package dependencies
|
||||
run: |
|
||||
pip install -e .
|
||||
|
||||
- name: Check version consistency
|
||||
run: |
|
||||
TAG_VERSION=${{ steps.get_version.outputs.VERSION }}
|
||||
PACKAGE_VERSION=$(python -c "from crawl4ai.__version__ import __version__; print(__version__)")
|
||||
|
||||
echo "Tag version: $TAG_VERSION"
|
||||
echo "Package version: $PACKAGE_VERSION"
|
||||
|
||||
if [ "$TAG_VERSION" != "$PACKAGE_VERSION" ]; then
|
||||
echo "❌ Version mismatch! Tag: $TAG_VERSION, Package: $PACKAGE_VERSION"
|
||||
echo "Please update crawl4ai/__version__.py to match the tag version"
|
||||
exit 1
|
||||
fi
|
||||
echo "✅ Version check passed: $TAG_VERSION"
|
||||
|
||||
- name: Install build dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
pip install build twine
|
||||
|
||||
- name: Build package
|
||||
run: python -m build
|
||||
|
||||
- name: Check package
|
||||
run: twine check dist/*
|
||||
|
||||
- name: Upload to PyPI
|
||||
env:
|
||||
TWINE_USERNAME: __token__
|
||||
TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }}
|
||||
run: |
|
||||
echo "📦 Uploading to PyPI..."
|
||||
twine upload dist/*
|
||||
echo "✅ Package uploaded to https://pypi.org/project/crawl4ai/"
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_TOKEN }}
|
||||
|
||||
- name: Extract major and minor versions
|
||||
id: versions
|
||||
run: |
|
||||
VERSION=${{ steps.get_version.outputs.VERSION }}
|
||||
MAJOR=$(echo $VERSION | cut -d. -f1)
|
||||
MINOR=$(echo $VERSION | cut -d. -f1-2)
|
||||
echo "MAJOR=$MAJOR" >> $GITHUB_OUTPUT
|
||||
echo "MINOR=$MINOR" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Build and push Docker images
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
tags: |
|
||||
unclecode/crawl4ai:${{ steps.get_version.outputs.VERSION }}
|
||||
unclecode/crawl4ai:${{ steps.versions.outputs.MINOR }}
|
||||
unclecode/crawl4ai:${{ steps.versions.outputs.MAJOR }}
|
||||
unclecode/crawl4ai:latest
|
||||
platforms: linux/amd64,linux/arm64
|
||||
|
||||
- name: Create GitHub Release
|
||||
uses: softprops/action-gh-release@v2
|
||||
with:
|
||||
tag_name: v${{ steps.get_version.outputs.VERSION }}
|
||||
name: Release v${{ steps.get_version.outputs.VERSION }}
|
||||
body: |
|
||||
## 🎉 Crawl4AI v${{ steps.get_version.outputs.VERSION }} Released!
|
||||
|
||||
### 📦 Installation
|
||||
|
||||
**PyPI:**
|
||||
```bash
|
||||
pip install crawl4ai==${{ steps.get_version.outputs.VERSION }}
|
||||
```
|
||||
|
||||
**Docker:**
|
||||
```bash
|
||||
docker pull unclecode/crawl4ai:${{ steps.get_version.outputs.VERSION }}
|
||||
docker pull unclecode/crawl4ai:latest
|
||||
```
|
||||
|
||||
### 📝 What's Changed
|
||||
See [CHANGELOG.md](https://github.com/${{ github.repository }}/blob/main/CHANGELOG.md) for details.
|
||||
draft: false
|
||||
prerelease: false
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Summary
|
||||
run: |
|
||||
echo "## 🚀 Release Complete!" >> $GITHUB_STEP_SUMMARY
|
||||
echo "" >> $GITHUB_STEP_SUMMARY
|
||||
echo "### 📦 PyPI Package" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- Version: ${{ steps.get_version.outputs.VERSION }}" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- URL: https://pypi.org/project/crawl4ai/" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- Install: \`pip install crawl4ai==${{ steps.get_version.outputs.VERSION }}\`" >> $GITHUB_STEP_SUMMARY
|
||||
echo "" >> $GITHUB_STEP_SUMMARY
|
||||
echo "### 🐳 Docker Images" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- \`unclecode/crawl4ai:${{ steps.get_version.outputs.VERSION }}\`" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- \`unclecode/crawl4ai:${{ steps.versions.outputs.MINOR }}\`" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- \`unclecode/crawl4ai:${{ steps.versions.outputs.MAJOR }}\`" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- \`unclecode/crawl4ai:latest\`" >> $GITHUB_STEP_SUMMARY
|
||||
echo "" >> $GITHUB_STEP_SUMMARY
|
||||
echo "### 📋 GitHub Release" >> $GITHUB_STEP_SUMMARY
|
||||
echo "https://github.com/${{ github.repository }}/releases/tag/v${{ steps.get_version.outputs.VERSION }}" >> $GITHUB_STEP_SUMMARY
|
||||
.gitignore (vendored, 28 changed lines)
@@ -1,12 +1,8 @@
# Scripts folder (private tools)
.scripts/

# Database files
*.db

# Environment files
.env
.env.local
# Docker automation scripts (personal use)
docker-scripts/

# Byte-compiled / optimized / DLL files
__pycache__/
@@ -266,13 +262,9 @@ continue_config.json
.llm.env
.private/

.claude/

CLAUDE_MONITOR.md
CLAUDE.md

.claude/

tests/**/test_site
tests/**/reports
tests/**/benchmark_reports
@@ -282,16 +274,6 @@ docs/**/data

docs/apps/linkdin/debug*/
docs/apps/linkdin/samples/insights/*

scripts/

# Databse files
*.sqlite3
*.sqlite3-journal
*.db-journal
*.db-wal
*.db-shm
*.db
*.rdb
*.ldb
.yoyo/
.github/instructions/instructions.instructions.md
.kilocode/mcp.json
Dockerfile

@@ -1,7 +1,7 @@
FROM python:3.12-slim-bookworm AS build

# C4ai version
ARG C4AI_VER=0.7.7
ARG C4AI_VER=0.7.0-r1
ENV C4AI_VERSION=$C4AI_VER
LABEL c4ai.version=$C4AI_VER

@@ -124,7 +124,7 @@ COPY . /tmp/project/

# Copy supervisor config first (might need root later, but okay for now)
COPY deploy/docker/supervisord.conf .

COPY deploy/docker/routers ./routers
COPY deploy/docker/requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

@@ -167,11 +167,6 @@ RUN mkdir -p /home/appuser/.cache/ms-playwright \

RUN crawl4ai-doctor

# Ensure all cache directories belong to appuser
# This fixes permission issues with .cache/url_seeder and other runtime cache dirs
RUN mkdir -p /home/appuser/.cache \
    && chown -R appuser:appuser /home/appuser/.cache

# Copy application code
COPY deploy/docker/* ${APP_HOME}/
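The `C4AI_VER` build argument touched in this hunk can also be set explicitly when building the image locally; a small sketch, where the local tag name `crawl4ai:local` is illustrative:

```bash
# Build the image locally, pinning the version label through the ARG shown above
docker build --build-arg C4AI_VER=0.7.7 -t crawl4ai:local .
```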
README.md (152 changed lines)
@@ -27,13 +27,11 @@
|
||||
|
||||
Crawl4AI turns the web into clean, LLM ready Markdown for RAG, agents, and data pipelines. Fast, controllable, battle tested by a 50k+ star community.
|
||||
|
||||
[✨ Check out latest update v0.7.7](#-recent-updates)
|
||||
[✨ Check out latest update v0.7.4](#-recent-updates)
|
||||
|
||||
✨ **New in v0.7.7**: Complete Self-Hosting Platform with Real-time Monitoring! Enterprise-grade monitoring dashboard, comprehensive REST API, WebSocket streaming, smart browser pool management, and production-ready observability. Full visibility and control over your crawling infrastructure. [Release notes →](https://github.com/unclecode/crawl4ai/blob/main/docs/blog/release-v0.7.7.md)
|
||||
✨ New in v0.7.4: Revolutionary LLM Table Extraction with intelligent chunking, enhanced concurrency fixes, memory management refactor, and critical stability improvements. [Release notes →](https://github.com/unclecode/crawl4ai/blob/main/docs/blog/release-v0.7.4.md)
|
||||
|
||||
✨ Recent v0.7.6: Complete Webhook Infrastructure for Docker Job Queue API! Real-time notifications for both `/crawl/job` and `/llm/job` endpoints with exponential backoff retry, custom headers, and flexible delivery modes. No more polling! [Release notes →](https://github.com/unclecode/crawl4ai/blob/main/docs/blog/release-v0.7.6.md)
|
||||
|
||||
✨ Previous v0.7.5: Docker Hooks System with function-based API for pipeline customization, Enhanced LLM Integration with custom providers, HTTPS Preservation, and multiple community-reported bug fixes. [Release notes →](https://github.com/unclecode/crawl4ai/blob/main/docs/blog/release-v0.7.5.md)
|
||||
✨ Recent v0.7.3: Undetected Browser Support, Multi-URL Configurations, Memory Monitoring, Enhanced Table Extraction, GitHub Sponsors. [Release notes →](https://github.com/unclecode/crawl4ai/blob/main/docs/blog/release-v0.7.3.md)
|
||||
|
||||
<details>
|
||||
<summary>🤓 <strong>My Personal Story</strong></summary>
|
||||
@@ -179,7 +177,7 @@ No rate-limited APIs. No lock-in. Build and own your data pipeline with direct g
|
||||
- 📸 **Screenshots**: Capture page screenshots during crawling for debugging or analysis.
|
||||
- 📂 **Raw Data Crawling**: Directly process raw HTML (`raw:`) or local files (`file://`).
|
||||
- 🔗 **Comprehensive Link Extraction**: Extracts internal, external links, and embedded iframe content.
|
||||
- 🛠️ **Customizable Hooks**: Define hooks at every step to customize crawling behavior (supports both string and function-based APIs).
|
||||
- 🛠️ **Customizable Hooks**: Define hooks at every step to customize crawling behavior.
|
||||
- 💾 **Caching**: Cache data for improved speed and to avoid redundant fetches.
|
||||
- 📄 **Metadata Extraction**: Retrieve structured metadata from web pages.
|
||||
- 📡 **IFrame Content Extraction**: Seamless extraction from embedded iframe content.
|
||||
@@ -296,7 +294,6 @@ pip install -e ".[all]" # Install all optional features
|
||||
### New Docker Features
|
||||
|
||||
The new Docker implementation includes:
|
||||
- **Real-time Monitoring Dashboard** with live system metrics and browser pool visibility
|
||||
- **Browser pooling** with page pre-warming for faster response times
|
||||
- **Interactive playground** to test and generate request code
|
||||
- **MCP integration** for direct connection to AI tools like Claude Code
|
||||
@@ -311,8 +308,7 @@ The new Docker implementation includes:
|
||||
docker pull unclecode/crawl4ai:latest
|
||||
docker run -d -p 11235:11235 --name crawl4ai --shm-size=1g unclecode/crawl4ai:latest
|
||||
|
||||
# Visit the monitoring dashboard at http://localhost:11235/dashboard
|
||||
# Or the playground at http://localhost:11235/playground
|
||||
# Visit the playground at http://localhost:11235/playground
|
||||
```
|
||||
|
||||
### Quick Test
|
||||
@@ -341,7 +337,7 @@ else:
|
||||
result = requests.get(f"http://localhost:11235/task/{task_id}")
|
||||
```
|
||||
|
||||
For more examples, see our [Docker Examples](https://github.com/unclecode/crawl4ai/blob/main/docs/examples/docker_example.py). For advanced configuration, monitoring features, and production deployment, see our [Self-Hosting Guide](https://docs.crawl4ai.com/core/self-hosting/).
|
||||
For more examples, see our [Docker Examples](https://github.com/unclecode/crawl4ai/blob/main/docs/examples/docker_example.py). For advanced configuration, environment variables, and usage examples, see our [Docker Deployment Guide](https://docs.crawl4ai.com/basic/docker-deployment/).
|
||||
|
||||
</details>
|
||||
|
||||
@@ -546,111 +542,8 @@ async def test_news_crawl():
|
||||
|
||||
</details>
|
||||
|
||||
---
|
||||
|
||||
> **💡 Tip:** Some websites may use **CAPTCHA** based verification mechanisms to prevent automated access. If your workflow encounters such challenges, you may optionally integrate a third-party CAPTCHA-handling service such as <strong>[CapSolver](https://www.capsolver.com/blog/Partners/crawl4ai-capsolver/?utm_source=crawl4ai&utm_medium=github_pr&utm_campaign=crawl4ai_integration)</strong>. They support reCAPTCHA v2/v3, Cloudflare Turnstile, Challenge, AWS WAF, and more. Please ensure that your usage complies with the target website’s terms of service and applicable laws.
|
||||
|
||||
## ✨ Recent Updates
|
||||
|
||||
<details>
|
||||
<summary><strong>Version 0.7.7 Release Highlights - The Self-Hosting & Monitoring Update</strong></summary>
|
||||
|
||||
- **📊 Real-time Monitoring Dashboard**: Interactive web UI with live system metrics and browser pool visibility
|
||||
```python
|
||||
# Access the monitoring dashboard
|
||||
# Visit: http://localhost:11235/dashboard
|
||||
|
||||
# Real-time metrics include:
|
||||
# - System health (CPU, memory, network, uptime)
|
||||
# - Active and completed request tracking
|
||||
# - Browser pool management (permanent/hot/cold)
|
||||
# - Janitor cleanup events
|
||||
# - Error monitoring with full context
|
||||
```
|
||||
|
||||
- **🔌 Comprehensive Monitor API**: Complete REST API for programmatic access to all monitoring data
```python
import httpx

async with httpx.AsyncClient() as client:
    # System health
    health = await client.get("http://localhost:11235/monitor/health")

    # Request tracking
    requests = await client.get("http://localhost:11235/monitor/requests")

    # Browser pool status
    browsers = await client.get("http://localhost:11235/monitor/browsers")

    # Endpoint statistics
    stats = await client.get("http://localhost:11235/monitor/endpoints/stats")
```

- **⚡ WebSocket Streaming**: Real-time updates every 2 seconds for custom dashboards (see the sketch after this list)
- **🔥 Smart Browser Pool**: 3-tier architecture (permanent/hot/cold) with automatic promotion and cleanup
- **🧹 Janitor System**: Automatic resource management with event logging
- **🎮 Control Actions**: Manual browser management (kill, restart, cleanup) via API
- **📈 Production Metrics**: 6 critical metrics for operational excellence with Prometheus integration
- **🐛 Critical Bug Fixes**:
  - Fixed async LLM extraction blocking issue (#1055)
  - Enhanced DFS deep crawl strategy (#1607)
  - Fixed sitemap parsing in AsyncUrlSeeder (#1598)
  - Resolved browser viewport configuration (#1495)
  - Fixed CDP timing with exponential backoff (#1528)
  - Security update for pyOpenSSL (>=25.3.0)
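
The WebSocket feed mentioned above can drive a custom dashboard; the minimal sketch below just connects and prints each update. The exact WebSocket route is not spelled out here, so the `/monitor/ws` path is an assumption to verify against the Self-Hosting Guide.

```python
# Minimal sketch of a custom dashboard consumer.
# NOTE: the "/monitor/ws" path is an assumption; check the Self-Hosting Guide for the real route.
import asyncio
import json

import websockets  # pip install websockets


async def watch_monitor(url: str = "ws://localhost:11235/monitor/ws") -> None:
    async with websockets.connect(url) as ws:
        async for message in ws:  # the server pushes an update roughly every 2 seconds
            snapshot = json.loads(message)
            print(snapshot)


asyncio.run(watch_monitor())
```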

[Full v0.7.7 Release Notes →](https://github.com/unclecode/crawl4ai/blob/main/docs/blog/release-v0.7.7.md)

</details>

<details>
<summary><strong>Version 0.7.5 Release Highlights - The Docker Hooks & Security Update</strong></summary>

- **🔧 Docker Hooks System**: Complete pipeline customization with user-provided Python functions at 8 key points
- **✨ Function-Based Hooks API (NEW)**: Write hooks as regular Python functions with full IDE support:
```python
from crawl4ai import hooks_to_string
from crawl4ai.docker_client import Crawl4aiDockerClient

# Define hooks as regular Python functions
async def on_page_context_created(page, context, **kwargs):
    """Block images to speed up crawling"""
    await context.route("**/*.{png,jpg,jpeg,gif,webp}", lambda route: route.abort())
    await page.set_viewport_size({"width": 1920, "height": 1080})
    return page

async def before_goto(page, context, url, **kwargs):
    """Add custom headers"""
    await page.set_extra_http_headers({'X-Crawl4AI': 'v0.7.5'})
    return page

# Option 1: Use hooks_to_string() utility for REST API
hooks_code = hooks_to_string({
    "on_page_context_created": on_page_context_created,
    "before_goto": before_goto
})

# Option 2: Docker client with automatic conversion (Recommended)
client = Crawl4aiDockerClient(base_url="http://localhost:11235")
results = await client.crawl(
    urls=["https://httpbin.org/html"],
    hooks={
        "on_page_context_created": on_page_context_created,
        "before_goto": before_goto
    }
)
# ✓ Full IDE support, type checking, and reusability!
```
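
If you call the REST API directly rather than through the Docker client, the sketch below shows roughly what the request body could look like. The `{"code": ..., "timeout": ...}` shape of the hooks payload mirrors the Docker client changes later in this diff; the other field values are placeholders to adapt to your setup.

```python
# Hypothetical REST call sending the stringified hooks from the example above.
# The hooks payload shape ({"code": ..., "timeout": ...}) mirrors the Docker client in this diff;
# treat endpoint details and timeouts as assumptions to verify against the API docs.
import requests

payload = {
    "urls": ["https://httpbin.org/html"],
    "browser_config": {},
    "crawler_config": {},
    "hooks": {
        "code": hooks_code,  # dict of hook name -> source string from hooks_to_string()
        "timeout": 30,       # seconds allowed per hook
    },
}

response = requests.post("http://localhost:11235/crawl", json=payload, timeout=120)
response.raise_for_status()
print(response.json().get("success"))
```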

- **🤖 Enhanced LLM Integration**: Custom providers with temperature control and base_url configuration
- **🔒 HTTPS Preservation**: Secure internal link handling with `preserve_https_for_internal_links=True`
- **🐍 Python 3.10+ Support**: Modern language features and enhanced performance
- **🛠️ Bug Fixes**: Resolved multiple community-reported issues including URL processing, JWT authentication, and proxy configuration

[Full v0.7.5 Release Notes →](https://github.com/unclecode/crawl4ai/blob/main/docs/blog/release-v0.7.5.md)

</details>

<details>
<summary><strong>Version 0.7.4 Release Highlights - The Intelligent Table Extraction & Performance Update</strong></summary>

@@ -1026,39 +919,6 @@ We envision a future where AI is powered by real human knowledge, ensuring data
For more details, see our [full mission statement](./MISSION.md).
</details>

## 🌟 Current Sponsors

### 🏢 Enterprise Sponsors & Partners

Our enterprise sponsors and technology partners help scale Crawl4AI to power production-grade data pipelines.

| Company | About | Sponsorship Tier |
|------|------|----------------------------|
| <a href="https://app.scrapeless.com/passport/register?utm_source=official&utm_term=crawl4ai" target="_blank"><picture><source width="250" media="(prefers-color-scheme: dark)" srcset="https://gist.githubusercontent.com/aravindkarnam/0d275b942705604263e5c32d2db27bc1/raw/Scrapeless-light-logo.svg"><source width="250" media="(prefers-color-scheme: light)" srcset="https://gist.githubusercontent.com/aravindkarnam/22d0525cc0f3021bf19ebf6e11a69ccd/raw/Scrapeless-dark-logo.svg"><img alt="Scrapeless" src="https://gist.githubusercontent.com/aravindkarnam/22d0525cc0f3021bf19ebf6e11a69ccd/raw/Scrapeless-dark-logo.svg"></picture></a> | Scrapeless is the best full-stack web scraping toolkit offering Scraping API, Scraping Browser, Web Unlocker, Captcha Solver, and Proxies, designed to handle all your data collection needs. | 🥈 Silver |
| <a href="https://dashboard.capsolver.com/passport/register?inviteCode=ESVSECTX5Q23" target="_blank"><picture><source width="120" media="(prefers-color-scheme: dark)" srcset="https://docs.crawl4ai.com/uploads/sponsors/20251013045338_72a71fa4ee4d2f40.png"><source width="120" media="(prefers-color-scheme: light)" srcset="https://www.capsolver.com/assets/images/logo-text.png"><img alt="Capsolver" src="https://www.capsolver.com/assets/images/logo-text.png"></picture></a> | AI-powered Captcha solving service. Supports all major Captcha types, including reCAPTCHA, Cloudflare, and more. | 🥉 Bronze |
| <a href="https://kipo.ai" target="_blank"><img src="https://docs.crawl4ai.com/uploads/sponsors/20251013045751_2d54f57f117c651e.png" alt="Kipo" width="120"/></a> | Helps engineers and buyers find, compare, and source electronic & industrial parts in seconds, with specs, pricing, lead times & alternatives. | 🥇 Gold |
| <a href="https://www.kidocode.com/" target="_blank"><img src="https://docs.crawl4ai.com/uploads/sponsors/20251013045045_bb8dace3f0440d65.svg" alt="Kidocode" width="120"/><p align="center">KidoCode</p></a> | Kidocode is a hybrid technology and entrepreneurship school for kids aged 5–18, offering both online and on-campus education. | 🥇 Gold |
| <a href="https://www.alephnull.sg/" target="_blank"><img src="https://docs.crawl4ai.com/uploads/sponsors/20251013050323_a9e8e8c4c3650421.svg" alt="Aleph null" width="120"/></a> | Singapore-based Aleph Null is Asia’s leading edtech hub, dedicated to student-centric, AI-driven education—empowering learners with the tools to thrive in a fast-changing world. | 🥇 Gold |

### 🧑🤝 Individual Sponsors

A heartfelt thanks to our individual supporters! Every contribution helps us keep our open-source mission alive and thriving!

<p align="left">
<a href="https://github.com/hafezparast"><img src="https://avatars.githubusercontent.com/u/14273305?s=60&v=4" style="border-radius:50%;" width="64px"/></a>
<a href="https://github.com/ntohidi"><img src="https://avatars.githubusercontent.com/u/17140097?s=60&v=4" style="border-radius:50%;" width="64px"/></a>
<a href="https://github.com/Sjoeborg"><img src="https://avatars.githubusercontent.com/u/17451310?s=60&v=4" style="border-radius:50%;" width="64px"/></a>
<a href="https://github.com/romek-rozen"><img src="https://avatars.githubusercontent.com/u/30595969?s=60&v=4" style="border-radius:50%;" width="64px"/></a>
<a href="https://github.com/Kourosh-Kiyani"><img src="https://avatars.githubusercontent.com/u/34105600?s=60&v=4" style="border-radius:50%;" width="64px"/></a>
<a href="https://github.com/Etherdrake"><img src="https://avatars.githubusercontent.com/u/67021215?s=60&v=4" style="border-radius:50%;" width="64px"/></a>
<a href="https://github.com/shaman247"><img src="https://avatars.githubusercontent.com/u/211010067?s=60&v=4" style="border-radius:50%;" width="64px"/></a>
<a href="https://github.com/work-flow-manager"><img src="https://avatars.githubusercontent.com/u/217665461?s=60&v=4" style="border-radius:50%;" width="64px"/></a>
</p>

> Want to join them? [Sponsor Crawl4AI →](https://github.com/sponsors/unclecode)

## Star History

[![Star History Chart](https://api.star-history.com/svg?repos=unclecode/crawl4ai&type=Date)](https://star-history.com/#unclecode/crawl4ai&Date)

@@ -25,7 +25,8 @@ from .extraction_strategy import (
|
||||
JsonCssExtractionStrategy,
|
||||
JsonXPathExtractionStrategy,
|
||||
JsonLxmlExtractionStrategy,
|
||||
RegexExtractionStrategy
|
||||
RegexExtractionStrategy,
|
||||
NoExtractionStrategy, # NEW: Import NoExtractionStrategy
|
||||
)
|
||||
from .chunking_strategy import ChunkingStrategy, RegexChunking
|
||||
from .markdown_generation_strategy import DefaultMarkdownGenerator
|
||||
@@ -103,8 +104,7 @@ from .browser_adapter import (
|
||||
|
||||
from .utils import (
|
||||
start_colab_display_server,
|
||||
setup_colab_environment,
|
||||
hooks_to_string
|
||||
setup_colab_environment
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
@@ -114,6 +114,7 @@ __all__ = [
|
||||
"BrowserProfiler",
|
||||
"LLMConfig",
|
||||
"GeolocationConfig",
|
||||
"NoExtractionStrategy",
|
||||
# NEW: Add SeedingConfig and VirtualScrollConfig
|
||||
"SeedingConfig",
|
||||
"VirtualScrollConfig",
|
||||
@@ -184,7 +185,6 @@ __all__ = [
|
||||
"ProxyConfig",
|
||||
"start_colab_display_server",
|
||||
"setup_colab_environment",
|
||||
"hooks_to_string",
|
||||
# C4A Script additions
|
||||
"c4a_compile",
|
||||
"c4a_validate",
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
# crawl4ai/__version__.py
|
||||
|
||||
# This is the version that will be used for stable releases
|
||||
__version__ = "0.7.7"
|
||||
__version__ = "0.7.4"
|
||||
|
||||
# For nightly builds, this gets set during build process
|
||||
__nightly_version__ = None
|
||||
|
||||
@@ -728,18 +728,18 @@ class EmbeddingStrategy(CrawlStrategy):
|
||||
provider = llm_config_dict.get('provider', 'openai/gpt-4o-mini') if llm_config_dict else 'openai/gpt-4o-mini'
|
||||
api_token = llm_config_dict.get('api_token') if llm_config_dict else None
|
||||
|
||||
response = perform_completion_with_backoff(
|
||||
provider=provider,
|
||||
prompt_with_variables=prompt,
|
||||
api_token=api_token,
|
||||
json_response=True
|
||||
)
|
||||
# response = perform_completion_with_backoff(
|
||||
# provider=provider,
|
||||
# prompt_with_variables=prompt,
|
||||
# api_token=api_token,
|
||||
# json_response=True
|
||||
# )
|
||||
|
||||
variations = json.loads(response.choices[0].message.content)
|
||||
# variations = json.loads(response.choices[0].message.content)
|
||||
|
||||
|
||||
# # Mock data with more variations for split
|
||||
# variations ={'queries': ['what are the best vegetables to use in fried rice?', 'how do I make vegetable fried rice from scratch?', 'can you provide a quick recipe for vegetable fried rice?', 'what cooking techniques are essential for perfect fried rice with vegetables?', 'how to add flavor to vegetable fried rice?', 'are there any tips for making healthy fried rice with vegetables?']}
|
||||
variations ={'queries': ['what are the best vegetables to use in fried rice?', 'how do I make vegetable fried rice from scratch?', 'can you provide a quick recipe for vegetable fried rice?', 'what cooking techniques are essential for perfect fried rice with vegetables?', 'how to add flavor to vegetable fried rice?', 'are there any tips for making healthy fried rice with vegetables?']}
|
||||
|
||||
|
||||
# variations = {'queries': [
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
import os
|
||||
from typing import Union
|
||||
import warnings
|
||||
import requests
|
||||
from .config import (
|
||||
DEFAULT_PROVIDER,
|
||||
DEFAULT_PROVIDER_API_KEY,
|
||||
@@ -650,85 +649,6 @@ class BrowserConfig:
|
||||
return config
|
||||
return BrowserConfig.from_kwargs(config)
|
||||
|
||||
def set_nstproxy(
|
||||
self,
|
||||
token: str,
|
||||
channel_id: str,
|
||||
country: str = "ANY",
|
||||
state: str = "",
|
||||
city: str = "",
|
||||
protocol: str = "http",
|
||||
session_duration: int = 10,
|
||||
):
|
||||
"""
|
||||
Fetch a proxy from NSTProxy API and automatically assign it to proxy_config.
|
||||
|
||||
Get your NSTProxy token from: https://app.nstproxy.com/profile
|
||||
|
||||
Args:
|
||||
token (str): NSTProxy API token.
|
||||
channel_id (str): NSTProxy channel ID.
|
||||
country (str, optional): Country code (default: "ANY").
|
||||
state (str, optional): State code (default: "").
|
||||
city (str, optional): City name (default: "").
|
||||
protocol (str, optional): Proxy protocol ("http" or "socks5"). Defaults to "http".
|
||||
session_duration (int, optional): Session duration in minutes (0 = rotate each request). Defaults to 10.
|
||||
|
||||
Raises:
|
||||
ValueError: If the API response format is invalid.
|
||||
PermissionError: If the API returns an error message.
|
||||
"""
|
||||
|
||||
# --- Validate input early ---
|
||||
if not token or not channel_id:
|
||||
raise ValueError("[NSTProxy] token and channel_id are required")
|
||||
|
||||
if protocol not in ("http", "socks5"):
|
||||
raise ValueError(f"[NSTProxy] Invalid protocol: {protocol}")
|
||||
|
||||
# --- Build NSTProxy API URL ---
|
||||
params = {
|
||||
"fType": 2,
|
||||
"count": 1,
|
||||
"channelId": channel_id,
|
||||
"country": country,
|
||||
"protocol": protocol,
|
||||
"sessionDuration": session_duration,
|
||||
"token": token,
|
||||
}
|
||||
if state:
|
||||
params["state"] = state
|
||||
if city:
|
||||
params["city"] = city
|
||||
|
||||
url = "https://api.nstproxy.com/api/v1/generate/apiproxies"
|
||||
|
||||
try:
|
||||
response = requests.get(url, params=params, timeout=10)
|
||||
response.raise_for_status()
|
||||
|
||||
data = response.json()
|
||||
|
||||
# --- Handle API error response ---
|
||||
if isinstance(data, dict) and data.get("err"):
|
||||
raise PermissionError(f"[NSTProxy] API Error: {data.get('msg', 'Unknown error')}")
|
||||
|
||||
if not isinstance(data, list) or not data:
|
||||
raise ValueError("[NSTProxy] Invalid API response — expected a non-empty list")
|
||||
|
||||
proxy_info = data[0]
|
||||
|
||||
# --- Apply proxy config ---
|
||||
self.proxy_config = ProxyConfig(
|
||||
server=f"{protocol}://{proxy_info['ip']}:{proxy_info['port']}",
|
||||
username=proxy_info["username"],
|
||||
password=proxy_info["password"],
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
print(f"[NSTProxy] ❌ Failed to set proxy: {e}")
|
||||
raise
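# Usage sketch (hypothetical credentials; requires network access to the NSTProxy API):
#   cfg = BrowserConfig()
#   cfg.set_nstproxy(token="YOUR_NST_TOKEN", channel_id="YOUR_CHANNEL_ID", country="US")
#   # cfg.proxy_config is now a ProxyConfig pointing at the fetched proxy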
|
||||
|
||||
class VirtualScrollConfig:
|
||||
"""Configuration for virtual scroll handling.
|
||||
|
||||
|
||||
@@ -1383,10 +1383,9 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
try:
|
||||
await self.adapter.evaluate(page,
|
||||
f"""
|
||||
(async () => {{
|
||||
(() => {{
|
||||
try {{
|
||||
const removeOverlays = {remove_overlays_js};
|
||||
await removeOverlays();
|
||||
{remove_overlays_js}
|
||||
return {{ success: true }};
|
||||
}} catch (error) {{
|
||||
return {{
|
||||
|
||||
@@ -455,6 +455,8 @@ class MemoryAdaptiveDispatcher(BaseDispatcher):
|
||||
|
||||
# Update priorities for waiting tasks if needed
|
||||
await self._update_queue_priorities()
|
||||
|
||||
return results
|
||||
|
||||
except Exception as e:
|
||||
if self.monitor:
|
||||
@@ -465,7 +467,6 @@ class MemoryAdaptiveDispatcher(BaseDispatcher):
|
||||
memory_monitor.cancel()
|
||||
if self.monitor:
|
||||
self.monitor.stop()
|
||||
return results
|
||||
|
||||
async def _update_queue_priorities(self):
|
||||
"""Periodically update priorities of items in the queue to prevent starvation"""
|
||||
|
||||
@@ -845,15 +845,6 @@ class AsyncUrlSeeder:
|
||||
return
|
||||
|
||||
data = gzip.decompress(r.content) if url.endswith(".gz") else r.content
|
||||
base_url = str(r.url)
|
||||
|
||||
def _normalize_loc(raw: Optional[str]) -> Optional[str]:
|
||||
if not raw:
|
||||
return None
|
||||
normalized = urljoin(base_url, raw.strip())
|
||||
if not normalized:
|
||||
return None
|
||||
return normalized
|
||||
|
||||
# Detect if this is a sitemap index by checking for <sitemapindex> or presence of <sitemap> elements
|
||||
is_sitemap_index = False
|
||||
@@ -866,42 +857,25 @@ class AsyncUrlSeeder:
|
||||
# Use XML parser for sitemaps, not HTML parser
|
||||
parser = etree.XMLParser(recover=True)
|
||||
root = etree.fromstring(data, parser=parser)
|
||||
# Namespace-agnostic lookups using local-name() so we honor custom or missing namespaces
|
||||
sitemap_loc_nodes = root.xpath("//*[local-name()='sitemap']/*[local-name()='loc']")
|
||||
url_loc_nodes = root.xpath("//*[local-name()='url']/*[local-name()='loc']")
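# Why local-name(): both of these forms are matched, with or without the default
# sitemap namespace declared on <urlset> (example entries for illustration only):
#   <url><loc>https://example.com/page</loc></url>
#   <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"><url><loc>...</loc></url></urlset>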
|
||||
|
||||
self._log(
|
||||
"debug",
|
||||
"Parsed sitemap {url}: {sitemap_count} sitemap entries, {url_count} url entries discovered",
|
||||
params={
|
||||
"url": url,
|
||||
"sitemap_count": len(sitemap_loc_nodes),
|
||||
"url_count": len(url_loc_nodes),
|
||||
},
|
||||
tag="URL_SEED",
|
||||
)
|
||||
# Define namespace for sitemap
|
||||
ns = {'s': 'http://www.sitemaps.org/schemas/sitemap/0.9'}
|
||||
|
||||
# Check for sitemap index entries
|
||||
if sitemap_loc_nodes:
|
||||
sitemap_locs = root.xpath('//s:sitemap/s:loc', namespaces=ns)
|
||||
if sitemap_locs:
|
||||
is_sitemap_index = True
|
||||
for sitemap_elem in sitemap_loc_nodes:
|
||||
loc = _normalize_loc(sitemap_elem.text)
|
||||
for sitemap_elem in sitemap_locs:
|
||||
loc = sitemap_elem.text.strip() if sitemap_elem.text else ""
|
||||
if loc:
|
||||
sub_sitemaps.append(loc)
|
||||
|
||||
# If not a sitemap index, get regular URLs
|
||||
if not is_sitemap_index:
|
||||
for loc_elem in url_loc_nodes:
|
||||
loc = _normalize_loc(loc_elem.text)
|
||||
for loc_elem in root.xpath('//s:url/s:loc', namespaces=ns):
|
||||
loc = loc_elem.text.strip() if loc_elem.text else ""
|
||||
if loc:
|
||||
regular_urls.append(loc)
|
||||
if not regular_urls:
|
||||
self._log(
|
||||
"warning",
|
||||
"No <loc> entries found inside <url> tags for sitemap {url}. The sitemap might be empty or use an unexpected structure.",
|
||||
params={"url": url},
|
||||
tag="URL_SEED",
|
||||
)
|
||||
except Exception as e:
|
||||
self._log("error", "LXML parsing error for sitemap {url}: {error}",
|
||||
params={"url": url, "error": str(e)}, tag="URL_SEED")
|
||||
@@ -918,39 +892,19 @@ class AsyncUrlSeeder:
|
||||
|
||||
# Check for sitemap index entries
|
||||
sitemaps = root.findall('.//sitemap')
|
||||
url_entries = root.findall('.//url')
|
||||
self._log(
|
||||
"debug",
|
||||
"ElementTree parsed sitemap {url}: {sitemap_count} sitemap entries, {url_count} url entries discovered",
|
||||
params={
|
||||
"url": url,
|
||||
"sitemap_count": len(sitemaps),
|
||||
"url_count": len(url_entries),
|
||||
},
|
||||
tag="URL_SEED",
|
||||
)
|
||||
if sitemaps:
|
||||
is_sitemap_index = True
|
||||
for sitemap in sitemaps:
|
||||
loc_elem = sitemap.find('loc')
|
||||
loc = _normalize_loc(loc_elem.text if loc_elem is not None else None)
|
||||
if loc:
|
||||
sub_sitemaps.append(loc)
|
||||
if loc_elem is not None and loc_elem.text:
|
||||
sub_sitemaps.append(loc_elem.text.strip())
|
||||
|
||||
# If not a sitemap index, get regular URLs
|
||||
if not is_sitemap_index:
|
||||
for url_elem in url_entries:
|
||||
for url_elem in root.findall('.//url'):
|
||||
loc_elem = url_elem.find('loc')
|
||||
loc = _normalize_loc(loc_elem.text if loc_elem is not None else None)
|
||||
if loc:
|
||||
regular_urls.append(loc)
|
||||
if not regular_urls:
|
||||
self._log(
|
||||
"warning",
|
||||
"No <loc> entries found inside <url> tags for sitemap {url}. The sitemap might be empty or use an unexpected structure.",
|
||||
params={"url": url},
|
||||
tag="URL_SEED",
|
||||
)
|
||||
if loc_elem is not None and loc_elem.text:
|
||||
regular_urls.append(loc_elem.text.strip())
|
||||
except Exception as e:
|
||||
self._log("error", "ElementTree parsing error for sitemap {url}: {error}",
|
||||
params={"url": url, "error": str(e)}, tag="URL_SEED")
|
||||
|
||||
@@ -617,17 +617,7 @@ class AsyncWebCrawler:
|
||||
else config.chunking_strategy
|
||||
)
|
||||
sections = chunking.chunk(content)
|
||||
# extracted_content = config.extraction_strategy.run(_url, sections)
|
||||
|
||||
# Use async version if available for better parallelism
|
||||
if hasattr(config.extraction_strategy, 'arun'):
|
||||
extracted_content = await config.extraction_strategy.arun(_url, sections)
|
||||
else:
|
||||
# Fallback to sync version run in thread pool to avoid blocking
|
||||
extracted_content = await asyncio.to_thread(
|
||||
config.extraction_strategy.run, url, sections
|
||||
)
|
||||
|
||||
extracted_content = config.extraction_strategy.run(url, sections)
|
||||
extracted_content = json.dumps(
|
||||
extracted_content, indent=4, default=str, ensure_ascii=False
|
||||
)
|
||||
|
||||
@@ -369,9 +369,6 @@ class ManagedBrowser:
|
||||
]
|
||||
if self.headless:
|
||||
flags.append("--headless=new")
|
||||
# Add viewport flag if specified in config
|
||||
if self.browser_config.viewport_height and self.browser_config.viewport_width:
|
||||
flags.append(f"--window-size={self.browser_config.viewport_width},{self.browser_config.viewport_height}")
|
||||
# merge common launch flags
|
||||
flags.extend(self.build_browser_flags(self.browser_config))
|
||||
elif self.browser_type == "firefox":
|
||||
@@ -661,11 +658,6 @@ class BrowserManager:
|
||||
if self.config.cdp_url or self.config.use_managed_browser:
|
||||
self.config.use_managed_browser = True
|
||||
cdp_url = await self.managed_browser.start() if not self.config.cdp_url else self.config.cdp_url
|
||||
|
||||
# Add CDP endpoint verification before connecting
|
||||
if not await self._verify_cdp_ready(cdp_url):
|
||||
raise Exception(f"CDP endpoint at {cdp_url} is not ready after startup")
|
||||
|
||||
self.browser = await self.playwright.chromium.connect_over_cdp(cdp_url)
|
||||
contexts = self.browser.contexts
|
||||
if contexts:
|
||||
@@ -686,24 +678,6 @@ class BrowserManager:
|
||||
|
||||
self.default_context = self.browser
|
||||
|
||||
async def _verify_cdp_ready(self, cdp_url: str) -> bool:
|
||||
"""Verify CDP endpoint is ready with exponential backoff"""
|
||||
import aiohttp
|
||||
self.logger.debug(f"Starting CDP verification for {cdp_url}", tag="BROWSER")
|
||||
for attempt in range(5):
|
||||
try:
|
||||
async with aiohttp.ClientSession() as session:
|
||||
async with session.get(f"{cdp_url}/json/version", timeout=aiohttp.ClientTimeout(total=2)) as response:
|
||||
if response.status == 200:
|
||||
self.logger.debug(f"CDP endpoint ready after {attempt + 1} attempts", tag="BROWSER")
|
||||
return True
|
||||
except Exception as e:
|
||||
self.logger.debug(f"CDP check attempt {attempt + 1} failed: {e}", tag="BROWSER")
|
||||
delay = 0.5 * (1.4 ** attempt)
|
||||
self.logger.debug(f"Waiting {delay:.2f}s before next CDP check...", tag="BROWSER")
|
||||
await asyncio.sleep(delay)
|
||||
self.logger.debug(f"CDP verification failed after 5 attempts", tag="BROWSER")
|
||||
return False
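# Backoff arithmetic (delay = 0.5 * 1.4**attempt for attempts 0..4):
# ~0.50s, 0.70s, 0.98s, 1.37s, 1.92s between checks, roughly 5.5s of sleeping
# on top of the per-attempt 2s request timeout in the worst case.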
|
||||
|
||||
def _build_browser_args(self) -> dict:
|
||||
"""Build browser launch arguments from config."""
|
||||
|
||||
@@ -542,19 +542,6 @@ class LXMLWebScrapingStrategy(ContentScrapingStrategy):
|
||||
if el.tag in bypass_tags:
|
||||
continue
|
||||
|
||||
# Skip elements inside <pre> or <code> tags where whitespace is significant
|
||||
# This preserves whitespace-only spans (e.g., <span class="w"> </span>) in code blocks
|
||||
is_in_code_block = False
|
||||
ancestor = el.getparent()
|
||||
while ancestor is not None:
|
||||
if ancestor.tag in ("pre", "code"):
|
||||
is_in_code_block = True
|
||||
break
|
||||
ancestor = ancestor.getparent()
|
||||
|
||||
if is_in_code_block:
|
||||
continue
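# Example of what this preserves: syntax highlighters emit whitespace-only spans like
# <pre><code><span class="w">    </span>return x</code></pre>; dropping them would
# collapse the indentation inside rendered code blocks.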
|
||||
|
||||
text_content = (el.text_content() or "").strip()
|
||||
if (
|
||||
len(text_content.split()) < word_count_threshold
|
||||
|
||||
@@ -4,26 +4,14 @@ from typing import AsyncGenerator, Optional, Set, Dict, List, Tuple
|
||||
from ..models import CrawlResult
|
||||
from .bfs_strategy import BFSDeepCrawlStrategy # noqa
|
||||
from ..types import AsyncWebCrawler, CrawlerRunConfig
|
||||
from ..utils import normalize_url_for_deep_crawl
|
||||
|
||||
class DFSDeepCrawlStrategy(BFSDeepCrawlStrategy):
|
||||
"""
|
||||
Depth-first deep crawling with familiar BFS rules.
|
||||
Depth-First Search (DFS) deep crawling strategy.
|
||||
|
||||
We reuse the same filters, scoring, and page limits from :class:`BFSDeepCrawlStrategy`,
|
||||
but walk the graph with a stack so we fully explore one branch before hopping to the
|
||||
next. DFS also keeps its own ``_dfs_seen`` set so we can drop duplicate links at
|
||||
discovery time without accidentally marking them as “already crawled”.
|
||||
Inherits URL validation and link discovery from BFSDeepCrawlStrategy.
|
||||
Overrides _arun_batch and _arun_stream to use a stack (LIFO) for DFS traversal.
|
||||
"""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self._dfs_seen: Set[str] = set()
|
||||
|
||||
def _reset_seen(self, start_url: str) -> None:
|
||||
"""Start each crawl with a clean dedupe set seeded with the root URL."""
|
||||
self._dfs_seen = {start_url}
|
||||
|
||||
async def _arun_batch(
|
||||
self,
|
||||
start_url: str,
|
||||
@@ -31,19 +19,14 @@ class DFSDeepCrawlStrategy(BFSDeepCrawlStrategy):
|
||||
config: CrawlerRunConfig,
|
||||
) -> List[CrawlResult]:
|
||||
"""
|
||||
Crawl level-by-level but emit results at the end.
|
||||
|
||||
We keep a stack of ``(url, parent, depth)`` tuples, pop one at a time, and
|
||||
hand it to ``crawler.arun_many`` with deep crawling disabled so we remain
|
||||
in control of traversal. Every successful page bumps ``_pages_crawled`` and
|
||||
seeds new stack items discovered via :meth:`link_discovery`.
|
||||
Batch (non-streaming) DFS mode.
|
||||
Uses a stack to traverse URLs in DFS order, aggregating CrawlResults into a list.
|
||||
"""
|
||||
visited: Set[str] = set()
|
||||
# Stack items: (url, parent_url, depth)
|
||||
stack: List[Tuple[str, Optional[str], int]] = [(start_url, None, 0)]
|
||||
depths: Dict[str, int] = {start_url: 0}
|
||||
results: List[CrawlResult] = []
|
||||
self._reset_seen(start_url)
|
||||
|
||||
while stack and not self._cancel_event.is_set():
|
||||
url, parent, depth = stack.pop()
|
||||
@@ -88,16 +71,12 @@ class DFSDeepCrawlStrategy(BFSDeepCrawlStrategy):
|
||||
config: CrawlerRunConfig,
|
||||
) -> AsyncGenerator[CrawlResult, None]:
|
||||
"""
|
||||
Same traversal as :meth:`_arun_batch`, but yield pages immediately.
|
||||
|
||||
Each popped URL is crawled, its metadata annotated, then the result gets
|
||||
yielded before we even look at the next stack entry. Successful crawls
|
||||
still feed :meth:`link_discovery`, keeping DFS order intact.
|
||||
Streaming DFS mode.
|
||||
Uses a stack to traverse URLs in DFS order and yields CrawlResults as they become available.
|
||||
"""
|
||||
visited: Set[str] = set()
|
||||
stack: List[Tuple[str, Optional[str], int]] = [(start_url, None, 0)]
|
||||
depths: Dict[str, int] = {start_url: 0}
|
||||
self._reset_seen(start_url)
|
||||
|
||||
while stack and not self._cancel_event.is_set():
|
||||
url, parent, depth = stack.pop()
|
||||
@@ -129,92 +108,3 @@ class DFSDeepCrawlStrategy(BFSDeepCrawlStrategy):
|
||||
for new_url, new_parent in reversed(new_links):
|
||||
new_depth = depths.get(new_url, depth + 1)
|
||||
stack.append((new_url, new_parent, new_depth))
|
||||
|
||||
async def link_discovery(
|
||||
self,
|
||||
result: CrawlResult,
|
||||
source_url: str,
|
||||
current_depth: int,
|
||||
_visited: Set[str],
|
||||
next_level: List[Tuple[str, Optional[str]]],
|
||||
depths: Dict[str, int],
|
||||
) -> None:
|
||||
"""
|
||||
Find the next URLs we should push onto the DFS stack.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
result : CrawlResult
|
||||
Output of the page we just crawled; its ``links`` block is our raw material.
|
||||
source_url : str
|
||||
URL of the parent page; stored so callers can track ancestry.
|
||||
current_depth : int
|
||||
Depth of the parent; children naturally sit at ``current_depth + 1``.
|
||||
_visited : Set[str]
|
||||
Present to match the BFS signature, but we rely on ``_dfs_seen`` instead.
|
||||
next_level : list of tuples
|
||||
The stack buffer supplied by the caller; we append new ``(url, parent)`` items here.
|
||||
depths : dict
|
||||
Shared depth map so future metadata tagging knows how deep each URL lives.
|
||||
|
||||
Notes
|
||||
-----
|
||||
- ``_dfs_seen`` keeps us from pushing duplicates without touching the traversal guard.
|
||||
- Validation, scoring, and capacity trimming mirror the BFS version so behaviour stays consistent.
|
||||
"""
|
||||
next_depth = current_depth + 1
|
||||
if next_depth > self.max_depth:
|
||||
return
|
||||
|
||||
remaining_capacity = self.max_pages - self._pages_crawled
|
||||
if remaining_capacity <= 0:
|
||||
self.logger.info(
|
||||
f"Max pages limit ({self.max_pages}) reached, stopping link discovery"
|
||||
)
|
||||
return
|
||||
|
||||
links = result.links.get("internal", [])
|
||||
if self.include_external:
|
||||
links += result.links.get("external", [])
|
||||
|
||||
seen = self._dfs_seen
|
||||
valid_links: List[Tuple[str, float]] = []
|
||||
|
||||
for link in links:
|
||||
raw_url = link.get("href")
|
||||
if not raw_url:
|
||||
continue
|
||||
|
||||
normalized_url = normalize_url_for_deep_crawl(raw_url, source_url)
|
||||
if not normalized_url or normalized_url in seen:
|
||||
continue
|
||||
|
||||
if not await self.can_process_url(raw_url, next_depth):
|
||||
self.stats.urls_skipped += 1
|
||||
continue
|
||||
|
||||
score = self.url_scorer.score(normalized_url) if self.url_scorer else 0
|
||||
if score < self.score_threshold:
|
||||
self.logger.debug(
|
||||
f"URL {normalized_url} skipped: score {score} below threshold {self.score_threshold}"
|
||||
)
|
||||
self.stats.urls_skipped += 1
|
||||
continue
|
||||
|
||||
seen.add(normalized_url)
|
||||
valid_links.append((normalized_url, score))
|
||||
|
||||
if len(valid_links) > remaining_capacity:
|
||||
if self.url_scorer:
|
||||
valid_links.sort(key=lambda x: x[1], reverse=True)
|
||||
valid_links = valid_links[:remaining_capacity]
|
||||
self.logger.info(
|
||||
f"Limiting to {remaining_capacity} URLs due to max_pages limit"
|
||||
)
|
||||
|
||||
for url, score in valid_links:
|
||||
if score:
|
||||
result.metadata = result.metadata or {}
|
||||
result.metadata["score"] = score
|
||||
next_level.append((url, source_url))
|
||||
depths[url] = next_depth
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
from typing import List, Optional, Union, AsyncGenerator, Dict, Any, Callable
|
||||
from typing import List, Optional, Union, AsyncGenerator, Dict, Any
|
||||
import httpx
|
||||
import json
|
||||
from urllib.parse import urljoin
|
||||
@@ -7,7 +7,6 @@ import asyncio
|
||||
from .async_configs import BrowserConfig, CrawlerRunConfig
|
||||
from .models import CrawlResult
|
||||
from .async_logger import AsyncLogger, LogLevel
|
||||
from .utils import hooks_to_string
|
||||
|
||||
|
||||
class Crawl4aiClientError(Exception):
|
||||
@@ -71,41 +70,17 @@ class Crawl4aiDockerClient:
|
||||
self.logger.error(f"Server unreachable: {str(e)}", tag="ERROR")
|
||||
raise ConnectionError(f"Cannot connect to server: {str(e)}")
|
||||
|
||||
def _prepare_request(
|
||||
self,
|
||||
urls: List[str],
|
||||
browser_config: Optional[BrowserConfig] = None,
|
||||
crawler_config: Optional[CrawlerRunConfig] = None,
|
||||
hooks: Optional[Union[Dict[str, Callable], Dict[str, str]]] = None,
|
||||
hooks_timeout: int = 30
|
||||
) -> Dict[str, Any]:
|
||||
def _prepare_request(self, urls: List[str], browser_config: Optional[BrowserConfig] = None,
|
||||
crawler_config: Optional[CrawlerRunConfig] = None) -> Dict[str, Any]:
|
||||
"""Prepare request data from configs."""
|
||||
if self._token:
|
||||
self._http_client.headers["Authorization"] = f"Bearer {self._token}"
|
||||
|
||||
request_data = {
|
||||
return {
|
||||
"urls": urls,
|
||||
"browser_config": browser_config.dump() if browser_config else {},
|
||||
"crawler_config": crawler_config.dump() if crawler_config else {}
|
||||
}
|
||||
|
||||
# Handle hooks if provided
|
||||
if hooks:
|
||||
# Check if hooks are already strings or need conversion
|
||||
if any(callable(v) for v in hooks.values()):
|
||||
# Convert function objects to strings
|
||||
hooks_code = hooks_to_string(hooks)
|
||||
else:
|
||||
# Already in string format
|
||||
hooks_code = hooks
|
||||
|
||||
request_data["hooks"] = {
|
||||
"code": hooks_code,
|
||||
"timeout": hooks_timeout
|
||||
}
|
||||
|
||||
return request_data
|
||||
|
||||
async def _request(self, method: str, endpoint: str, **kwargs) -> httpx.Response:
|
||||
"""Make an HTTP request with error handling."""
|
||||
url = urljoin(self.base_url, endpoint)
|
||||
@@ -127,42 +102,16 @@ class Crawl4aiDockerClient:
|
||||
self,
|
||||
urls: List[str],
|
||||
browser_config: Optional[BrowserConfig] = None,
|
||||
crawler_config: Optional[CrawlerRunConfig] = None,
|
||||
hooks: Optional[Union[Dict[str, Callable], Dict[str, str]]] = None,
|
||||
hooks_timeout: int = 30
|
||||
crawler_config: Optional[CrawlerRunConfig] = None
|
||||
) -> Union[CrawlResult, List[CrawlResult], AsyncGenerator[CrawlResult, None]]:
|
||||
"""
|
||||
Execute a crawl operation.
|
||||
|
||||
Args:
|
||||
urls: List of URLs to crawl
|
||||
browser_config: Browser configuration
|
||||
crawler_config: Crawler configuration
|
||||
hooks: Optional hooks - can be either:
|
||||
- Dict[str, Callable]: Function objects that will be converted to strings
|
||||
- Dict[str, str]: Already stringified hook code
|
||||
hooks_timeout: Timeout in seconds for each hook execution (1-120)
|
||||
|
||||
Returns:
|
||||
Single CrawlResult, list of results, or async generator for streaming
|
||||
|
||||
Example with function hooks:
|
||||
>>> async def my_hook(page, context, **kwargs):
|
||||
... await page.set_viewport_size({"width": 1920, "height": 1080})
|
||||
... return page
|
||||
>>>
|
||||
>>> result = await client.crawl(
|
||||
... ["https://example.com"],
|
||||
... hooks={"on_page_context_created": my_hook}
|
||||
... )
|
||||
"""
|
||||
"""Execute a crawl operation."""
|
||||
await self._check_server()
|
||||
|
||||
data = self._prepare_request(urls, browser_config, crawler_config, hooks, hooks_timeout)
|
||||
|
||||
data = self._prepare_request(urls, browser_config, crawler_config)
|
||||
is_streaming = crawler_config and crawler_config.stream
|
||||
|
||||
|
||||
self.logger.info(f"Crawling {len(urls)} URLs {'(streaming)' if is_streaming else ''}", tag="CRAWL")
|
||||
|
||||
|
||||
if is_streaming:
|
||||
async def stream_results() -> AsyncGenerator[CrawlResult, None]:
|
||||
async with self._http_client.stream("POST", f"{self.base_url}/crawl/stream", json=data) as response:
|
||||
@@ -179,12 +128,12 @@ class Crawl4aiDockerClient:
|
||||
else:
|
||||
yield CrawlResult(**result)
|
||||
return stream_results()
|
||||
|
||||
|
||||
response = await self._request("POST", "/crawl", json=data)
|
||||
result_data = response.json()
|
||||
if not result_data.get("success", False):
|
||||
raise RequestError(f"Crawl failed: {result_data.get('msg', 'Unknown error')}")
|
||||
|
||||
|
||||
results = [CrawlResult(**r) for r in result_data.get("results", [])]
|
||||
self.logger.success(f"Crawl completed with {len(results)} results", tag="CRAWL")
|
||||
return results[0] if len(results) == 1 else results
|
||||
|
||||
@@ -94,20 +94,6 @@ class ExtractionStrategy(ABC):
|
||||
extracted_content.extend(future.result())
|
||||
return extracted_content
|
||||
|
||||
async def arun(self, url: str, sections: List[str], *q, **kwargs) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Async version: Process sections of text in parallel using asyncio.
|
||||
|
||||
Default implementation runs the sync version in a thread pool.
|
||||
Subclasses can override this for true async processing.
|
||||
|
||||
:param url: The URL of the webpage.
|
||||
:param sections: List of sections (strings) to process.
|
||||
:return: A list of processed JSON blocks.
|
||||
"""
|
||||
import asyncio
|
||||
return await asyncio.to_thread(self.run, url, sections, *q, **kwargs)
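# Override sketch for truly async subclasses (``_aextract_one`` is a hypothetical
# per-section coroutine; the real pattern appears in LLMExtractionStrategy.arun below):
#   async def arun(self, url, sections, *q, **kwargs):
#       chunks = await asyncio.gather(*(self._aextract_one(url, s) for s in sections))
#       return [block for chunk in chunks for block in chunk]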
|
||||
|
||||
|
||||
class NoExtractionStrategy(ExtractionStrategy):
|
||||
"""
|
||||
@@ -794,177 +780,6 @@ class LLMExtractionStrategy(ExtractionStrategy):
|
||||
|
||||
return extracted_content
|
||||
|
||||
async def aextract(self, url: str, ix: int, html: str) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Async version: Extract meaningful blocks or chunks from the given HTML using an LLM.
|
||||
|
||||
How it works:
|
||||
1. Construct a prompt with variables.
|
||||
2. Make an async request to the LLM using the prompt.
|
||||
3. Parse the response and extract blocks or chunks.
|
||||
|
||||
Args:
|
||||
url: The URL of the webpage.
|
||||
ix: Index of the block.
|
||||
html: The HTML content of the webpage.
|
||||
|
||||
Returns:
|
||||
A list of extracted blocks or chunks.
|
||||
"""
|
||||
from .utils import aperform_completion_with_backoff
|
||||
|
||||
if self.verbose:
|
||||
print(f"[LOG] Call LLM for {url} - block index: {ix}")
|
||||
|
||||
variable_values = {
|
||||
"URL": url,
|
||||
"HTML": escape_json_string(sanitize_html(html)),
|
||||
}
|
||||
|
||||
prompt_with_variables = PROMPT_EXTRACT_BLOCKS
|
||||
if self.instruction:
|
||||
variable_values["REQUEST"] = self.instruction
|
||||
prompt_with_variables = PROMPT_EXTRACT_BLOCKS_WITH_INSTRUCTION
|
||||
|
||||
if self.extract_type == "schema" and self.schema:
|
||||
variable_values["SCHEMA"] = json.dumps(self.schema, indent=2)
|
||||
prompt_with_variables = PROMPT_EXTRACT_SCHEMA_WITH_INSTRUCTION
|
||||
|
||||
if self.extract_type == "schema" and not self.schema:
|
||||
prompt_with_variables = PROMPT_EXTRACT_INFERRED_SCHEMA
|
||||
|
||||
for variable in variable_values:
|
||||
prompt_with_variables = prompt_with_variables.replace(
|
||||
"{" + variable + "}", variable_values[variable]
|
||||
)
|
||||
|
||||
try:
|
||||
response = await aperform_completion_with_backoff(
|
||||
self.llm_config.provider,
|
||||
prompt_with_variables,
|
||||
self.llm_config.api_token,
|
||||
base_url=self.llm_config.base_url,
|
||||
json_response=self.force_json_response,
|
||||
extra_args=self.extra_args,
|
||||
)
|
||||
# Track usage
|
||||
usage = TokenUsage(
|
||||
completion_tokens=response.usage.completion_tokens,
|
||||
prompt_tokens=response.usage.prompt_tokens,
|
||||
total_tokens=response.usage.total_tokens,
|
||||
completion_tokens_details=response.usage.completion_tokens_details.__dict__
|
||||
if response.usage.completion_tokens_details
|
||||
else {},
|
||||
prompt_tokens_details=response.usage.prompt_tokens_details.__dict__
|
||||
if response.usage.prompt_tokens_details
|
||||
else {},
|
||||
)
|
||||
self.usages.append(usage)
|
||||
|
||||
# Update totals
|
||||
self.total_usage.completion_tokens += usage.completion_tokens
|
||||
self.total_usage.prompt_tokens += usage.prompt_tokens
|
||||
self.total_usage.total_tokens += usage.total_tokens
|
||||
|
||||
try:
|
||||
content = response.choices[0].message.content
|
||||
blocks = None
|
||||
|
||||
if self.force_json_response:
|
||||
blocks = json.loads(content)
|
||||
if isinstance(blocks, dict):
|
||||
if len(blocks) == 1 and isinstance(list(blocks.values())[0], list):
|
||||
blocks = list(blocks.values())[0]
|
||||
else:
|
||||
blocks = [blocks]
|
||||
elif isinstance(blocks, list):
|
||||
blocks = blocks
|
||||
else:
|
||||
blocks = extract_xml_data(["blocks"], content)["blocks"]
|
||||
blocks = json.loads(blocks)
|
||||
|
||||
for block in blocks:
|
||||
block["error"] = False
|
||||
except Exception:
|
||||
parsed, unparsed = split_and_parse_json_objects(
|
||||
response.choices[0].message.content
|
||||
)
|
||||
blocks = parsed
|
||||
if unparsed:
|
||||
blocks.append(
|
||||
{"index": 0, "error": True, "tags": ["error"], "content": unparsed}
|
||||
)
|
||||
|
||||
if self.verbose:
|
||||
print(
|
||||
"[LOG] Extracted",
|
||||
len(blocks),
|
||||
"blocks from URL:",
|
||||
url,
|
||||
"block index:",
|
||||
ix,
|
||||
)
|
||||
return blocks
|
||||
except Exception as e:
|
||||
if self.verbose:
|
||||
print(f"[LOG] Error in LLM extraction: {e}")
|
||||
return [
|
||||
{
|
||||
"index": ix,
|
||||
"error": True,
|
||||
"tags": ["error"],
|
||||
"content": str(e),
|
||||
}
|
||||
]
|
||||
|
||||
async def arun(self, url: str, sections: List[str]) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Async version: Process sections with true parallelism using asyncio.gather.
|
||||
|
||||
Args:
|
||||
url: The URL of the webpage.
|
||||
sections: List of sections (strings) to process.
|
||||
|
||||
Returns:
|
||||
A list of extracted blocks or chunks.
|
||||
"""
|
||||
import asyncio
|
||||
|
||||
merged_sections = self._merge(
|
||||
sections,
|
||||
self.chunk_token_threshold,
|
||||
overlap=int(self.chunk_token_threshold * self.overlap_rate),
|
||||
)
|
||||
|
||||
extracted_content = []
|
||||
|
||||
# Create tasks for all sections to run in parallel
|
||||
tasks = [
|
||||
self.aextract(url, ix, sanitize_input_encode(section))
|
||||
for ix, section in enumerate(merged_sections)
|
||||
]
|
||||
|
||||
# Execute all tasks concurrently
|
||||
results = await asyncio.gather(*tasks, return_exceptions=True)
|
||||
|
||||
# Process results
|
||||
for result in results:
|
||||
if isinstance(result, Exception):
|
||||
if self.verbose:
|
||||
print(f"Error in async extraction: {result}")
|
||||
extracted_content.append(
|
||||
{
|
||||
"index": 0,
|
||||
"error": True,
|
||||
"tags": ["error"],
|
||||
"content": str(result),
|
||||
}
|
||||
)
|
||||
else:
|
||||
extracted_content.extend(result)
|
||||
|
||||
return extracted_content
|
||||
|
||||
def show_usage(self) -> None:
|
||||
"""Print a detailed token usage report showing total and per-request usage."""
|
||||
print("\n=== Token Usage Summary ===")
|
||||
|
||||
@@ -336,40 +336,8 @@ class LinkPreview:
|
||||
|
||||
updated_internal.append(updated_link)
|
||||
else:
|
||||
# # Keep original link unchanged
|
||||
# updated_internal.append(link)
|
||||
|
||||
# Head extraction failed - calculate fallback scores
|
||||
# Use URL-based scoring if query provided
|
||||
contextual_score = None
|
||||
if config.link_preview_config and config.link_preview_config.query:
|
||||
# Calculate URL-based relevance score as fallback
|
||||
contextual_score = self.seeder._calculate_url_relevance_score(
|
||||
config.link_preview_config.query,
|
||||
link.href
|
||||
)
|
||||
|
||||
# Create updated link with fallback scoring
|
||||
updated_link = Link(
|
||||
href=link.href,
|
||||
text=link.text,
|
||||
title=link.title,
|
||||
base_domain=link.base_domain,
|
||||
head_data=None, # No head data available
|
||||
head_extraction_status="failed",
|
||||
intrinsic_score=getattr(link, 'intrinsic_score', None),
|
||||
contextual_score=contextual_score
|
||||
)
|
||||
|
||||
# Calculate total score even without head data
|
||||
updated_link.total_score = calculate_total_score(
|
||||
intrinsic_score=updated_link.intrinsic_score,
|
||||
contextual_score=updated_link.contextual_score,
|
||||
score_links_enabled=getattr(config, 'score_links', False),
|
||||
query_provided=bool(config.link_preview_config and config.link_preview_config.query)
|
||||
)
|
||||
|
||||
updated_internal.append(updated_link)
|
||||
# Keep original link unchanged
|
||||
updated_internal.append(link)
|
||||
|
||||
# Update external links
|
||||
updated_external = []
|
||||
@@ -406,40 +374,8 @@ class LinkPreview:
|
||||
|
||||
updated_external.append(updated_link)
|
||||
else:
|
||||
# # Keep original link unchanged
|
||||
# updated_external.append(link)
|
||||
|
||||
# Head extraction failed - calculate fallback scores
|
||||
# Use URL-based scoring if query provided
|
||||
contextual_score = None
|
||||
if config.link_preview_config and config.link_preview_config.query:
|
||||
# Calculate URL-based relevance score as fallback
|
||||
contextual_score = self.seeder._calculate_url_relevance_score(
|
||||
config.link_preview_config.query,
|
||||
link.href
|
||||
)
|
||||
|
||||
# Create updated link with fallback scoring
|
||||
updated_link = Link(
|
||||
href=link.href,
|
||||
text=link.text,
|
||||
title=link.title,
|
||||
base_domain=link.base_domain,
|
||||
head_data=None, # No head data available
|
||||
head_extraction_status="failed",
|
||||
intrinsic_score=getattr(link, 'intrinsic_score', None),
|
||||
contextual_score=contextual_score
|
||||
)
|
||||
|
||||
# Calculate total score even without head data
|
||||
updated_link.total_score = calculate_total_score(
|
||||
intrinsic_score=updated_link.intrinsic_score,
|
||||
contextual_score=updated_link.contextual_score,
|
||||
score_links_enabled=getattr(config, 'score_links', False),
|
||||
query_provided=bool(config.link_preview_config and config.link_preview_config.query)
|
||||
)
|
||||
|
||||
updated_external.append(updated_link)
|
||||
# Keep original link unchanged
|
||||
updated_external.append(link)
|
||||
|
||||
# Sort links by relevance score if available
|
||||
if any(hasattr(link, 'head_data') and link.head_data and 'relevance_score' in link.head_data
|
||||
|
||||
@@ -2,6 +2,11 @@ from typing import List, Dict, Optional
|
||||
from abc import ABC, abstractmethod
|
||||
from itertools import cycle
|
||||
import os
|
||||
import random
|
||||
import time
|
||||
import asyncio
|
||||
import logging
|
||||
from collections import defaultdict
|
||||
|
||||
|
||||
########### ATTENTION PEOPLE OF EARTH ###########
|
||||
@@ -131,7 +136,7 @@ class ProxyRotationStrategy(ABC):
|
||||
"""Add proxy configurations to the strategy"""
|
||||
pass
|
||||
|
||||
class RoundRobinProxyStrategy:
|
||||
class RoundRobinProxyStrategy(ProxyRotationStrategy):
|
||||
"""Simple round-robin proxy rotation strategy using ProxyConfig objects"""
|
||||
|
||||
def __init__(self, proxies: List[ProxyConfig] = None):
|
||||
@@ -156,3 +161,113 @@ class RoundRobinProxyStrategy:
|
||||
if not self._proxy_cycle:
|
||||
return None
|
||||
return next(self._proxy_cycle)
|
||||
|
||||
|
||||
class RandomProxyStrategy(ProxyRotationStrategy):
|
||||
"""Random proxy selection strategy for unpredictable traffic patterns."""
|
||||
|
||||
def __init__(self, proxies: List[ProxyConfig] = None):
|
||||
self._proxies = []
|
||||
self._lock = asyncio.Lock()
|
||||
if proxies:
|
||||
self.add_proxies(proxies)
|
||||
|
||||
def add_proxies(self, proxies: List[ProxyConfig]):
|
||||
"""Add new proxies to the rotation pool."""
|
||||
self._proxies.extend(proxies)
|
||||
|
||||
async def get_next_proxy(self) -> Optional[ProxyConfig]:
|
||||
"""Get randomly selected proxy."""
|
||||
async with self._lock:
|
||||
if not self._proxies:
|
||||
return None
|
||||
return random.choice(self._proxies)
|
||||
|
||||
|
||||
class LeastUsedProxyStrategy(ProxyRotationStrategy):
|
||||
"""Least used proxy strategy for optimal load distribution."""
|
||||
|
||||
def __init__(self, proxies: List[ProxyConfig] = None):
|
||||
self._proxies = []
|
||||
self._usage_count: Dict[str, int] = defaultdict(int)
|
||||
self._lock = asyncio.Lock()
|
||||
if proxies:
|
||||
self.add_proxies(proxies)
|
||||
|
||||
def add_proxies(self, proxies: List[ProxyConfig]):
|
||||
"""Add new proxies to the rotation pool."""
|
||||
self._proxies.extend(proxies)
|
||||
for proxy in proxies:
|
||||
self._usage_count[proxy.server] = 0
|
||||
|
||||
async def get_next_proxy(self) -> Optional[ProxyConfig]:
|
||||
"""Get least used proxy for optimal load balancing."""
|
||||
async with self._lock:
|
||||
if not self._proxies:
|
||||
return None
|
||||
|
||||
# Find proxy with minimum usage
|
||||
min_proxy = min(self._proxies, key=lambda p: self._usage_count[p.server])
|
||||
self._usage_count[min_proxy.server] += 1
|
||||
return min_proxy
|
||||
|
||||
|
||||
class FailureAwareProxyStrategy(ProxyRotationStrategy):
|
||||
"""Failure-aware proxy strategy with automatic recovery and health tracking."""
|
||||
|
||||
def __init__(self, proxies: List[ProxyConfig] = None, failure_threshold: int = 3, recovery_time: int = 300):
|
||||
self._proxies = []
|
||||
self._healthy_proxies = []
|
||||
self._failure_count: Dict[str, int] = defaultdict(int)
|
||||
self._last_failure_time: Dict[str, float] = defaultdict(float)
|
||||
self._failure_threshold = failure_threshold
|
||||
self._recovery_time = recovery_time # seconds
|
||||
self._lock = asyncio.Lock()
|
||||
if proxies:
|
||||
self.add_proxies(proxies)
|
||||
|
||||
def add_proxies(self, proxies: List[ProxyConfig]):
|
||||
"""Add new proxies to the rotation pool."""
|
||||
self._proxies.extend(proxies)
|
||||
self._healthy_proxies.extend(proxies)
|
||||
for proxy in proxies:
|
||||
self._failure_count[proxy.server] = 0
|
||||
|
||||
async def get_next_proxy(self) -> Optional[ProxyConfig]:
|
||||
"""Get next healthy proxy with automatic recovery."""
|
||||
async with self._lock:
|
||||
# Recovery check: re-enable proxies after recovery_time
|
||||
current_time = time.time()
|
||||
recovered_proxies = []
|
||||
|
||||
for proxy in self._proxies:
|
||||
if (proxy not in self._healthy_proxies and
|
||||
current_time - self._last_failure_time[proxy.server] > self._recovery_time):
|
||||
recovered_proxies.append(proxy)
|
||||
self._failure_count[proxy.server] = 0
|
||||
|
||||
# Add recovered proxies back to healthy pool
|
||||
self._healthy_proxies.extend(recovered_proxies)
|
||||
|
||||
# If no healthy proxies, reset all (emergency fallback)
|
||||
if not self._healthy_proxies and self._proxies:
|
||||
logging.warning("All proxies failed, resetting health status")
|
||||
self._healthy_proxies = self._proxies.copy()
|
||||
for proxy in self._proxies:
|
||||
self._failure_count[proxy.server] = 0
|
||||
|
||||
if not self._healthy_proxies:
|
||||
return None
|
||||
|
||||
return random.choice(self._healthy_proxies)
|
||||
|
||||
async def mark_proxy_failed(self, proxy: ProxyConfig):
|
||||
"""Mark a proxy as failed and remove from healthy pool if threshold exceeded."""
|
||||
async with self._lock:
|
||||
self._failure_count[proxy.server] += 1
|
||||
self._last_failure_time[proxy.server] = time.time()
|
||||
|
||||
if (self._failure_count[proxy.server] >= self._failure_threshold and
|
||||
proxy in self._healthy_proxies):
|
||||
self._healthy_proxies.remove(proxy)
|
||||
logging.warning(f"Proxy {proxy.server} marked as unhealthy after {self._failure_count[proxy.server]} failures")
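# Usage sketch for the new strategies (assumes `proxies` is a list of ProxyConfig):
#   strategy = FailureAwareProxyStrategy(proxies, failure_threshold=3, recovery_time=300)
#   proxy = await strategy.get_next_proxy()      # healthy proxy, or None if the pool is empty
#   # ...on a failed request:
#   await strategy.mark_proxy_failed(proxy)      # benched after 3 strikes, auto-recovers after 300s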
|
||||
|
||||
195
crawl4ai/types_backup.py
Normal file
@@ -0,0 +1,195 @@
|
||||
from typing import TYPE_CHECKING, Union
|
||||
|
||||
# Logger types
|
||||
AsyncLoggerBase = Union['AsyncLoggerBaseType']
|
||||
AsyncLogger = Union['AsyncLoggerType']
|
||||
|
||||
# Crawler core types
|
||||
AsyncWebCrawler = Union['AsyncWebCrawlerType']
|
||||
CacheMode = Union['CacheModeType']
|
||||
CrawlResult = Union['CrawlResultType']
|
||||
CrawlerHub = Union['CrawlerHubType']
|
||||
BrowserProfiler = Union['BrowserProfilerType']
|
||||
# NEW: Add AsyncUrlSeederType
|
||||
AsyncUrlSeeder = Union['AsyncUrlSeederType']
|
||||
|
||||
# Configuration types
|
||||
BrowserConfig = Union['BrowserConfigType']
|
||||
CrawlerRunConfig = Union['CrawlerRunConfigType']
|
||||
HTTPCrawlerConfig = Union['HTTPCrawlerConfigType']
|
||||
LLMConfig = Union['LLMConfigType']
|
||||
# NEW: Add SeedingConfigType
|
||||
SeedingConfig = Union['SeedingConfigType']
|
||||
|
||||
# Content scraping types
|
||||
ContentScrapingStrategy = Union['ContentScrapingStrategyType']
|
||||
LXMLWebScrapingStrategy = Union['LXMLWebScrapingStrategyType']
|
||||
# Backward compatibility alias
|
||||
WebScrapingStrategy = Union['LXMLWebScrapingStrategyType']
|
||||
|
||||
# Proxy types
|
||||
ProxyRotationStrategy = Union['ProxyRotationStrategyType']
|
||||
RoundRobinProxyStrategy = Union['RoundRobinProxyStrategyType']
|
||||
|
||||
# Extraction types
|
||||
ExtractionStrategy = Union['ExtractionStrategyType']
|
||||
LLMExtractionStrategy = Union['LLMExtractionStrategyType']
|
||||
CosineStrategy = Union['CosineStrategyType']
|
||||
JsonCssExtractionStrategy = Union['JsonCssExtractionStrategyType']
|
||||
JsonXPathExtractionStrategy = Union['JsonXPathExtractionStrategyType']
|
||||
|
||||
# Chunking types
|
||||
ChunkingStrategy = Union['ChunkingStrategyType']
|
||||
RegexChunking = Union['RegexChunkingType']
|
||||
|
||||
# Markdown generation types
|
||||
DefaultMarkdownGenerator = Union['DefaultMarkdownGeneratorType']
|
||||
MarkdownGenerationResult = Union['MarkdownGenerationResultType']
|
||||
|
||||
# Content filter types
|
||||
RelevantContentFilter = Union['RelevantContentFilterType']
|
||||
PruningContentFilter = Union['PruningContentFilterType']
|
||||
BM25ContentFilter = Union['BM25ContentFilterType']
|
||||
LLMContentFilter = Union['LLMContentFilterType']
|
||||
|
||||
# Dispatcher types
|
||||
BaseDispatcher = Union['BaseDispatcherType']
|
||||
MemoryAdaptiveDispatcher = Union['MemoryAdaptiveDispatcherType']
|
||||
SemaphoreDispatcher = Union['SemaphoreDispatcherType']
|
||||
RateLimiter = Union['RateLimiterType']
|
||||
CrawlerMonitor = Union['CrawlerMonitorType']
|
||||
DisplayMode = Union['DisplayModeType']
|
||||
RunManyReturn = Union['RunManyReturnType']
|
||||
|
||||
# Docker client
|
||||
Crawl4aiDockerClient = Union['Crawl4aiDockerClientType']
|
||||
|
||||
# Deep crawling types
|
||||
DeepCrawlStrategy = Union['DeepCrawlStrategyType']
|
||||
BFSDeepCrawlStrategy = Union['BFSDeepCrawlStrategyType']
|
||||
FilterChain = Union['FilterChainType']
|
||||
ContentTypeFilter = Union['ContentTypeFilterType']
|
||||
DomainFilter = Union['DomainFilterType']
|
||||
URLFilter = Union['URLFilterType']
|
||||
FilterStats = Union['FilterStatsType']
|
||||
SEOFilter = Union['SEOFilterType']
|
||||
KeywordRelevanceScorer = Union['KeywordRelevanceScorerType']
|
||||
URLScorer = Union['URLScorerType']
|
||||
CompositeScorer = Union['CompositeScorerType']
|
||||
DomainAuthorityScorer = Union['DomainAuthorityScorerType']
|
||||
FreshnessScorer = Union['FreshnessScorerType']
|
||||
PathDepthScorer = Union['PathDepthScorerType']
|
||||
BestFirstCrawlingStrategy = Union['BestFirstCrawlingStrategyType']
|
||||
DFSDeepCrawlStrategy = Union['DFSDeepCrawlStrategyType']
|
||||
DeepCrawlDecorator = Union['DeepCrawlDecoratorType']
|
||||
|
||||
# Only import types during type checking to avoid circular imports
|
||||
if TYPE_CHECKING:
|
||||
# Logger imports
|
||||
from .async_logger import (
|
||||
AsyncLoggerBase as AsyncLoggerBaseType,
|
||||
AsyncLogger as AsyncLoggerType,
|
||||
)
|
||||
|
||||
# Crawler core imports
|
||||
from .async_webcrawler import (
|
||||
AsyncWebCrawler as AsyncWebCrawlerType,
|
||||
CacheMode as CacheModeType,
|
||||
)
|
||||
from .models import CrawlResult as CrawlResultType
|
||||
from .hub import CrawlerHub as CrawlerHubType
|
||||
from .browser_profiler import BrowserProfiler as BrowserProfilerType
|
||||
# NEW: Import AsyncUrlSeeder for type checking
|
||||
from .async_url_seeder import AsyncUrlSeeder as AsyncUrlSeederType
|
||||
|
||||
# Configuration imports
|
||||
from .async_configs import (
|
||||
BrowserConfig as BrowserConfigType,
|
||||
CrawlerRunConfig as CrawlerRunConfigType,
|
||||
HTTPCrawlerConfig as HTTPCrawlerConfigType,
|
||||
LLMConfig as LLMConfigType,
|
||||
# NEW: Import SeedingConfig for type checking
|
||||
SeedingConfig as SeedingConfigType,
|
||||
)
|
||||
|
||||
# Content scraping imports
|
||||
from .content_scraping_strategy import (
|
||||
ContentScrapingStrategy as ContentScrapingStrategyType,
|
||||
LXMLWebScrapingStrategy as LXMLWebScrapingStrategyType,
|
||||
)
|
||||
|
||||
# Proxy imports
|
||||
from .proxy_strategy import (
|
||||
ProxyRotationStrategy as ProxyRotationStrategyType,
|
||||
RoundRobinProxyStrategy as RoundRobinProxyStrategyType,
|
||||
)
|
||||
|
||||
# Extraction imports
|
||||
from .extraction_strategy import (
|
||||
ExtractionStrategy as ExtractionStrategyType,
|
||||
LLMExtractionStrategy as LLMExtractionStrategyType,
|
||||
CosineStrategy as CosineStrategyType,
|
||||
JsonCssExtractionStrategy as JsonCssExtractionStrategyType,
|
||||
JsonXPathExtractionStrategy as JsonXPathExtractionStrategyType,
|
||||
)
|
||||
|
||||
# Chunking imports
|
||||
from .chunking_strategy import (
|
||||
ChunkingStrategy as ChunkingStrategyType,
|
||||
RegexChunking as RegexChunkingType,
|
||||
)
|
||||
|
||||
# Markdown generation imports
|
||||
from .markdown_generation_strategy import (
|
||||
DefaultMarkdownGenerator as DefaultMarkdownGeneratorType,
|
||||
)
|
||||
from .models import MarkdownGenerationResult as MarkdownGenerationResultType
|
||||
|
||||
# Content filter imports
|
||||
from .content_filter_strategy import (
|
||||
RelevantContentFilter as RelevantContentFilterType,
|
||||
PruningContentFilter as PruningContentFilterType,
|
||||
BM25ContentFilter as BM25ContentFilterType,
|
||||
LLMContentFilter as LLMContentFilterType,
|
||||
)
|
||||
|
||||
# Dispatcher imports
|
||||
from .async_dispatcher import (
|
||||
BaseDispatcher as BaseDispatcherType,
|
||||
MemoryAdaptiveDispatcher as MemoryAdaptiveDispatcherType,
|
||||
SemaphoreDispatcher as SemaphoreDispatcherType,
|
||||
RateLimiter as RateLimiterType,
|
||||
CrawlerMonitor as CrawlerMonitorType,
|
||||
DisplayMode as DisplayModeType,
|
||||
RunManyReturn as RunManyReturnType,
|
||||
)
|
||||
|
||||
# Docker client
|
||||
from .docker_client import Crawl4aiDockerClient as Crawl4aiDockerClientType
|
||||
|
||||
# Deep crawling imports
|
||||
from .deep_crawling import (
|
||||
DeepCrawlStrategy as DeepCrawlStrategyType,
|
||||
BFSDeepCrawlStrategy as BFSDeepCrawlStrategyType,
|
||||
FilterChain as FilterChainType,
|
||||
ContentTypeFilter as ContentTypeFilterType,
|
||||
DomainFilter as DomainFilterType,
|
||||
URLFilter as URLFilterType,
|
||||
FilterStats as FilterStatsType,
|
||||
SEOFilter as SEOFilterType,
|
||||
KeywordRelevanceScorer as KeywordRelevanceScorerType,
|
||||
URLScorer as URLScorerType,
|
||||
CompositeScorer as CompositeScorerType,
|
||||
DomainAuthorityScorer as DomainAuthorityScorerType,
|
||||
FreshnessScorer as FreshnessScorerType,
|
||||
PathDepthScorer as PathDepthScorerType,
|
||||
BestFirstCrawlingStrategy as BestFirstCrawlingStrategyType,
|
||||
DFSDeepCrawlStrategy as DFSDeepCrawlStrategyType,
|
||||
DeepCrawlDecorator as DeepCrawlDecoratorType,
|
||||
)
|
||||
|
||||
|
||||
|
||||
def create_llm_config(*args, **kwargs) -> 'LLMConfigType':
|
||||
from .async_configs import LLMConfig
|
||||
return LLMConfig(*args, **kwargs)
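# Example usage (a minimal sketch; the keyword names below are assumptions
# about LLMConfig's signature, shown only for illustration):
#
#   llm_config = create_llm_config(provider="openai/gpt-4o-mini", api_token="sk-...")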
|
||||
@@ -47,7 +47,6 @@ from urllib.parse import (
|
||||
urljoin, urlparse, urlunparse,
|
||||
parse_qsl, urlencode, quote, unquote
|
||||
)
|
||||
import inspect
|
||||
|
||||
|
||||
# Monkey patch to fix wildcard handling in urllib.robotparser
|
||||
@@ -1825,82 +1824,6 @@ def perform_completion_with_backoff(
|
||||
# ]
|
||||
|
||||
|
||||
async def aperform_completion_with_backoff(
|
||||
provider,
|
||||
prompt_with_variables,
|
||||
api_token,
|
||||
json_response=False,
|
||||
base_url=None,
|
||||
**kwargs,
|
||||
):
|
||||
"""
|
||||
Async version: Perform an API completion request with exponential backoff.
|
||||
|
||||
How it works:
|
||||
1. Sends an async completion request to the API.
|
||||
2. Retries on rate-limit errors with exponential delays (async).
|
||||
3. Returns the API response or an error after all retries.
|
||||
|
||||
Args:
|
||||
provider (str): The name of the API provider.
|
||||
prompt_with_variables (str): The input prompt for the completion request.
|
||||
api_token (str): The API token for authentication.
|
||||
json_response (bool): Whether to request a JSON response. Defaults to False.
|
||||
base_url (Optional[str]): The base URL for the API. Defaults to None.
|
||||
**kwargs: Additional arguments for the API request.
|
||||
|
||||
Returns:
|
||||
dict: The API response or an error message after all retries.
|
||||
"""
|
||||
|
||||
from litellm import acompletion
|
||||
from litellm.exceptions import RateLimitError
|
||||
import asyncio
|
||||
|
||||
max_attempts = 3
|
||||
base_delay = 2  # Base delay in seconds; adjust as needed
|
||||
|
||||
extra_args = {"temperature": 0.01, "api_key": api_token, "base_url": base_url}
|
||||
if json_response:
|
||||
extra_args["response_format"] = {"type": "json_object"}
|
||||
|
||||
if kwargs.get("extra_args"):
|
||||
extra_args.update(kwargs["extra_args"])
|
||||
|
||||
for attempt in range(max_attempts):
|
||||
try:
|
||||
response = await acompletion(
|
||||
model=provider,
|
||||
messages=[{"role": "user", "content": prompt_with_variables}],
|
||||
**extra_args,
|
||||
)
|
||||
return response # Return the successful response
|
||||
except RateLimitError as e:
|
||||
print("Rate limit error:", str(e))
|
||||
|
||||
if attempt == max_attempts - 1:
|
||||
# Last attempt failed, raise the error.
|
||||
raise
|
||||
|
||||
# More attempts remain, so back off before retrying
|
||||
if attempt < max_attempts - 1:
|
||||
# Calculate the delay and wait
|
||||
delay = base_delay * (2**attempt) # Exponential backoff formula
|
||||
print(f"Waiting for {delay} seconds before retrying...")
|
||||
await asyncio.sleep(delay)
|
||||
else:
|
||||
# Return an error response after exhausting all retries
|
||||
return [
|
||||
{
|
||||
"index": 0,
|
||||
"tags": ["error"],
|
||||
"content": ["Rate limit error. Please try again later."],
|
||||
}
|
||||
]
|
||||
except Exception as e:
|
||||
raise e # Raise any other exceptions immediately
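# Example usage (illustrative sketch only; the provider string, prompt, and
# token are placeholder assumptions):
#
#   import asyncio
#
#   response = asyncio.run(
#       aperform_completion_with_backoff(
#           provider="openai/gpt-4o-mini",
#           prompt_with_variables="Summarize the following page content ...",
#           api_token="sk-...",
#           json_response=True,
#       )
#   )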
|
||||
|
||||
|
||||
def extract_blocks(url, html, provider=DEFAULT_PROVIDER, api_token=None, base_url=None):
|
||||
"""
|
||||
Extract content blocks from website HTML using an AI provider.
|
||||
@@ -3606,52 +3529,4 @@ def get_memory_stats() -> Tuple[float, float, float]:
|
||||
available_gb = get_true_available_memory_gb()
|
||||
used_percent = get_true_memory_usage_percent()
|
||||
|
||||
return used_percent, available_gb, total_gb
|
||||
|
||||
|
||||
# Hook utilities for Docker API
|
||||
def hooks_to_string(hooks: Dict[str, Callable]) -> Dict[str, str]:
|
||||
"""
|
||||
Convert hook function objects to string representations for Docker API.
|
||||
|
||||
This utility simplifies the process of using hooks with the Docker API by converting
|
||||
Python function objects into the string format required by the API.
|
||||
|
||||
Args:
|
||||
hooks: Dictionary mapping hook point names to Python function objects.
|
||||
Functions should be async and follow hook signature requirements.
|
||||
|
||||
Returns:
|
||||
Dictionary mapping hook point names to string representations of the functions.
|
||||
|
||||
Example:
|
||||
>>> async def my_hook(page, context, **kwargs):
|
||||
... await page.set_viewport_size({"width": 1920, "height": 1080})
|
||||
... return page
|
||||
>>>
|
||||
>>> hooks_dict = {"on_page_context_created": my_hook}
|
||||
>>> api_hooks = hooks_to_string(hooks_dict)
|
||||
>>> # api_hooks is now ready to use with Docker API
|
||||
|
||||
Raises:
|
||||
ValueError: If a hook is not callable or source cannot be extracted
|
||||
"""
|
||||
result = {}
|
||||
|
||||
for hook_name, hook_func in hooks.items():
|
||||
if not callable(hook_func):
|
||||
raise ValueError(f"Hook '{hook_name}' must be a callable function, got {type(hook_func)}")
|
||||
|
||||
try:
|
||||
# Get the source code of the function
|
||||
source = inspect.getsource(hook_func)
|
||||
# Remove any leading indentation to get clean source
|
||||
source = textwrap.dedent(source)
|
||||
result[hook_name] = source
|
||||
except (OSError, TypeError) as e:
|
||||
raise ValueError(
|
||||
f"Cannot extract source code for hook '{hook_name}'. "
|
||||
f"Make sure the function is defined in a file (not interactively). Error: {e}"
|
||||
)
|
||||
|
||||
return result
|
||||
return used_percent, available_gb, total_gb
|
||||
File diff suppressed because it is too large
@@ -12,8 +12,8 @@
|
||||
- [Python SDK](#python-sdk)
|
||||
- [Understanding Request Schema](#understanding-request-schema)
|
||||
- [REST API Examples](#rest-api-examples)
|
||||
- [Asynchronous Jobs with Webhooks](#asynchronous-jobs-with-webhooks)
|
||||
- [Additional API Endpoints](#additional-api-endpoints)
|
||||
- [Dispatcher Management](#dispatcher-management)
|
||||
- [HTML Extraction Endpoint](#html-extraction-endpoint)
|
||||
- [Screenshot Endpoint](#screenshot-endpoint)
|
||||
- [PDF Export Endpoint](#pdf-export-endpoint)
|
||||
@@ -35,6 +35,8 @@
|
||||
- [Configuration Tips and Best Practices](#configuration-tips-and-best-practices)
|
||||
- [Customizing Your Configuration](#customizing-your-configuration)
|
||||
- [Configuration Recommendations](#configuration-recommendations)
|
||||
- [Testing & Validation](#testing--validation)
|
||||
- [Dispatcher Demo Test Suite](#dispatcher-demo-test-suite)
|
||||
- [Getting Help](#getting-help)
|
||||
- [Summary](#summary)
|
||||
|
||||
@@ -59,13 +61,15 @@ Pull and run images directly from Docker Hub without building locally.
|
||||
|
||||
#### 1. Pull the Image
|
||||
|
||||
Our latest stable release is `0.7.7`. Images are built with multi-arch manifests, so Docker automatically pulls the correct version for your system.
|
||||
Our latest release candidate is `0.7.0-r1`. Images are built with multi-arch manifests, so Docker automatically pulls the correct version for your system.
|
||||
|
||||
> ⚠️ **Important Note**: The `latest` tag currently points to the stable `0.6.0` version. After testing and validation, `0.7.0` (without -r1) will be released and `latest` will be updated. For now, please use `0.7.0-r1` to test the new features.
|
||||
|
||||
```bash
|
||||
# Pull the latest stable version (0.7.7)
|
||||
docker pull unclecode/crawl4ai:0.7.7
|
||||
# Pull the release candidate (for testing new features)
|
||||
docker pull unclecode/crawl4ai:0.7.0-r1
|
||||
|
||||
# Or use the latest tag (points to 0.7.7)
|
||||
# Or pull the current stable version (0.6.0)
|
||||
docker pull unclecode/crawl4ai:latest
|
||||
```
|
||||
|
||||
@@ -100,7 +104,7 @@ EOL
|
||||
-p 11235:11235 \
|
||||
--name crawl4ai \
|
||||
--shm-size=1g \
|
||||
unclecode/crawl4ai:0.7.7
|
||||
unclecode/crawl4ai:0.7.0-r1
|
||||
```
|
||||
|
||||
* **With LLM support:**
|
||||
@@ -111,7 +115,7 @@ EOL
|
||||
--name crawl4ai \
|
||||
--env-file .llm.env \
|
||||
--shm-size=1g \
|
||||
unclecode/crawl4ai:0.7.7
|
||||
unclecode/crawl4ai:0.7.0-r1
|
||||
```
|
||||
|
||||
> The server will be available at `http://localhost:11235`. Visit `/playground` to access the interactive testing interface.
|
||||
@@ -184,7 +188,7 @@ The `docker-compose.yml` file in the project root provides a simplified approach
|
||||
```bash
|
||||
# Pulls and runs the release candidate from Docker Hub
|
||||
# Automatically selects the correct architecture
|
||||
IMAGE=unclecode/crawl4ai:0.7.7 docker compose up -d
|
||||
IMAGE=unclecode/crawl4ai:0.7.0-r1 docker compose up -d
|
||||
```
|
||||
|
||||
* **Build and Run Locally:**
|
||||
@@ -331,6 +335,134 @@ Access the MCP tool schemas at `http://localhost:11235/mcp/schema` for detailed
|
||||
|
||||
In addition to the core `/crawl` and `/crawl/stream` endpoints, the server provides several specialized endpoints:
|
||||
|
||||
### Dispatcher Management
|
||||
|
||||
The server supports multiple dispatcher strategies for managing concurrent crawling operations. Dispatchers control how many crawl jobs run simultaneously based on different rules like fixed concurrency limits or system memory availability.
|
||||
|
||||
#### Available Dispatchers
|
||||
|
||||
**Memory Adaptive Dispatcher** (Default)
|
||||
- Dynamically adjusts concurrency based on system memory usage
|
||||
- Monitors memory pressure and adapts crawl sessions accordingly
|
||||
- Automatically requeues tasks under high memory conditions
|
||||
- Implements fairness timeout for long-waiting URLs
|
||||
|
||||
**Semaphore Dispatcher**
|
||||
- Fixed concurrency limit using semaphore-based control
|
||||
- Simple and predictable resource usage
|
||||
- Ideal for controlled crawling scenarios
|
||||
|
||||
#### Dispatcher Endpoints
|
||||
|
||||
**List Available Dispatchers**
|
||||
```bash
|
||||
GET /dispatchers
|
||||
```
|
||||
|
||||
Returns information about all available dispatcher types, their configurations, and features.
|
||||
|
||||
```bash
|
||||
curl http://localhost:11234/dispatchers | jq
|
||||
```
|
||||
|
||||
**Get Default Dispatcher**
|
||||
```bash
|
||||
GET /dispatchers/default
|
||||
```
|
||||
|
||||
Returns the current default dispatcher configuration.
|
||||
|
||||
```bash
|
||||
curl http://localhost:11234/dispatchers/default | jq
|
||||
```
|
||||
|
||||
**Get Dispatcher Statistics**
|
||||
```bash
|
||||
GET /dispatchers/{dispatcher_type}/stats
|
||||
```
|
||||
|
||||
Returns real-time statistics for a specific dispatcher including active sessions, memory usage, and configuration.
|
||||
|
||||
```bash
|
||||
# Get memory_adaptive dispatcher stats
|
||||
curl http://localhost:11234/dispatchers/memory_adaptive/stats | jq
|
||||
|
||||
# Get semaphore dispatcher stats
|
||||
curl http://localhost:11234/dispatchers/semaphore/stats | jq
|
||||
```
|
||||
|
||||
#### Using Dispatchers in Crawl Requests
|
||||
|
||||
You can specify which dispatcher to use in your crawl requests by adding the `dispatcher` field:
|
||||
|
||||
**Using Default Dispatcher (memory_adaptive)**
|
||||
```bash
|
||||
curl -X POST http://localhost:11234/crawl \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"urls": ["https://example.com"],
|
||||
"browser_config": {},
|
||||
"crawler_config": {}
|
||||
}'
|
||||
```
|
||||
|
||||
**Using Semaphore Dispatcher**
|
||||
```bash
|
||||
curl -X POST http://localhost:11234/crawl \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"urls": ["https://example.com", "https://httpbin.org/html"],
|
||||
"browser_config": {},
|
||||
"crawler_config": {},
|
||||
"dispatcher": "semaphore"
|
||||
}'
|
||||
```
|
||||
|
||||
**Python SDK Example**
|
||||
```python
|
||||
import requests
|
||||
|
||||
# Crawl with memory adaptive dispatcher (default)
|
||||
response = requests.post(
|
||||
"http://localhost:11234/crawl",
|
||||
json={
|
||||
"urls": ["https://example.com"],
|
||||
"browser_config": {},
|
||||
"crawler_config": {}
|
||||
}
|
||||
)
|
||||
|
||||
# Crawl with semaphore dispatcher
|
||||
response = requests.post(
|
||||
"http://localhost:11234/crawl",
|
||||
json={
|
||||
"urls": ["https://example.com"],
|
||||
"browser_config": {},
|
||||
"crawler_config": {},
|
||||
"dispatcher": "semaphore"
|
||||
}
|
||||
)
|
||||
```
|
||||
|
||||
#### Dispatcher Configuration
|
||||
|
||||
Dispatchers are configured with sensible defaults that work well for most use cases:
|
||||
|
||||
**Memory Adaptive Dispatcher Defaults:**
|
||||
- `memory_threshold_percent`: 70.0 - Start adjusting at 70% memory usage
|
||||
- `critical_threshold_percent`: 85.0 - Critical memory pressure threshold
|
||||
- `recovery_threshold_percent`: 65.0 - Resume normal operation below 65%
|
||||
- `check_interval`: 1.0 - Check memory every second
|
||||
- `max_session_permit`: 20 - Maximum concurrent sessions
|
||||
- `fairness_timeout`: 600.0 - Prioritize URLs waiting > 10 minutes
|
||||
- `memory_wait_timeout`: 600.0 - Fail if high memory persists > 10 minutes
|
||||
|
||||
**Semaphore Dispatcher Defaults:**
|
||||
- `semaphore_count`: 5 - Maximum concurrent crawl operations
|
||||
- `max_session_permit`: 10 - Maximum total sessions allowed
|
||||
|
||||
> 💡 **Tip**: Use `memory_adaptive` for dynamic workloads where memory availability varies. Use `semaphore` for predictable, controlled crawling with fixed concurrency limits.
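
You can also read these settings back at runtime and pick a dispatcher per job. A minimal Python sketch (same base URL as the examples above; the exact fields inside the stats payload may vary by server version):

```python
import requests

BASE = "http://localhost:11234"

# List the available dispatchers and their configuration
print(requests.get(f"{BASE}/dispatchers").json())

# Check live stats for the memory adaptive dispatcher before submitting work
print(requests.get(f"{BASE}/dispatchers/memory_adaptive/stats").json())

# Submit a crawl that explicitly uses the semaphore dispatcher
resp = requests.post(
    f"{BASE}/crawl",
    json={
        "urls": ["https://example.com"],
        "browser_config": {},
        "crawler_config": {},
        "dispatcher": "semaphore",
    },
)
print(resp.json())
```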
|
||||
|
||||
### HTML Extraction Endpoint
|
||||
|
||||
```
|
||||
@@ -647,193 +779,143 @@ async def test_stream_crawl(token: str = None): # Made token optional
|
||||
# asyncio.run(test_stream_crawl())
|
||||
```
|
||||
|
||||
### Asynchronous Jobs with Webhooks
|
||||
#### LLM Job with Chunking Strategy
|
||||
|
||||
For long-running crawls or when you want to avoid keeping connections open, use the job queue endpoints. Instead of polling for results, configure a webhook to receive notifications when jobs complete.
|
||||
```python
|
||||
import requests
|
||||
import time
|
||||
|
||||
#### Why Use Jobs & Webhooks?
|
||||
# Example: LLM extraction with RegexChunking strategy
|
||||
# This breaks large documents into smaller chunks before LLM processing
|
||||
|
||||
- **No Polling Required** - Get notified when crawls complete instead of constantly checking status
|
||||
- **Better Resource Usage** - Free up client connections while jobs run in the background
|
||||
- **Scalable Architecture** - Ideal for high-volume crawling with TypeScript/Node.js clients or microservices
|
||||
- **Reliable Delivery** - Automatic retry with exponential backoff (5 attempts: 1s → 2s → 4s → 8s → 16s)
|
||||
|
||||
#### How It Works
|
||||
|
||||
1. **Submit Job** → POST to `/crawl/job` with optional `webhook_config`
|
||||
2. **Get Task ID** → Receive a `task_id` immediately
|
||||
3. **Job Runs** → Crawl executes in the background
|
||||
4. **Webhook Fired** → Server POSTs completion notification to your webhook URL
|
||||
5. **Fetch Results** → If data wasn't included in webhook, GET `/crawl/job/{task_id}`
|
||||
|
||||
#### Quick Example
|
||||
|
||||
```bash
|
||||
# Submit a crawl job with webhook notification
|
||||
curl -X POST http://localhost:11235/crawl/job \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"urls": ["https://example.com"],
|
||||
"webhook_config": {
|
||||
"webhook_url": "https://myapp.com/webhooks/crawl-complete",
|
||||
"webhook_data_in_payload": false
|
||||
llm_job_payload = {
|
||||
"url": "https://example.com/long-article",
|
||||
"q": "Extract all key points and main ideas from this article",
|
||||
"chunking_strategy": {
|
||||
"type": "RegexChunking",
|
||||
"params": {
|
||||
"patterns": ["\\n\\n"], # Split on double newlines (paragraphs)
|
||||
"overlap": 50
|
||||
}
|
||||
}
|
||||
}'
|
||||
|
||||
# Response: {"task_id": "crawl_a1b2c3d4"}
|
||||
```
|
||||
|
||||
**Your webhook receives:**
|
||||
```json
|
||||
{
|
||||
"task_id": "crawl_a1b2c3d4",
|
||||
"task_type": "crawl",
|
||||
"status": "completed",
|
||||
"timestamp": "2025-10-21T10:30:00.000000+00:00",
|
||||
"urls": ["https://example.com"]
|
||||
}
|
||||
|
||||
# Submit LLM job
|
||||
response = requests.post(
|
||||
"http://localhost:11235/llm/job",
|
||||
json=llm_job_payload
|
||||
)
|
||||
|
||||
if response.ok:
|
||||
job_data = response.json()
|
||||
job_id = job_data["task_id"]
|
||||
print(f"Job submitted successfully. Job ID: {job_id}")
|
||||
|
||||
# Poll for completion
|
||||
while True:
|
||||
status_response = requests.get(f"http://localhost:11235/llm/job/{job_id}")
|
||||
if status_response.ok:
|
||||
status_data = status_response.json()
|
||||
if status_data["status"] == "completed":
|
||||
print("Job completed!")
|
||||
print("Extracted content:", status_data["result"])
|
||||
break
|
||||
elif status_data["status"] == "failed":
|
||||
print("Job failed:", status_data.get("error"))
|
||||
break
|
||||
else:
|
||||
print(f"Job status: {status_data['status']}")
|
||||
time.sleep(2) # Wait 2 seconds before checking again
|
||||
else:
|
||||
print(f"Error checking job status: {status_response.text}")
|
||||
break
|
||||
else:
|
||||
print(f"Error submitting job: {response.text}")
|
||||
```
|
||||
|
||||
Then fetch the results:
|
||||
```bash
|
||||
curl http://localhost:11235/crawl/job/crawl_a1b2c3d4
|
||||
```
|
||||
**Available Chunking Strategies:**
|
||||
|
||||
#### Include Data in Webhook
|
||||
|
||||
Set `webhook_data_in_payload: true` to receive the full crawl results directly in the webhook:
|
||||
|
||||
```bash
|
||||
curl -X POST http://localhost:11235/crawl/job \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"urls": ["https://example.com"],
|
||||
"webhook_config": {
|
||||
"webhook_url": "https://myapp.com/webhooks/crawl-complete",
|
||||
"webhook_data_in_payload": true
|
||||
}
|
||||
}'
|
||||
```
|
||||
|
||||
**Your webhook receives the complete data:**
|
||||
```json
|
||||
{
|
||||
"task_id": "crawl_a1b2c3d4",
|
||||
"task_type": "crawl",
|
||||
"status": "completed",
|
||||
"timestamp": "2025-10-21T10:30:00.000000+00:00",
|
||||
"urls": ["https://example.com"],
|
||||
"data": {
|
||||
"markdown": "...",
|
||||
"html": "...",
|
||||
"links": {...},
|
||||
"metadata": {...}
|
||||
- **IdentityChunking**: Returns the entire content as a single chunk (no splitting)
|
||||
```json
|
||||
{
|
||||
"type": "IdentityChunking",
|
||||
"params": {}
|
||||
}
|
||||
}
|
||||
```
|
||||
```
|
||||
|
||||
#### Webhook Authentication
|
||||
|
||||
Add custom headers for authentication:
|
||||
|
||||
```json
|
||||
{
|
||||
"urls": ["https://example.com"],
|
||||
"webhook_config": {
|
||||
"webhook_url": "https://myapp.com/webhooks/crawl",
|
||||
"webhook_data_in_payload": false,
|
||||
"webhook_headers": {
|
||||
"X-Webhook-Secret": "your-secret-token",
|
||||
"X-Service-ID": "crawl4ai-prod"
|
||||
- **RegexChunking**: Split content using regular expression patterns
|
||||
```json
|
||||
{
|
||||
"type": "RegexChunking",
|
||||
"params": {
|
||||
"patterns": ["\\n\\n"]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
```
|
||||
|
||||
#### Global Default Webhook
|
||||
- **NlpSentenceChunking**: Split content into sentences using NLP (requires NLTK)
|
||||
```json
|
||||
{
|
||||
"type": "NlpSentenceChunking",
|
||||
"params": {}
|
||||
}
|
||||
```
|
||||
|
||||
Configure a default webhook URL in `config.yml` for all jobs:
|
||||
|
||||
```yaml
|
||||
webhooks:
|
||||
enabled: true
|
||||
default_url: "https://myapp.com/webhooks/default"
|
||||
data_in_payload: false
|
||||
retry:
|
||||
max_attempts: 5
|
||||
initial_delay_ms: 1000
|
||||
max_delay_ms: 32000
|
||||
timeout_ms: 30000
|
||||
```
|
||||
|
||||
Now jobs without `webhook_config` automatically use the default webhook.
|
||||
|
||||
#### Job Status Polling (Without Webhooks)
|
||||
|
||||
If you prefer polling instead of webhooks, just omit `webhook_config`:
|
||||
|
||||
```bash
|
||||
# Submit job
|
||||
curl -X POST http://localhost:11235/crawl/job \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"urls": ["https://example.com"]}'
|
||||
# Response: {"task_id": "crawl_xyz"}
|
||||
|
||||
# Poll for status
|
||||
curl http://localhost:11235/crawl/job/crawl_xyz
|
||||
```
|
||||
|
||||
The response includes `status` field: `"processing"`, `"completed"`, or `"failed"`.
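
A minimal polling loop in Python (sketch; result fields beyond `status` depend on the job type):

```python
import time
import requests

BASE = "http://localhost:11235"

# Submit a job without webhook_config
task_id = requests.post(
    f"{BASE}/crawl/job",
    json={"urls": ["https://example.com"]},
).json()["task_id"]

# Poll until the job leaves the "processing" state
while True:
    job = requests.get(f"{BASE}/crawl/job/{task_id}").json()
    if job["status"] in ("completed", "failed"):
        break
    time.sleep(2)

print(job["status"])
```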
|
||||
|
||||
#### LLM Extraction Jobs with Webhooks
|
||||
|
||||
The same webhook system works for LLM extraction jobs via `/llm/job`:
|
||||
|
||||
```bash
|
||||
# Submit LLM extraction job with webhook
|
||||
curl -X POST http://localhost:11235/llm/job \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"url": "https://example.com/article",
|
||||
"q": "Extract the article title, author, and main points",
|
||||
"provider": "openai/gpt-4o-mini",
|
||||
"webhook_config": {
|
||||
"webhook_url": "https://myapp.com/webhooks/llm-complete",
|
||||
"webhook_data_in_payload": true,
|
||||
"webhook_headers": {
|
||||
"X-Webhook-Secret": "your-secret-token"
|
||||
}
|
||||
}
|
||||
}'
|
||||
|
||||
# Response: {"task_id": "llm_1234567890"}
|
||||
```
|
||||
|
||||
**Your webhook receives:**
|
||||
```json
|
||||
{
|
||||
"task_id": "llm_1234567890",
|
||||
"task_type": "llm_extraction",
|
||||
"status": "completed",
|
||||
"timestamp": "2025-10-22T12:30:00.000000+00:00",
|
||||
"urls": ["https://example.com/article"],
|
||||
"data": {
|
||||
"extracted_content": {
|
||||
"title": "Understanding Web Scraping",
|
||||
"author": "John Doe",
|
||||
"main_points": ["Point 1", "Point 2", "Point 3"]
|
||||
- **TopicSegmentationChunking**: Segment content into topics using TextTiling (requires NLTK)
|
||||
```json
|
||||
{
|
||||
"type": "TopicSegmentationChunking",
|
||||
"params": {
|
||||
"num_keywords": 3
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
```
|
||||
|
||||
**Key Differences for LLM Jobs:**
|
||||
- Task type is `"llm_extraction"` instead of `"crawl"`
|
||||
- Extracted data is in `data.extracted_content`
|
||||
- Single URL only (not an array)
|
||||
- Supports schema-based extraction with `schema` parameter
|
||||
- **FixedLengthWordChunking**: Split into fixed-length word chunks
|
||||
```json
|
||||
{
|
||||
"type": "FixedLengthWordChunking",
|
||||
"params": {
|
||||
"chunk_size": 100
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
> 💡 **Pro tip**: See [WEBHOOK_EXAMPLES.md](./WEBHOOK_EXAMPLES.md) for detailed examples including TypeScript client code, Flask webhook handlers, and failure handling.
|
||||
- **SlidingWindowChunking**: Overlapping word chunks with configurable step size
|
||||
```json
|
||||
{
|
||||
"type": "SlidingWindowChunking",
|
||||
"params": {
|
||||
"window_size": 100,
|
||||
"step": 50
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- **OverlappingWindowChunking**: Fixed-size chunks with word overlap
|
||||
```json
|
||||
{
|
||||
"type": "OverlappingWindowChunking",
|
||||
"params": {
|
||||
"window_size": 1000,
|
||||
"overlap": 100
|
||||
}
|
||||
}
|
||||
```
|
||||
```json
{
|
||||
"type": "OverlappingWindowChunking",
|
||||
"params": {
|
||||
"chunk_size": 1500,
|
||||
"overlap": 100
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Notes:**
|
||||
- `chunking_strategy` is optional - if omitted, default token-based chunking is used
|
||||
- Chunking is applied at the API level without modifying the core SDK
|
||||
- Results from all chunks are merged into a single response
|
||||
- Each chunk is processed independently with the same LLM instruction
|
||||
|
||||
---
|
||||
|
||||
@@ -1000,6 +1082,93 @@ You can override the default `config.yml`.
|
||||
- Increase batch_process timeout for large content
|
||||
- Adjust stream_init timeout based on initial response times
|
||||
|
||||
## Testing & Validation
|
||||
|
||||
We provide two comprehensive test suites to validate all Docker server functionality:
|
||||
|
||||
### 1. Extended Features Test Suite ✅ **100% Pass Rate**
|
||||
|
||||
Complete validation of all advanced features including URL seeding, adaptive crawling, browser adapters, proxy rotation, and dispatchers.
|
||||
|
||||
```bash
|
||||
# Run all extended features tests
|
||||
cd tests/docker/extended_features
|
||||
./run_extended_tests.sh
|
||||
|
||||
# Custom server URL
|
||||
./run_extended_tests.sh --server http://localhost:8080
|
||||
```
|
||||
|
||||
**Test Coverage (12 tests):**
|
||||
- ✅ **URL Seeding** (2 tests): Basic seeding + domain filters
|
||||
- ✅ **Adaptive Crawling** (2 tests): Basic + custom thresholds
|
||||
- ✅ **Browser Adapters** (3 tests): Default, Stealth, Undetected
|
||||
- ✅ **Proxy Rotation** (2 tests): Round Robin, Random strategies
|
||||
- ✅ **Dispatchers** (3 tests): Memory Adaptive, Semaphore, Management APIs
|
||||
|
||||
**Current Status:**
|
||||
```
|
||||
Total Tests: 12
|
||||
Passed: 12
|
||||
Failed: 0
|
||||
Pass Rate: 100.0% ✅
|
||||
Average Duration: ~8.8 seconds
|
||||
```
|
||||
|
||||
Features:
|
||||
- Rich formatted output with tables and panels
|
||||
- Real-time progress indicators
|
||||
- Detailed error diagnostics
|
||||
- Category-based results grouping
|
||||
- Server health checks
|
||||
|
||||
See [`tests/docker/extended_features/README_EXTENDED_TESTS.md`](../../tests/docker/extended_features/README_EXTENDED_TESTS.md) for full documentation and API response format reference.
|
||||
|
||||
### 2. Dispatcher Demo Test Suite
|
||||
|
||||
Focused tests for dispatcher functionality with performance comparisons:
|
||||
|
||||
```bash
|
||||
# Run all tests
|
||||
cd test_scripts
|
||||
./run_dispatcher_tests.sh
|
||||
|
||||
# Run specific category
|
||||
./run_dispatcher_tests.sh -c basic # Basic dispatcher usage
|
||||
./run_dispatcher_tests.sh -c integration # Integration with other features
|
||||
./run_dispatcher_tests.sh -c endpoints # Dispatcher management endpoints
|
||||
./run_dispatcher_tests.sh -c performance # Performance comparison
|
||||
./run_dispatcher_tests.sh -c error # Error handling
|
||||
|
||||
# Custom server URL
|
||||
./run_dispatcher_tests.sh -s http://your-server:port
|
||||
```
|
||||
|
||||
**Test Coverage (17 tests):**
|
||||
- **Basic Usage Tests**: Single/multiple URL crawling with different dispatchers
|
||||
- **Integration Tests**: Dispatchers combined with anti-bot strategies, browser configs, JS execution, screenshots
|
||||
- **Endpoint Tests**: Dispatcher management API validation
|
||||
- **Performance Tests**: Side-by-side comparison of memory_adaptive vs semaphore
|
||||
- **Error Handling**: Edge cases and validation tests
|
||||
|
||||
Results are displayed with rich formatting, timing information, and success rates. See `test_scripts/README_DISPATCHER_TESTS.md` for full documentation.
|
||||
|
||||
### Quick Test Commands
|
||||
|
||||
```bash
|
||||
# Test all features (recommended)
|
||||
./tests/docker/extended_features/run_extended_tests.sh
|
||||
|
||||
# Test dispatchers only
|
||||
./test_scripts/run_dispatcher_tests.sh
|
||||
|
||||
# Test server health
|
||||
curl http://localhost:11235/health
|
||||
|
||||
# Test dispatcher endpoint
|
||||
curl http://localhost:11235/dispatchers | jq
|
||||
```
|
||||
|
||||
## Getting Help
|
||||
|
||||
We're here to help you succeed with Crawl4AI! Here's how to get support:
|
||||
@@ -1013,11 +1182,10 @@ We're here to help you succeed with Crawl4AI! Here's how to get support:
|
||||
|
||||
In this guide, we've covered everything you need to get started with Crawl4AI's Docker deployment:
|
||||
- Building and running the Docker container
|
||||
- Configuring the environment
|
||||
|
||||
- Using the interactive playground for testing
|
||||
- Making API requests with proper typing
|
||||
- Using the Python SDK
|
||||
- Asynchronous job queues with webhook notifications
|
||||
- Leveraging specialized endpoints for screenshots, PDFs, and JavaScript execution
|
||||
- Connecting via the Model Context Protocol (MCP)
|
||||
- Monitoring your deployment
|
||||
|
||||
@@ -1,241 +0,0 @@
|
||||
# Crawl4AI Docker Memory & Pool Optimization - Implementation Log
|
||||
|
||||
## Critical Issues Identified
|
||||
|
||||
### Memory Management
|
||||
- **Host vs Container**: `psutil.virtual_memory()` reported host memory, not container limits
|
||||
- **Browser Pooling**: No pool reuse - every endpoint created new browsers
|
||||
- **Warmup Waste**: Permanent browser sat idle with mismatched config signature
|
||||
- **Idle Cleanup**: 30min TTL too long, janitor ran every 60s
|
||||
- **Endpoint Inconsistency**: 75% of endpoints bypassed pool (`/md`, `/html`, `/screenshot`, `/pdf`, `/execute_js`, `/llm`)
|
||||
|
||||
### Pool Design Flaws
|
||||
- **Config Mismatch**: Permanent browser used `config.yml` args, endpoints used empty `BrowserConfig()`
|
||||
- **Logging Level**: Pool hit markers at DEBUG, invisible with INFO logging
|
||||
|
||||
## Implementation Changes
|
||||
|
||||
### 1. Container-Aware Memory Detection (`utils.py`)
|
||||
```python
|
||||
def get_container_memory_percent() -> float:
|
||||
# Try cgroup v2 → v1 → fallback to psutil
|
||||
# Reads /sys/fs/cgroup/memory.{current,max} OR memory/memory.{usage,limit}_in_bytes
|
||||
```
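
A minimal sketch of that fallback chain (error handling simplified; the shipped implementation may differ in details):

```python
import psutil

def get_container_memory_percent() -> float:
    # cgroup v2: memory.current / memory.max
    try:
        current = int(open("/sys/fs/cgroup/memory.current").read())
        limit_raw = open("/sys/fs/cgroup/memory.max").read().strip()
        if limit_raw != "max":
            return current / int(limit_raw) * 100.0
    except (OSError, ValueError):
        pass
    # cgroup v1: memory.usage_in_bytes / memory.limit_in_bytes
    try:
        usage = int(open("/sys/fs/cgroup/memory/memory.usage_in_bytes").read())
        limit = int(open("/sys/fs/cgroup/memory/memory.limit_in_bytes").read())
        if limit > 0:
            return usage / limit * 100.0
    except (OSError, ValueError):
        pass
    # Fallback: host-level metric from psutil
    return psutil.virtual_memory().percent
```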
|
||||
|
||||
### 2. Smart Browser Pool (`crawler_pool.py`)
|
||||
**3-Tier System:**
|
||||
- **PERMANENT**: Always-ready default browser (never cleaned)
|
||||
- **HOT_POOL**: Configs used 3+ times (longer TTL)
|
||||
- **COLD_POOL**: New/rare configs (short TTL)
|
||||
|
||||
**Key Functions:**
|
||||
- `get_crawler(cfg)`: Check permanent → hot → cold → create new
|
||||
- `init_permanent(cfg)`: Initialize permanent at startup
|
||||
- `janitor()`: Adaptive cleanup (10s/30s/60s intervals based on memory)
|
||||
- `_sig(cfg)`: SHA1 hash of config dict for pool keys
|
||||
|
||||
**Logging Fix**: Changed `logger.debug()` → `logger.info()` for pool hits
|
||||
|
||||
### 3. Endpoint Unification
|
||||
**Helper Function** (`server.py`):
|
||||
```python
|
||||
def get_default_browser_config() -> BrowserConfig:
|
||||
return BrowserConfig(
|
||||
extra_args=config["crawler"]["browser"].get("extra_args", []),
|
||||
**config["crawler"]["browser"].get("kwargs", {}),
|
||||
)
|
||||
```
|
||||
|
||||
**Migrated Endpoints:**
|
||||
- `/html`, `/screenshot`, `/pdf`, `/execute_js` → use `get_default_browser_config()`
|
||||
- `handle_llm_qa()`, `handle_markdown_request()` → same
|
||||
|
||||
**Result**: All endpoints now hit permanent browser pool
|
||||
|
||||
### 4. Config Updates (`config.yml`)
|
||||
- `idle_ttl_sec: 1800` → `300` (30min → 5min base TTL)
|
||||
- `port: 11234` → `11235` (fixed mismatch with Gunicorn)
|
||||
|
||||
### 5. Lifespan Fix (`server.py`)
|
||||
```python
|
||||
await init_permanent(BrowserConfig(
|
||||
extra_args=config["crawler"]["browser"].get("extra_args", []),
|
||||
**config["crawler"]["browser"].get("kwargs", {}),
|
||||
))
|
||||
```
|
||||
Permanent browser now matches endpoint config signatures
|
||||
|
||||
## Test Results
|
||||
|
||||
### Test 1: Basic Health
|
||||
- 10 requests to `/health`
|
||||
- **Result**: 100% success, avg 3ms latency
|
||||
- **Baseline**: Container starts in ~5s, 270 MB idle
|
||||
|
||||
### Test 2: Memory Monitoring
|
||||
- 20 requests with Docker stats tracking
|
||||
- **Result**: 100% success, no memory leak (-0.2 MB delta)
|
||||
- **Baseline**: 269.7 MB container overhead
|
||||
|
||||
### Test 3: Pool Validation
|
||||
- 30 requests to `/html` endpoint
|
||||
- **Result**: **100% permanent browser hits**, 0 new browsers created
|
||||
- **Memory**: 287 MB baseline → 396 MB active (+109 MB)
|
||||
- **Latency**: Avg 4s (includes network to httpbin.org)
|
||||
|
||||
### Test 4: Concurrent Load
|
||||
- Light (10) → Medium (50) → Heavy (100) concurrent
|
||||
- **Total**: 320 requests
|
||||
- **Result**: 100% success, **320/320 permanent hits**, 0 new browsers
|
||||
- **Memory**: 269 MB → peak 1533 MB → final 993 MB
|
||||
- **Latency**: P99 at 100 concurrent = 34s (expected with single browser)
|
||||
|
||||
### Test 5: Pool Stress (Mixed Configs)
|
||||
- 20 requests with 4 different viewport configs
|
||||
- **Result**: 4 new browsers, 4 cold hits, **4 promotions to hot**, 8 hot hits
|
||||
- **Reuse Rate**: 60% (12 pool hits / 20 requests)
|
||||
- **Memory**: 270 MB → 928 MB peak (+658 MB = ~165 MB per browser)
|
||||
- **Proves**: Cold → hot promotion at 3 uses working perfectly
|
||||
|
||||
### Test 6: Multi-Endpoint
|
||||
- 10 requests each: `/html`, `/screenshot`, `/pdf`, `/crawl`
|
||||
- **Result**: 100% success across all 4 endpoints
|
||||
- **Latency**: 5-8s avg (PDF slowest at 7.2s)
|
||||
|
||||
### Test 7: Cleanup Verification
|
||||
- 20 requests (load spike) → 90s idle
|
||||
- **Memory**: 269 MB → peak 1107 MB → final 780 MB
|
||||
- **Recovery**: 327 MB (39%) - partial cleanup
|
||||
- **Note**: Hot pool browsers persist (by design), janitor working correctly
|
||||
|
||||
## Performance Metrics
|
||||
|
||||
| Metric | Before | After | Improvement |
|
||||
|--------|--------|-------|-------------|
|
||||
| Pool Reuse | 0% | 100% (default config) | ∞ |
|
||||
| Memory Leak | Unknown | 0 MB/cycle | Stable |
|
||||
| Browser Reuse | No | Yes | ~3-5s saved per request |
|
||||
| Idle Memory | 500-700 MB × N | 270-400 MB | 10x reduction |
|
||||
| Concurrent Capacity | ~20 | 100+ | 5x |
|
||||
|
||||
## Key Learnings
|
||||
|
||||
1. **Config Signature Matching**: Permanent browser MUST match endpoint default config exactly (SHA1 hash)
|
||||
2. **Logging Levels**: Pool diagnostics need INFO level, not DEBUG
|
||||
3. **Memory in Docker**: Must read cgroup files, not host metrics
|
||||
4. **Janitor Timing**: 60s interval adequate, but TTLs should be short (5min) for cold pool
|
||||
5. **Hot Promotion**: 3-use threshold works well for production patterns
|
||||
6. **Memory Per Browser**: ~150-200 MB per Chromium instance with headless + text_mode
|
||||
|
||||
## Test Infrastructure
|
||||
|
||||
**Location**: `deploy/docker/tests/`
|
||||
**Dependencies**: `httpx`, `docker` (Python SDK)
|
||||
**Pattern**: Sequential build - each test adds one capability
|
||||
|
||||
**Files**:
|
||||
- `test_1_basic.py`: Health check + container lifecycle
|
||||
- `test_2_memory.py`: + Docker stats monitoring
|
||||
- `test_3_pool.py`: + Log analysis for pool markers
|
||||
- `test_4_concurrent.py`: + asyncio.Semaphore for concurrency control
|
||||
- `test_5_pool_stress.py`: + Config variants (viewports)
|
||||
- `test_6_multi_endpoint.py`: + Multiple endpoint testing
|
||||
- `test_7_cleanup.py`: + Time-series memory tracking for janitor
|
||||
|
||||
**Run Pattern**:
|
||||
```bash
|
||||
cd deploy/docker/tests
|
||||
pip install -r requirements.txt
|
||||
# Rebuild after code changes:
|
||||
cd /path/to/repo && docker buildx build -t crawl4ai-local:latest --load .
|
||||
# Run test:
|
||||
python test_N_name.py
|
||||
```
|
||||
|
||||
## Architecture Decisions
|
||||
|
||||
**Why Permanent Browser?**
|
||||
- 90% of requests use default config → single browser serves most traffic
|
||||
- Eliminates 3-5s startup overhead per request
|
||||
|
||||
**Why 3-Tier Pool?**
|
||||
- Permanent: Zero cost for common case
|
||||
- Hot: Amortized cost for frequent variants
|
||||
- Cold: Lazy allocation for rare configs
|
||||
|
||||
**Why Adaptive Janitor?**
|
||||
- Memory pressure triggers aggressive cleanup
|
||||
- Low memory allows longer TTLs for better reuse
|
||||
|
||||
**Why Not Close After Each Request?**
|
||||
- Browser startup: 3-5s overhead
|
||||
- Pool reuse: <100ms overhead
|
||||
- Net: 30-50x faster
|
||||
|
||||
## Future Optimizations
|
||||
|
||||
1. **Request Queuing**: When at capacity, queue instead of reject
|
||||
2. **Pre-warming**: Predict common configs, pre-create browsers
|
||||
3. **Metrics Export**: Prometheus metrics for pool efficiency
|
||||
4. **Config Normalization**: Group similar viewports (e.g., 1920±50 → 1920)
|
||||
|
||||
## Critical Code Paths
|
||||
|
||||
**Browser Acquisition** (`crawler_pool.py:34-78`):
|
||||
```
|
||||
get_crawler(cfg) →
|
||||
_sig(cfg) →
|
||||
if sig == DEFAULT_CONFIG_SIG → PERMANENT
|
||||
elif sig in HOT_POOL → HOT_POOL[sig]
|
||||
elif sig in COLD_POOL → promote if count >= 3
|
||||
else → create new in COLD_POOL
|
||||
```
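
The same flow as simplified Python (condensed from the description above, not the verbatim implementation; it relies on the module-level pool structures named earlier):

```python
async def get_crawler_sketch(cfg):
    sig = _sig(cfg)  # SHA1 of the serialized config
    async with LOCK:
        if PERMANENT and sig == DEFAULT_CONFIG_SIG:
            return PERMANENT                      # common case: default config
        if sig in HOT_POOL:
            return HOT_POOL[sig]
        if sig in COLD_POOL:
            USAGE_COUNT[sig] = USAGE_COUNT.get(sig, 0) + 1
            if USAGE_COUNT[sig] >= 3:             # promote frequent configs
                HOT_POOL[sig] = COLD_POOL.pop(sig)
                return HOT_POOL[sig]
            return COLD_POOL[sig]
        if get_container_memory_percent() >= MEM_LIMIT:
            raise MemoryError("memory pressure - refusing new browser")
        crawler = AsyncWebCrawler(config=cfg, thread_safe=False)
        await crawler.start()
        COLD_POOL[sig] = crawler
        USAGE_COUNT[sig] = 1
        return crawler
```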
|
||||
|
||||
**Janitor Loop** (`crawler_pool.py:107-146`):
|
||||
```
|
||||
while True:
|
||||
mem% = get_container_memory_percent()
|
||||
if mem% > 80: interval=10s, cold_ttl=30s
|
||||
elif mem% > 60: interval=30s, cold_ttl=60s
|
||||
else: interval=60s, cold_ttl=300s
|
||||
sleep(interval)
|
||||
close idle browsers (COLD then HOT)
|
||||
```
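
And the same loop in sketch form (intervals and TTLs mirror the pseudocode; hot-pool cleanup and monitor reporting are omitted):

```python
import asyncio
import time

async def janitor_sketch():
    while True:
        mem = get_container_memory_percent()
        if mem > 80:
            interval, cold_ttl = 10, 30
        elif mem > 60:
            interval, cold_ttl = 30, 60
        else:
            interval, cold_ttl = 60, 300
        await asyncio.sleep(interval)
        now = time.time()
        # Close cold-pool browsers that have been idle past their TTL
        for sig, crawler in list(COLD_POOL.items()):
            if now - LAST_USED.get(sig, now) > cold_ttl:
                await crawler.close()
                COLD_POOL.pop(sig, None)
```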
|
||||
|
||||
**Endpoint Pattern** (`server.py` example):
|
||||
```python
|
||||
@app.post("/html")
|
||||
async def generate_html(...):
|
||||
from crawler_pool import get_crawler
|
||||
crawler = await get_crawler(get_default_browser_config())
|
||||
results = await crawler.arun(url=body.url, config=cfg)
|
||||
# No crawler.close() - returned to pool
|
||||
```
|
||||
|
||||
## Debugging Tips
|
||||
|
||||
**Check Pool Activity**:
|
||||
```bash
|
||||
docker logs crawl4ai-test | grep -E "(🔥|♨️|❄️|🆕|⬆️)"
|
||||
```
|
||||
|
||||
**Verify Config Signature**:
|
||||
```python
|
||||
from crawl4ai import BrowserConfig
|
||||
import json, hashlib
|
||||
cfg = BrowserConfig(...)
|
||||
sig = hashlib.sha1(json.dumps(cfg.to_dict(), sort_keys=True).encode()).hexdigest()
|
||||
print(sig[:8]) # Compare with logs
|
||||
```
|
||||
|
||||
**Monitor Memory**:
|
||||
```bash
|
||||
docker stats crawl4ai-test
|
||||
```
|
||||
|
||||
## Known Limitations
|
||||
|
||||
- **Mac Docker Stats**: CPU metrics unreliable, memory works
|
||||
- **PDF Generation**: Slowest endpoint (~7s), no optimization yet
|
||||
- **Hot Pool Persistence**: May hold memory longer than needed (trade-off for performance)
|
||||
- **Janitor Lag**: Up to 60s before cleanup triggers in low-memory scenarios
|
||||
@@ -1,378 +0,0 @@
|
||||
# Webhook Feature Examples
|
||||
|
||||
This document provides examples of how to use the webhook feature for crawl jobs in Crawl4AI.
|
||||
|
||||
## Overview
|
||||
|
||||
The webhook feature allows you to receive notifications when crawl jobs complete, eliminating the need for polling. Webhooks are sent with exponential backoff retry logic to ensure reliable delivery.
|
||||
|
||||
## Configuration
|
||||
|
||||
### Global Configuration (config.yml)
|
||||
|
||||
You can configure default webhook settings in `config.yml`:
|
||||
|
||||
```yaml
|
||||
webhooks:
|
||||
enabled: true
|
||||
default_url: null # Optional: default webhook URL for all jobs
|
||||
data_in_payload: false # Optional: default behavior for including data
|
||||
retry:
|
||||
max_attempts: 5
|
||||
initial_delay_ms: 1000 # 1s, 2s, 4s, 8s, 16s exponential backoff
|
||||
max_delay_ms: 32000
|
||||
timeout_ms: 30000 # 30s timeout per webhook call
|
||||
headers: # Optional: default headers to include
|
||||
User-Agent: "Crawl4AI-Webhook/1.0"
|
||||
```
|
||||
|
||||
## API Usage Examples
|
||||
|
||||
### Example 1: Basic Webhook (Notification Only)
|
||||
|
||||
Send a webhook notification without including the crawl data in the payload.
|
||||
|
||||
**Request:**
|
||||
```bash
|
||||
curl -X POST http://localhost:11235/crawl/job \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"urls": ["https://example.com"],
|
||||
"webhook_config": {
|
||||
"webhook_url": "https://myapp.com/webhooks/crawl-complete",
|
||||
"webhook_data_in_payload": false
|
||||
}
|
||||
}'
|
||||
```
|
||||
|
||||
**Response:**
|
||||
```json
|
||||
{
|
||||
"task_id": "crawl_a1b2c3d4"
|
||||
}
|
||||
```
|
||||
|
||||
**Webhook Payload Received:**
|
||||
```json
|
||||
{
|
||||
"task_id": "crawl_a1b2c3d4",
|
||||
"task_type": "crawl",
|
||||
"status": "completed",
|
||||
"timestamp": "2025-10-21T10:30:00.000000+00:00",
|
||||
"urls": ["https://example.com"]
|
||||
}
|
||||
```
|
||||
|
||||
Your webhook handler should then fetch the results:
|
||||
```bash
|
||||
curl http://localhost:11235/crawl/job/crawl_a1b2c3d4
|
||||
```
|
||||
|
||||
### Example 2: Webhook with Data Included
|
||||
|
||||
Include the full crawl results in the webhook payload.
|
||||
|
||||
**Request:**
|
||||
```bash
|
||||
curl -X POST http://localhost:11235/crawl/job \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"urls": ["https://example.com"],
|
||||
"webhook_config": {
|
||||
"webhook_url": "https://myapp.com/webhooks/crawl-complete",
|
||||
"webhook_data_in_payload": true
|
||||
}
|
||||
}'
|
||||
```
|
||||
|
||||
**Webhook Payload Received:**
|
||||
```json
|
||||
{
|
||||
"task_id": "crawl_a1b2c3d4",
|
||||
"task_type": "crawl",
|
||||
"status": "completed",
|
||||
"timestamp": "2025-10-21T10:30:00.000000+00:00",
|
||||
"urls": ["https://example.com"],
|
||||
"data": {
|
||||
"markdown": "...",
|
||||
"html": "...",
|
||||
"links": {...},
|
||||
"metadata": {...}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Example 3: Webhook with Custom Headers
|
||||
|
||||
Include custom headers for authentication or identification.
|
||||
|
||||
**Request:**
|
||||
```bash
|
||||
curl -X POST http://localhost:11235/crawl/job \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"urls": ["https://example.com"],
|
||||
"webhook_config": {
|
||||
"webhook_url": "https://myapp.com/webhooks/crawl-complete",
|
||||
"webhook_data_in_payload": false,
|
||||
"webhook_headers": {
|
||||
"X-Webhook-Secret": "my-secret-token",
|
||||
"X-Service-ID": "crawl4ai-production"
|
||||
}
|
||||
}
|
||||
}'
|
||||
```
|
||||
|
||||
The webhook will be sent with these additional headers plus the default headers from config.
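
On the receiving side you will typically want to verify that header before trusting the payload. A minimal Flask sketch (the header name follows the example above; this handler is not part of the Crawl4AI server):

```python
from flask import Flask, request, jsonify, abort

app = Flask(__name__)
EXPECTED_SECRET = "my-secret-token"

@app.route("/webhooks/crawl-complete", methods=["POST"])
def crawl_complete():
    # Reject webhooks that do not carry the shared secret header
    if request.headers.get("X-Webhook-Secret") != EXPECTED_SECRET:
        abort(401)
    payload = request.json
    print(payload["task_id"], payload["status"])
    return jsonify({"status": "received"}), 200
```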
|
||||
|
||||
### Example 4: Failure Notification
|
||||
|
||||
When a crawl job fails, a webhook is sent with error details.
|
||||
|
||||
**Webhook Payload on Failure:**
|
||||
```json
|
||||
{
|
||||
"task_id": "crawl_a1b2c3d4",
|
||||
"task_type": "crawl",
|
||||
"status": "failed",
|
||||
"timestamp": "2025-10-21T10:30:00.000000+00:00",
|
||||
"urls": ["https://example.com"],
|
||||
"error": "Connection timeout after 30s"
|
||||
}
|
||||
```
|
||||
|
||||
### Example 5: Using Global Default Webhook
|
||||
|
||||
If you set a `default_url` in config.yml, jobs without webhook_config will use it:
|
||||
|
||||
**config.yml:**
|
||||
```yaml
|
||||
webhooks:
|
||||
enabled: true
|
||||
default_url: "https://myapp.com/webhooks/default"
|
||||
data_in_payload: false
|
||||
```
|
||||
|
||||
**Request (no webhook_config needed):**
|
||||
```bash
|
||||
curl -X POST http://localhost:11235/crawl/job \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"urls": ["https://example.com"]
|
||||
}'
|
||||
```
|
||||
|
||||
The webhook will be sent to the default URL configured in config.yml.
|
||||
|
||||
### Example 6: LLM Extraction Job with Webhook
|
||||
|
||||
Use webhooks with the LLM extraction endpoint for asynchronous processing.
|
||||
|
||||
**Request:**
|
||||
```bash
|
||||
curl -X POST http://localhost:11235/llm/job \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"url": "https://example.com/article",
|
||||
"q": "Extract the article title, author, and publication date",
|
||||
"schema": "{\"type\": \"object\", \"properties\": {\"title\": {\"type\": \"string\"}, \"author\": {\"type\": \"string\"}, \"date\": {\"type\": \"string\"}}}",
|
||||
"cache": false,
|
||||
"provider": "openai/gpt-4o-mini",
|
||||
"webhook_config": {
|
||||
"webhook_url": "https://myapp.com/webhooks/llm-complete",
|
||||
"webhook_data_in_payload": true
|
||||
}
|
||||
}'
|
||||
```
|
||||
|
||||
**Response:**
|
||||
```json
|
||||
{
|
||||
"task_id": "llm_1698765432_12345"
|
||||
}
|
||||
```
|
||||
|
||||
**Webhook Payload Received:**
|
||||
```json
|
||||
{
|
||||
"task_id": "llm_1698765432_12345",
|
||||
"task_type": "llm_extraction",
|
||||
"status": "completed",
|
||||
"timestamp": "2025-10-21T10:30:00.000000+00:00",
|
||||
"urls": ["https://example.com/article"],
|
||||
"data": {
|
||||
"extracted_content": {
|
||||
"title": "Understanding Web Scraping",
|
||||
"author": "John Doe",
|
||||
"date": "2025-10-21"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Webhook Handler Example
|
||||
|
||||
Here's a simple Python Flask webhook handler that supports both crawl and LLM extraction jobs:
|
||||
|
||||
```python
|
||||
from flask import Flask, request, jsonify
|
||||
import requests
|
||||
|
||||
app = Flask(__name__)
|
||||
|
||||
@app.route('/webhooks/crawl-complete', methods=['POST'])
|
||||
def handle_crawl_webhook():
|
||||
payload = request.json
|
||||
|
||||
task_id = payload['task_id']
|
||||
task_type = payload['task_type']
|
||||
status = payload['status']
|
||||
|
||||
if status == 'completed':
|
||||
# If data not in payload, fetch it
|
||||
if 'data' not in payload:
|
||||
# Determine endpoint based on task type
|
||||
endpoint = 'crawl' if task_type == 'crawl' else 'llm'
|
||||
response = requests.get(f'http://localhost:11235/{endpoint}/job/{task_id}')
|
||||
data = response.json()
|
||||
else:
|
||||
data = payload['data']
|
||||
|
||||
# Process based on task type
|
||||
if task_type == 'crawl':
|
||||
print(f"Processing crawl results for {task_id}")
|
||||
# Handle crawl results
|
||||
results = data.get('results', [])
|
||||
for result in results:
|
||||
print(f" - {result.get('url')}: {len(result.get('markdown', ''))} chars")
|
||||
|
||||
elif task_type == 'llm_extraction':
|
||||
print(f"Processing LLM extraction for {task_id}")
|
||||
# Handle LLM extraction
|
||||
# Note: Webhook sends 'extracted_content', API returns 'result'
|
||||
extracted = data.get('extracted_content', data.get('result', {}))
|
||||
print(f" - Extracted: {extracted}")
|
||||
|
||||
# Your business logic here...
|
||||
|
||||
elif status == 'failed':
|
||||
error = payload.get('error', 'Unknown error')
|
||||
print(f"{task_type} job {task_id} failed: {error}")
|
||||
# Handle failure...
|
||||
|
||||
return jsonify({"status": "received"}), 200
|
||||
|
||||
if __name__ == '__main__':
|
||||
app.run(port=8080)
|
||||
```
|
||||
|
||||
## Retry Logic
|
||||
|
||||
The webhook delivery service uses exponential backoff retry logic:
|
||||
|
||||
- **Attempts:** Up to 5 attempts by default
|
||||
- **Delays:** 1s → 2s → 4s → 8s → 16s
|
||||
- **Timeout:** 30 seconds per attempt
|
||||
- **Retry Conditions:**
|
||||
- Server errors (5xx status codes)
|
||||
- Network errors
|
||||
- Timeouts
|
||||
- **No Retry:**
|
||||
- Client errors (4xx status codes)
|
||||
- Successful delivery (2xx status codes)
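
The schedule above is a standard capped exponential backoff. A quick sketch of how the delays work out with the default settings:

```python
initial_delay_ms = 1000
max_delay_ms = 32000
max_attempts = 5

delays = [min(initial_delay_ms * 2 ** attempt, max_delay_ms)
          for attempt in range(max_attempts)]
print(delays)  # [1000, 2000, 4000, 8000, 16000] -> 1s, 2s, 4s, 8s, 16s
```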
|
||||
|
||||
## Benefits
|
||||
|
||||
1. **No Polling Required** - Eliminates constant API calls to check job status
|
||||
2. **Real-time Notifications** - Immediate notification when jobs complete
|
||||
3. **Reliable Delivery** - Exponential backoff ensures webhooks are delivered
|
||||
4. **Flexible** - Choose between notification-only or full data delivery
|
||||
5. **Secure** - Support for custom headers for authentication
|
||||
6. **Configurable** - Global defaults or per-job configuration
|
||||
7. **Universal Support** - Works with both `/crawl/job` and `/llm/job` endpoints
|
||||
|
||||
## TypeScript Client Example
|
||||
|
||||
```typescript
|
||||
interface WebhookConfig {
|
||||
webhook_url: string;
|
||||
webhook_data_in_payload?: boolean;
|
||||
webhook_headers?: Record<string, string>;
|
||||
}
|
||||
|
||||
interface CrawlJobRequest {
|
||||
urls: string[];
|
||||
browser_config?: Record<string, any>;
|
||||
crawler_config?: Record<string, any>;
|
||||
webhook_config?: WebhookConfig;
|
||||
}
|
||||
|
||||
interface LLMJobRequest {
|
||||
url: string;
|
||||
q: string;
|
||||
schema?: string;
|
||||
cache?: boolean;
|
||||
provider?: string;
|
||||
webhook_config?: WebhookConfig;
|
||||
}
|
||||
|
||||
async function createCrawlJob(request: CrawlJobRequest) {
|
||||
const response = await fetch('http://localhost:11235/crawl/job', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify(request)
|
||||
});
|
||||
|
||||
const { task_id } = await response.json();
|
||||
return task_id;
|
||||
}
|
||||
|
||||
async function createLLMJob(request: LLMJobRequest) {
|
||||
const response = await fetch('http://localhost:11235/llm/job', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify(request)
|
||||
});
|
||||
|
||||
const { task_id } = await response.json();
|
||||
return task_id;
|
||||
}
|
||||
|
||||
// Usage - Crawl Job
|
||||
const crawlTaskId = await createCrawlJob({
|
||||
urls: ['https://example.com'],
|
||||
webhook_config: {
|
||||
webhook_url: 'https://myapp.com/webhooks/crawl-complete',
|
||||
webhook_data_in_payload: false,
|
||||
webhook_headers: {
|
||||
'X-Webhook-Secret': 'my-secret'
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Usage - LLM Extraction Job
|
||||
const llmTaskId = await createLLMJob({
|
||||
url: 'https://example.com/article',
|
||||
q: 'Extract the main points from this article',
|
||||
provider: 'openai/gpt-4o-mini',
|
||||
webhook_config: {
|
||||
webhook_url: 'https://myapp.com/webhooks/llm-complete',
|
||||
webhook_data_in_payload: true,
|
||||
webhook_headers: {
|
||||
'X-Webhook-Secret': 'my-secret'
|
||||
}
|
||||
}
|
||||
});
|
||||
```
|
||||
|
||||
## Monitoring and Debugging
|
||||
|
||||
Webhook delivery attempts are logged at INFO level:
|
||||
- Successful deliveries
|
||||
- Retry attempts with delays
|
||||
- Final failures after max attempts
|
||||
|
||||
Check the application logs for webhook delivery status:
|
||||
```bash
|
||||
docker logs crawl4ai-container | grep -i webhook
|
||||
```
|
||||
1462
deploy/docker/api.py
File diff suppressed because it is too large
@@ -3,7 +3,7 @@ app:
|
||||
title: "Crawl4AI API"
|
||||
version: "1.0.0"
|
||||
host: "0.0.0.0"
|
||||
port: 11235
|
||||
port: 11234
|
||||
reload: False
|
||||
workers: 1
|
||||
timeout_keep_alive: 300
|
||||
@@ -61,7 +61,7 @@ crawler:
|
||||
batch_process: 300.0 # Timeout for batch processing
|
||||
pool:
|
||||
max_pages: 40 # ← GLOBAL_SEM permits
|
||||
idle_ttl_sec: 300 # ← 5 min janitor cutoff
|
||||
idle_ttl_sec: 1800 # ← 30 min janitor cutoff
|
||||
browser:
|
||||
kwargs:
|
||||
headless: true
|
||||
@@ -87,17 +87,4 @@ observability:
|
||||
enabled: True
|
||||
endpoint: "/metrics"
|
||||
health_check:
|
||||
endpoint: "/health"
|
||||
|
||||
# Webhook Configuration
|
||||
webhooks:
|
||||
enabled: true
|
||||
default_url: null # Optional: default webhook URL for all jobs
|
||||
data_in_payload: false # Optional: default behavior for including data
|
||||
retry:
|
||||
max_attempts: 5
|
||||
initial_delay_ms: 1000 # 1s, 2s, 4s, 8s, 16s exponential backoff
|
||||
max_delay_ms: 32000
|
||||
timeout_ms: 30000 # 30s timeout per webhook call
|
||||
headers: # Optional: default headers to include
|
||||
User-Agent: "Crawl4AI-Webhook/1.0"
|
||||
endpoint: "/health"
|
||||
@@ -1,170 +1,119 @@
|
||||
# crawler_pool.py - Smart browser pool with tiered management
|
||||
import asyncio, json, hashlib, time
|
||||
# crawler_pool.py (new file)
|
||||
import asyncio
|
||||
import hashlib
|
||||
import json
|
||||
import time
|
||||
from contextlib import suppress
|
||||
from typing import Dict, Optional
|
||||
from crawl4ai import AsyncWebCrawler, BrowserConfig
|
||||
from utils import load_config, get_container_memory_percent
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
import psutil
|
||||
|
||||
from crawl4ai import AsyncWebCrawler, BrowserConfig
|
||||
from crawl4ai.async_crawler_strategy import AsyncPlaywrightCrawlerStrategy
|
||||
|
||||
# Import browser adapters with fallback
|
||||
try:
|
||||
from crawl4ai.browser_adapter import BrowserAdapter, PlaywrightAdapter
|
||||
except ImportError:
|
||||
# Fallback for development environment
|
||||
import os
|
||||
import sys
|
||||
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
|
||||
from crawl4ai.browser_adapter import BrowserAdapter, PlaywrightAdapter
|
||||
from utils import load_config
|
||||
|
||||
CONFIG = load_config()
|
||||
|
||||
# Pool tiers
|
||||
PERMANENT: Optional[AsyncWebCrawler] = None # Always-ready default browser
|
||||
HOT_POOL: Dict[str, AsyncWebCrawler] = {} # Frequent configs
|
||||
COLD_POOL: Dict[str, AsyncWebCrawler] = {} # Rare configs
|
||||
POOL: Dict[str, AsyncWebCrawler] = {}
|
||||
LAST_USED: Dict[str, float] = {}
|
||||
USAGE_COUNT: Dict[str, int] = {}
|
||||
LOCK = asyncio.Lock()
|
||||
|
||||
# Config
|
||||
MEM_LIMIT = CONFIG.get("crawler", {}).get("memory_threshold_percent", 95.0)
|
||||
BASE_IDLE_TTL = CONFIG.get("crawler", {}).get("pool", {}).get("idle_ttl_sec", 300)
|
||||
DEFAULT_CONFIG_SIG = None # Cached sig for default config
|
||||
MEM_LIMIT = CONFIG.get("crawler", {}).get(
|
||||
"memory_threshold_percent", 95.0
|
||||
) # % RAM – refuse new browsers above this
|
||||
IDLE_TTL = (
|
||||
CONFIG.get("crawler", {}).get("pool", {}).get("idle_ttl_sec", 1800)
|
||||
) # close if unused for 30 min
|
||||
|
||||
def _sig(cfg: BrowserConfig) -> str:
|
||||
"""Generate config signature."""
|
||||
payload = json.dumps(cfg.to_dict(), sort_keys=True, separators=(",",":"))
|
||||
|
||||
def _sig(cfg: BrowserConfig, adapter: Optional[BrowserAdapter] = None) -> str:
|
||||
try:
|
||||
config_payload = json.dumps(cfg.to_dict(), sort_keys=True, separators=(",", ":"))
|
||||
except (TypeError, ValueError):
|
||||
# Fallback to string representation if JSON serialization fails
|
||||
config_payload = str(cfg.to_dict())
|
||||
adapter_name = adapter.__class__.__name__ if adapter else "PlaywrightAdapter"
|
||||
payload = f"{config_payload}:{adapter_name}"
|
||||
return hashlib.sha1(payload.encode()).hexdigest()
|
||||
|
||||
def _is_default_config(sig: str) -> bool:
|
||||
"""Check if config matches default."""
|
||||
return sig == DEFAULT_CONFIG_SIG
|
||||
|
||||
async def get_crawler(cfg: BrowserConfig) -> AsyncWebCrawler:
    """Get crawler from pool with tiered strategy."""
    sig = _sig(cfg)
    async with LOCK:
        # Check permanent browser for default config
        if PERMANENT and _is_default_config(sig):
async def get_crawler(
    cfg: BrowserConfig, adapter: Optional[BrowserAdapter] = None
) -> AsyncWebCrawler:
    sig = None
    try:
        sig = _sig(cfg, adapter)
        async with LOCK:
            if sig in POOL:
                LAST_USED[sig] = time.time()
                return POOL[sig]
            if psutil.virtual_memory().percent >= MEM_LIMIT:
                raise MemoryError("RAM pressure – new browser denied")

            # Create crawler - let it initialize the strategy with proper logger
            # Pass browser_adapter as a kwarg so AsyncWebCrawler can use it when creating the strategy
            crawler = AsyncWebCrawler(
                config=cfg,
                thread_safe=False
            )

            # Set the browser adapter on the strategy after crawler initialization
            if adapter:
                # Create a new strategy with the adapter and the crawler's logger
                from crawl4ai.async_crawler_strategy import AsyncPlaywrightCrawlerStrategy
                crawler.crawler_strategy = AsyncPlaywrightCrawlerStrategy(
                    browser_config=cfg,
                    logger=crawler.logger,
                    browser_adapter=adapter
                )

            await crawler.start()
            POOL[sig] = crawler
            LAST_USED[sig] = time.time()
            USAGE_COUNT[sig] = USAGE_COUNT.get(sig, 0) + 1
            logger.info("🔥 Using permanent browser")
            return PERMANENT
            return crawler
    except MemoryError as e:
        raise MemoryError(f"RAM pressure – new browser denied: {e}")
    except Exception as e:
        raise RuntimeError(f"Failed to start browser: {e}")
    finally:
        if sig:
            if sig in POOL:
                LAST_USED[sig] = time.time()
            else:
                # If we failed to start the browser, we should remove it from the pool
                POOL.pop(sig, None)
                LAST_USED.pop(sig, None)
        # If we failed to start the browser, we should remove it from the pool

        # Check hot pool
        if sig in HOT_POOL:
            LAST_USED[sig] = time.time()
            USAGE_COUNT[sig] = USAGE_COUNT.get(sig, 0) + 1
            logger.info(f"♨️ Using hot pool browser (sig={sig[:8]})")
            return HOT_POOL[sig]

        # Check cold pool (promote to hot if used 3+ times)
        if sig in COLD_POOL:
            LAST_USED[sig] = time.time()
            USAGE_COUNT[sig] = USAGE_COUNT.get(sig, 0) + 1

            if USAGE_COUNT[sig] >= 3:
                logger.info(f"⬆️ Promoting to hot pool (sig={sig[:8]}, count={USAGE_COUNT[sig]})")
                HOT_POOL[sig] = COLD_POOL.pop(sig)

                # Track promotion in monitor
                try:
                    from monitor import get_monitor
                    await get_monitor().track_janitor_event("promote", sig, {"count": USAGE_COUNT[sig]})
                except:
                    pass

                return HOT_POOL[sig]

            logger.info(f"❄️ Using cold pool browser (sig={sig[:8]})")
            return COLD_POOL[sig]

        # Memory check before creating new
        mem_pct = get_container_memory_percent()
        if mem_pct >= MEM_LIMIT:
            logger.error(f"💥 Memory pressure: {mem_pct:.1f}% >= {MEM_LIMIT}%")
            raise MemoryError(f"Memory at {mem_pct:.1f}%, refusing new browser")

        # Create new in cold pool
        logger.info(f"🆕 Creating new browser in cold pool (sig={sig[:8]}, mem={mem_pct:.1f}%)")
        crawler = AsyncWebCrawler(config=cfg, thread_safe=False)
        await crawler.start()
        COLD_POOL[sig] = crawler
        LAST_USED[sig] = time.time()
        USAGE_COUNT[sig] = 1
        return crawler


async def init_permanent(cfg: BrowserConfig):
    """Initialize permanent default browser."""
    global PERMANENT, DEFAULT_CONFIG_SIG
    async with LOCK:
        if PERMANENT:
            return
        DEFAULT_CONFIG_SIG = _sig(cfg)
        logger.info("🔥 Creating permanent default browser")
        PERMANENT = AsyncWebCrawler(config=cfg, thread_safe=False)
        await PERMANENT.start()
        LAST_USED[DEFAULT_CONFIG_SIG] = time.time()
        USAGE_COUNT[DEFAULT_CONFIG_SIG] = 0


async def close_all():
    """Close all browsers."""
    async with LOCK:
        tasks = []
        if PERMANENT:
            tasks.append(PERMANENT.close())
        tasks.extend([c.close() for c in HOT_POOL.values()])
        tasks.extend([c.close() for c in COLD_POOL.values()])
        await asyncio.gather(*tasks, return_exceptions=True)
        HOT_POOL.clear()
        COLD_POOL.clear()
        await asyncio.gather(
            *(c.close() for c in POOL.values()), return_exceptions=True
        )
        POOL.clear()
        LAST_USED.clear()
        USAGE_COUNT.clear()


async def janitor():
    """Adaptive cleanup based on memory pressure."""
    while True:
        mem_pct = get_container_memory_percent()

        # Adaptive intervals and TTLs
        if mem_pct > 80:
            interval, cold_ttl, hot_ttl = 10, 30, 120
        elif mem_pct > 60:
            interval, cold_ttl, hot_ttl = 30, 60, 300
        else:
            interval, cold_ttl, hot_ttl = 60, BASE_IDLE_TTL, BASE_IDLE_TTL * 2

        await asyncio.sleep(interval)

        await asyncio.sleep(60)
        now = time.time()
        async with LOCK:
            # Clean cold pool
            for sig in list(COLD_POOL.keys()):
                if now - LAST_USED.get(sig, now) > cold_ttl:
                    idle_time = now - LAST_USED[sig]
                    logger.info(f"🧹 Closing cold browser (sig={sig[:8]}, idle={idle_time:.0f}s)")
            for sig, crawler in list(POOL.items()):
                if now - LAST_USED[sig] > IDLE_TTL:
                    with suppress(Exception):
                        await COLD_POOL[sig].close()
                    COLD_POOL.pop(sig, None)
                    await crawler.close()
                    POOL.pop(sig, None)
                    LAST_USED.pop(sig, None)
                    USAGE_COUNT.pop(sig, None)

                    # Track in monitor
                    try:
                        from monitor import get_monitor
                        await get_monitor().track_janitor_event("close_cold", sig, {"idle_seconds": int(idle_time), "ttl": cold_ttl})
                    except:
                        pass

            # Clean hot pool (more conservative)
            for sig in list(HOT_POOL.keys()):
                if now - LAST_USED.get(sig, now) > hot_ttl:
                    idle_time = now - LAST_USED[sig]
                    logger.info(f"🧹 Closing hot browser (sig={sig[:8]}, idle={idle_time:.0f}s)")
                    with suppress(Exception):
                        await HOT_POOL[sig].close()
                    HOT_POOL.pop(sig, None)
                    LAST_USED.pop(sig, None)
                    USAGE_COUNT.pop(sig, None)

                    # Track in monitor
                    try:
                        from monitor import get_monitor
                        await get_monitor().track_janitor_event("close_hot", sig, {"idle_seconds": int(idle_time), "ttl": hot_ttl})
                    except:
                        pass

            # Log pool stats
            if mem_pct > 60:
                logger.info(f"📊 Pool: hot={len(HOT_POOL)}, cold={len(COLD_POOL)}, mem={mem_pct:.1f}%")

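Taken together, the pool module above is meant to be driven from the server lifespan: create the permanent browser once, run the janitor as a background task, hand out pooled crawlers per request, and tear everything down on shutdown. A minimal sketch of that wiring, assuming a FastAPI lifespan (the wiring itself is not part of this diff):

```python
# Illustrative wiring only; the function names come from crawler_pool above,
# the FastAPI lifespan and example route are assumptions.
import asyncio
from contextlib import asynccontextmanager

from fastapi import FastAPI
from crawl4ai import BrowserConfig

import crawler_pool

@asynccontextmanager
async def lifespan(app: FastAPI):
    await crawler_pool.init_permanent(BrowserConfig(headless=True))
    janitor_task = asyncio.create_task(crawler_pool.janitor())  # adaptive idle cleanup
    try:
        yield
    finally:
        janitor_task.cancel()
        await crawler_pool.close_all()

app = FastAPI(lifespan=lifespan)

@app.get("/example")
async def example():
    # Reuses the permanent/pooled browser when the config signature matches.
    crawler = await crawler_pool.get_crawler(BrowserConfig(headless=True))
    result = await crawler.arun("https://example.com")
    return {"ok": result.success}
```
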
@@ -12,7 +12,6 @@ from api import (
    handle_crawl_job,
    handle_task_status,
)
from schemas import WebhookConfig

# ------------- dependency placeholders -------------
_redis = None # will be injected from server.py
@@ -38,16 +37,15 @@ class LlmJobPayload(BaseModel):
    schema: Optional[str] = None
    cache: bool = False
    provider: Optional[str] = None
    webhook_config: Optional[WebhookConfig] = None
    temperature: Optional[float] = None
    base_url: Optional[str] = None
    chunking_strategy: Optional[Dict] = None


class CrawlJobPayload(BaseModel):
    urls: list[HttpUrl]
    browser_config: Dict = {}
    crawler_config: Dict = {}
    webhook_config: Optional[WebhookConfig] = None


# ---------- LLM job ---------------------------------------------------------
@@ -58,10 +56,6 @@ async def llm_job_enqueue(
    request: Request,
    _td: Dict = Depends(lambda: _token_dep()), # late-bound dep
):
    webhook_config = None
    if payload.webhook_config:
        webhook_config = payload.webhook_config.model_dump(mode='json')

    return await handle_llm_request(
        _redis,
        background_tasks,
@@ -72,9 +66,9 @@ async def llm_job_enqueue(
        cache=payload.cache,
        config=_config,
        provider=payload.provider,
        webhook_config=webhook_config,
        temperature=payload.temperature,
        api_base_url=payload.base_url,
        chunking_strategy_config=payload.chunking_strategy,
    )


@@ -94,10 +88,6 @@ async def crawl_job_enqueue(
    background_tasks: BackgroundTasks,
    _td: Dict = Depends(lambda: _token_dep()),
):
    webhook_config = None
    if payload.webhook_config:
        webhook_config = payload.webhook_config.model_dump(mode='json')

    return await handle_crawl_job(
        _redis,
        background_tasks,
@@ -105,7 +95,6 @@ async def crawl_job_enqueue(
        payload.browser_config,
        payload.crawler_config,
        config=_config,
        webhook_config=webhook_config,
    )

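The payload changes above add an optional `webhook_config` to both job endpoints. A hedged client-side sketch of submitting a crawl job with a webhook; the `/crawl/job` route, port, auth header, and `WebhookConfig` field names are assumptions based on the handler and schema names, not confirmed by this diff:

```python
# Sketch only: field names mirror CrawlJobPayload above; routes and webhook
# schema details are assumptions.
import requests

BASE = "http://localhost:11235"
headers = {"Authorization": "Bearer <token>"}

crawl_job = {
    "urls": ["https://example.com"],
    "browser_config": {},
    "crawler_config": {},
    "webhook_config": {"url": "https://my-service.example/webhook"},  # hypothetical field
}
resp = requests.post(f"{BASE}/crawl/job", json=crawl_job, headers=headers)
print(resp.json())  # typically a task id to poll via the task-status handler
```
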
@@ -1,382 +0,0 @@
|
||||
# monitor.py - Real-time monitoring stats with Redis persistence
|
||||
import time
|
||||
import json
|
||||
import asyncio
|
||||
from typing import Dict, List, Optional
|
||||
from datetime import datetime, timezone
|
||||
from collections import deque
|
||||
from redis import asyncio as aioredis
|
||||
from utils import get_container_memory_percent
|
||||
import psutil
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class MonitorStats:
|
||||
"""Tracks real-time server stats with Redis persistence."""
|
||||
|
||||
def __init__(self, redis: aioredis.Redis):
|
||||
self.redis = redis
|
||||
self.start_time = time.time()
|
||||
|
||||
# In-memory queues (fast reads, Redis backup)
|
||||
self.active_requests: Dict[str, Dict] = {} # id -> request info
|
||||
self.completed_requests: deque = deque(maxlen=100) # Last 100
|
||||
self.janitor_events: deque = deque(maxlen=100)
|
||||
self.errors: deque = deque(maxlen=100)
|
||||
|
||||
# Endpoint stats (persisted in Redis)
|
||||
self.endpoint_stats: Dict[str, Dict] = {} # endpoint -> {count, total_time, errors, ...}
|
||||
|
||||
# Background persistence queue (max 10 pending persist requests)
|
||||
self._persist_queue: asyncio.Queue = asyncio.Queue(maxsize=10)
|
||||
self._persist_worker_task: Optional[asyncio.Task] = None
|
||||
|
||||
# Timeline data (5min window, 5s resolution = 60 points)
|
||||
self.memory_timeline: deque = deque(maxlen=60)
|
||||
self.requests_timeline: deque = deque(maxlen=60)
|
||||
self.browser_timeline: deque = deque(maxlen=60)
|
||||
|
||||
async def track_request_start(self, request_id: str, endpoint: str, url: str, config: Dict = None):
|
||||
"""Track new request start."""
|
||||
req_info = {
|
||||
"id": request_id,
|
||||
"endpoint": endpoint,
|
||||
"url": url[:100], # Truncate long URLs
|
||||
"start_time": time.time(),
|
||||
"config_sig": config.get("sig", "default") if config else "default",
|
||||
"mem_start": psutil.Process().memory_info().rss / (1024 * 1024)
|
||||
}
|
||||
self.active_requests[request_id] = req_info
|
||||
|
||||
# Increment endpoint counter
|
||||
if endpoint not in self.endpoint_stats:
|
||||
self.endpoint_stats[endpoint] = {
|
||||
"count": 0, "total_time": 0, "errors": 0,
|
||||
"pool_hits": 0, "success": 0
|
||||
}
|
||||
self.endpoint_stats[endpoint]["count"] += 1
|
||||
|
||||
# Queue persistence (handled by background worker)
|
||||
try:
|
||||
self._persist_queue.put_nowait(True)
|
||||
except asyncio.QueueFull:
|
||||
logger.warning("Persistence queue full, skipping")
|
||||
|
||||
async def track_request_end(self, request_id: str, success: bool, error: str = None,
|
||||
pool_hit: bool = True, status_code: int = 200):
|
||||
"""Track request completion."""
|
||||
if request_id not in self.active_requests:
|
||||
return
|
||||
|
||||
req_info = self.active_requests.pop(request_id)
|
||||
end_time = time.time()
|
||||
elapsed = end_time - req_info["start_time"]
|
||||
mem_end = psutil.Process().memory_info().rss / (1024 * 1024)
|
||||
mem_delta = mem_end - req_info["mem_start"]
|
||||
|
||||
# Update stats
|
||||
endpoint = req_info["endpoint"]
|
||||
if endpoint in self.endpoint_stats:
|
||||
self.endpoint_stats[endpoint]["total_time"] += elapsed
|
||||
if success:
|
||||
self.endpoint_stats[endpoint]["success"] += 1
|
||||
else:
|
||||
self.endpoint_stats[endpoint]["errors"] += 1
|
||||
if pool_hit:
|
||||
self.endpoint_stats[endpoint]["pool_hits"] += 1
|
||||
|
||||
# Add to completed queue
|
||||
completed = {
|
||||
**req_info,
|
||||
"end_time": end_time,
|
||||
"elapsed": round(elapsed, 2),
|
||||
"mem_delta": round(mem_delta, 1),
|
||||
"success": success,
|
||||
"error": error,
|
||||
"status_code": status_code,
|
||||
"pool_hit": pool_hit
|
||||
}
|
||||
self.completed_requests.append(completed)
|
||||
|
||||
# Track errors
|
||||
if not success and error:
|
||||
self.errors.append({
|
||||
"timestamp": end_time,
|
||||
"endpoint": endpoint,
|
||||
"url": req_info["url"],
|
||||
"error": error,
|
||||
"request_id": request_id
|
||||
})
|
||||
|
||||
await self._persist_endpoint_stats()
|
||||
|
||||
async def track_janitor_event(self, event_type: str, sig: str, details: Dict):
|
||||
"""Track janitor cleanup events."""
|
||||
self.janitor_events.append({
|
||||
"timestamp": time.time(),
|
||||
"type": event_type, # "close_cold", "close_hot", "promote"
|
||||
"sig": sig[:8],
|
||||
"details": details
|
||||
})
|
||||
|
||||
def _cleanup_old_entries(self, max_age_seconds: int = 300):
|
||||
"""Remove entries older than max_age_seconds (default 5min)."""
|
||||
now = time.time()
|
||||
cutoff = now - max_age_seconds
|
||||
|
||||
# Clean completed requests
|
||||
while self.completed_requests and self.completed_requests[0].get("end_time", 0) < cutoff:
|
||||
self.completed_requests.popleft()
|
||||
|
||||
# Clean janitor events
|
||||
while self.janitor_events and self.janitor_events[0].get("timestamp", 0) < cutoff:
|
||||
self.janitor_events.popleft()
|
||||
|
||||
# Clean errors
|
||||
while self.errors and self.errors[0].get("timestamp", 0) < cutoff:
|
||||
self.errors.popleft()
|
||||
|
||||
async def update_timeline(self):
|
||||
"""Update timeline data points (called every 5s)."""
|
||||
now = time.time()
|
||||
mem_pct = get_container_memory_percent()
|
||||
|
||||
# Clean old entries (keep last 5 minutes)
|
||||
self._cleanup_old_entries(max_age_seconds=300)
|
||||
|
||||
# Count requests in last 5s
|
||||
recent_reqs = sum(1 for req in self.completed_requests
|
||||
if now - req.get("end_time", 0) < 5)
|
||||
|
||||
# Browser counts (acquire lock to prevent race conditions)
|
||||
from crawler_pool import PERMANENT, HOT_POOL, COLD_POOL, LOCK
|
||||
async with LOCK:
|
||||
browser_count = {
|
||||
"permanent": 1 if PERMANENT else 0,
|
||||
"hot": len(HOT_POOL),
|
||||
"cold": len(COLD_POOL)
|
||||
}
|
||||
|
||||
self.memory_timeline.append({"time": now, "value": mem_pct})
|
||||
self.requests_timeline.append({"time": now, "value": recent_reqs})
|
||||
self.browser_timeline.append({"time": now, "browsers": browser_count})
|
||||
|
||||
async def _persist_endpoint_stats(self):
|
||||
"""Persist endpoint stats to Redis."""
|
||||
try:
|
||||
await self.redis.set(
|
||||
"monitor:endpoint_stats",
|
||||
json.dumps(self.endpoint_stats),
|
||||
ex=86400 # 24h TTL
|
||||
)
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to persist endpoint stats: {e}")
|
||||
|
||||
async def _persistence_worker(self):
|
||||
"""Background worker to persist stats to Redis."""
|
||||
while True:
|
||||
try:
|
||||
await self._persist_queue.get()
|
||||
await self._persist_endpoint_stats()
|
||||
self._persist_queue.task_done()
|
||||
except asyncio.CancelledError:
|
||||
break
|
||||
except Exception as e:
|
||||
logger.error(f"Persistence worker error: {e}")
|
||||
|
||||
def start_persistence_worker(self):
|
||||
"""Start the background persistence worker."""
|
||||
if not self._persist_worker_task:
|
||||
self._persist_worker_task = asyncio.create_task(self._persistence_worker())
|
||||
logger.info("Started persistence worker")
|
||||
|
||||
async def stop_persistence_worker(self):
|
||||
"""Stop the background persistence worker."""
|
||||
if self._persist_worker_task:
|
||||
self._persist_worker_task.cancel()
|
||||
try:
|
||||
await self._persist_worker_task
|
||||
except asyncio.CancelledError:
|
||||
pass
|
||||
self._persist_worker_task = None
|
||||
logger.info("Stopped persistence worker")
|
||||
|
||||
async def cleanup(self):
|
||||
"""Cleanup on shutdown - persist final stats and stop workers."""
|
||||
logger.info("Monitor cleanup starting...")
|
||||
try:
|
||||
# Persist final stats before shutdown
|
||||
await self._persist_endpoint_stats()
|
||||
# Stop background worker
|
||||
await self.stop_persistence_worker()
|
||||
logger.info("Monitor cleanup completed")
|
||||
except Exception as e:
|
||||
logger.error(f"Monitor cleanup error: {e}")
|
||||
|
||||
async def load_from_redis(self):
|
||||
"""Load persisted stats from Redis."""
|
||||
try:
|
||||
data = await self.redis.get("monitor:endpoint_stats")
|
||||
if data:
|
||||
self.endpoint_stats = json.loads(data)
|
||||
logger.info("Loaded endpoint stats from Redis")
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to load from Redis: {e}")
|
||||
|
||||
async def get_health_summary(self) -> Dict:
|
||||
"""Get current system health snapshot."""
|
||||
mem_pct = get_container_memory_percent()
|
||||
cpu_pct = psutil.cpu_percent(interval=0.1)
|
||||
|
||||
# Network I/O (delta since last call)
|
||||
net = psutil.net_io_counters()
|
||||
|
||||
# Pool status (acquire lock to prevent race conditions)
|
||||
from crawler_pool import PERMANENT, HOT_POOL, COLD_POOL, LOCK
|
||||
async with LOCK:
|
||||
# TODO: Track actual browser process memory instead of estimates
|
||||
# These are conservative estimates based on typical Chromium usage
|
||||
permanent_mem = 270 if PERMANENT else 0 # Estimate: ~270MB for permanent browser
|
||||
hot_mem = len(HOT_POOL) * 180 # Estimate: ~180MB per hot pool browser
|
||||
cold_mem = len(COLD_POOL) * 180 # Estimate: ~180MB per cold pool browser
|
||||
permanent_active = PERMANENT is not None
|
||||
hot_count = len(HOT_POOL)
|
||||
cold_count = len(COLD_POOL)
|
||||
|
||||
return {
|
||||
"container": {
|
||||
"memory_percent": round(mem_pct, 1),
|
||||
"cpu_percent": round(cpu_pct, 1),
|
||||
"network_sent_mb": round(net.bytes_sent / (1024**2), 2),
|
||||
"network_recv_mb": round(net.bytes_recv / (1024**2), 2),
|
||||
"uptime_seconds": int(time.time() - self.start_time)
|
||||
},
|
||||
"pool": {
|
||||
"permanent": {"active": permanent_active, "memory_mb": permanent_mem},
|
||||
"hot": {"count": hot_count, "memory_mb": hot_mem},
|
||||
"cold": {"count": cold_count, "memory_mb": cold_mem},
|
||||
"total_memory_mb": permanent_mem + hot_mem + cold_mem
|
||||
},
|
||||
"janitor": {
|
||||
"next_cleanup_estimate": "adaptive", # Would need janitor state
|
||||
"memory_pressure": "LOW" if mem_pct < 60 else "MEDIUM" if mem_pct < 80 else "HIGH"
|
||||
}
|
||||
}
|
||||
|
||||
def get_active_requests(self) -> List[Dict]:
|
||||
"""Get list of currently active requests."""
|
||||
now = time.time()
|
||||
return [
|
||||
{
|
||||
**req,
|
||||
"elapsed": round(now - req["start_time"], 1),
|
||||
"status": "running"
|
||||
}
|
||||
for req in self.active_requests.values()
|
||||
]
|
||||
|
||||
def get_completed_requests(self, limit: int = 50, filter_status: str = "all") -> List[Dict]:
|
||||
"""Get recent completed requests."""
|
||||
requests = list(self.completed_requests)[-limit:]
|
||||
if filter_status == "success":
|
||||
requests = [r for r in requests if r.get("success")]
|
||||
elif filter_status == "error":
|
||||
requests = [r for r in requests if not r.get("success")]
|
||||
return requests
|
||||
|
||||
async def get_browser_list(self) -> List[Dict]:
|
||||
"""Get detailed browser pool information."""
|
||||
from crawler_pool import PERMANENT, HOT_POOL, COLD_POOL, LAST_USED, USAGE_COUNT, DEFAULT_CONFIG_SIG, LOCK
|
||||
|
||||
browsers = []
|
||||
now = time.time()
|
||||
|
||||
# Acquire lock to prevent race conditions during iteration
|
||||
async with LOCK:
|
||||
if PERMANENT:
|
||||
browsers.append({
|
||||
"type": "permanent",
|
||||
"sig": DEFAULT_CONFIG_SIG[:8] if DEFAULT_CONFIG_SIG else "unknown",
|
||||
"age_seconds": int(now - self.start_time),
|
||||
"last_used_seconds": int(now - LAST_USED.get(DEFAULT_CONFIG_SIG, now)),
|
||||
"memory_mb": 270,
|
||||
"hits": USAGE_COUNT.get(DEFAULT_CONFIG_SIG, 0),
|
||||
"killable": False
|
||||
})
|
||||
|
||||
for sig, crawler in HOT_POOL.items():
|
||||
browsers.append({
|
||||
"type": "hot",
|
||||
"sig": sig[:8],
|
||||
"age_seconds": int(now - self.start_time), # Approximation
|
||||
"last_used_seconds": int(now - LAST_USED.get(sig, now)),
|
||||
"memory_mb": 180, # Estimate
|
||||
"hits": USAGE_COUNT.get(sig, 0),
|
||||
"killable": True
|
||||
})
|
||||
|
||||
for sig, crawler in COLD_POOL.items():
|
||||
browsers.append({
|
||||
"type": "cold",
|
||||
"sig": sig[:8],
|
||||
"age_seconds": int(now - self.start_time),
|
||||
"last_used_seconds": int(now - LAST_USED.get(sig, now)),
|
||||
"memory_mb": 180,
|
||||
"hits": USAGE_COUNT.get(sig, 0),
|
||||
"killable": True
|
||||
})
|
||||
|
||||
return browsers
|
||||
|
||||
def get_endpoint_stats_summary(self) -> Dict[str, Dict]:
|
||||
"""Get aggregated endpoint statistics."""
|
||||
summary = {}
|
||||
for endpoint, stats in self.endpoint_stats.items():
|
||||
count = stats["count"]
|
||||
avg_time = (stats["total_time"] / count) if count > 0 else 0
|
||||
success_rate = (stats["success"] / count * 100) if count > 0 else 0
|
||||
pool_hit_rate = (stats["pool_hits"] / count * 100) if count > 0 else 0
|
||||
|
||||
summary[endpoint] = {
|
||||
"count": count,
|
||||
"avg_latency_ms": round(avg_time * 1000, 1),
|
||||
"success_rate_percent": round(success_rate, 1),
|
||||
"pool_hit_rate_percent": round(pool_hit_rate, 1),
|
||||
"errors": stats["errors"]
|
||||
}
|
||||
return summary
|
||||
|
||||
def get_timeline_data(self, metric: str, window: str = "5m") -> Dict:
|
||||
"""Get timeline data for charts."""
|
||||
# For now, only 5m window supported
|
||||
if metric == "memory":
|
||||
data = list(self.memory_timeline)
|
||||
elif metric == "requests":
|
||||
data = list(self.requests_timeline)
|
||||
elif metric == "browsers":
|
||||
data = list(self.browser_timeline)
|
||||
else:
|
||||
return {"timestamps": [], "values": []}
|
||||
|
||||
return {
|
||||
"timestamps": [int(d["time"]) for d in data],
|
||||
"values": [d.get("value", d.get("browsers")) for d in data]
|
||||
}
|
||||
|
||||
def get_janitor_log(self, limit: int = 100) -> List[Dict]:
|
||||
"""Get recent janitor events."""
|
||||
return list(self.janitor_events)[-limit:]
|
||||
|
||||
def get_errors_log(self, limit: int = 100) -> List[Dict]:
|
||||
"""Get recent errors."""
|
||||
return list(self.errors)[-limit:]
|
||||
|
||||
# Global instance (initialized in server.py)
|
||||
monitor_stats: Optional[MonitorStats] = None
|
||||
|
||||
def get_monitor() -> MonitorStats:
|
||||
"""Get global monitor instance."""
|
||||
if monitor_stats is None:
|
||||
raise RuntimeError("Monitor not initialized")
|
||||
return monitor_stats
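For context, `MonitorStats` above (shown here as removed) was designed to be created once at server startup, restored from Redis, and ticked every few seconds for its timelines. A minimal sketch of that wiring; the ticker loop and Redis URL are assumptions consistent with the class's own docstrings:

```python
# Illustrative startup wiring; the names come from the monitor module above.
import asyncio
from redis import asyncio as aioredis

import monitor

async def start_monitoring():
    redis = aioredis.from_url("redis://localhost:6379")
    monitor.monitor_stats = monitor.MonitorStats(redis)   # set the module-level global
    await monitor.monitor_stats.load_from_redis()          # restore endpoint counters
    monitor.monitor_stats.start_persistence_worker()       # background Redis persistence

    async def ticker():
        while True:
            await monitor.monitor_stats.update_timeline()  # 60-point, 5s-resolution window
            await asyncio.sleep(5)

    asyncio.create_task(ticker())
```
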
@@ -1,405 +0,0 @@
|
||||
# monitor_routes.py - Monitor API endpoints
|
||||
from fastapi import APIRouter, HTTPException, WebSocket, WebSocketDisconnect
|
||||
from pydantic import BaseModel
|
||||
from typing import Optional
|
||||
from monitor import get_monitor
|
||||
import logging
|
||||
import asyncio
|
||||
import json
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
router = APIRouter(prefix="/monitor", tags=["monitor"])
|
||||
|
||||
|
||||
@router.get("/health")
|
||||
async def get_health():
|
||||
"""Get current system health snapshot."""
|
||||
try:
|
||||
monitor = get_monitor()
|
||||
return await monitor.get_health_summary()
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting health: {e}")
|
||||
raise HTTPException(500, str(e))
|
||||
|
||||
|
||||
@router.get("/requests")
|
||||
async def get_requests(status: str = "all", limit: int = 50):
|
||||
"""Get active and completed requests.
|
||||
|
||||
Args:
|
||||
status: Filter by 'active', 'completed', 'success', 'error', or 'all'
|
||||
limit: Max number of completed requests to return (default 50)
|
||||
"""
|
||||
# Input validation
|
||||
if status not in ["all", "active", "completed", "success", "error"]:
|
||||
raise HTTPException(400, f"Invalid status: {status}. Must be one of: all, active, completed, success, error")
|
||||
if limit < 1 or limit > 1000:
|
||||
raise HTTPException(400, f"Invalid limit: {limit}. Must be between 1 and 1000")
|
||||
|
||||
try:
|
||||
monitor = get_monitor()
|
||||
|
||||
if status == "active":
|
||||
return {"active": monitor.get_active_requests(), "completed": []}
|
||||
elif status == "completed":
|
||||
return {"active": [], "completed": monitor.get_completed_requests(limit)}
|
||||
elif status in ["success", "error"]:
|
||||
return {"active": [], "completed": monitor.get_completed_requests(limit, status)}
|
||||
else: # "all"
|
||||
return {
|
||||
"active": monitor.get_active_requests(),
|
||||
"completed": monitor.get_completed_requests(limit)
|
||||
}
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting requests: {e}")
|
||||
raise HTTPException(500, str(e))
|
||||
|
||||
|
||||
@router.get("/browsers")
|
||||
async def get_browsers():
|
||||
"""Get detailed browser pool information."""
|
||||
try:
|
||||
monitor = get_monitor()
|
||||
browsers = await monitor.get_browser_list()
|
||||
|
||||
# Calculate summary stats
|
||||
total_browsers = len(browsers)
|
||||
total_memory = sum(b["memory_mb"] for b in browsers)
|
||||
|
||||
# Calculate reuse rate from recent requests
|
||||
recent = monitor.get_completed_requests(100)
|
||||
pool_hits = sum(1 for r in recent if r.get("pool_hit", False))
|
||||
reuse_rate = (pool_hits / len(recent) * 100) if recent else 0
|
||||
|
||||
return {
|
||||
"browsers": browsers,
|
||||
"summary": {
|
||||
"total_count": total_browsers,
|
||||
"total_memory_mb": total_memory,
|
||||
"reuse_rate_percent": round(reuse_rate, 1)
|
||||
}
|
||||
}
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting browsers: {e}")
|
||||
raise HTTPException(500, str(e))
|
||||
|
||||
|
||||
@router.get("/endpoints/stats")
|
||||
async def get_endpoint_stats():
|
||||
"""Get aggregated endpoint statistics."""
|
||||
try:
|
||||
monitor = get_monitor()
|
||||
return monitor.get_endpoint_stats_summary()
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting endpoint stats: {e}")
|
||||
raise HTTPException(500, str(e))
|
||||
|
||||
|
||||
@router.get("/timeline")
|
||||
async def get_timeline(metric: str = "memory", window: str = "5m"):
|
||||
"""Get timeline data for charts.
|
||||
|
||||
Args:
|
||||
metric: 'memory', 'requests', or 'browsers'
|
||||
window: Time window (only '5m' supported for now)
|
||||
"""
|
||||
# Input validation
|
||||
if metric not in ["memory", "requests", "browsers"]:
|
||||
raise HTTPException(400, f"Invalid metric: {metric}. Must be one of: memory, requests, browsers")
|
||||
if window != "5m":
|
||||
raise HTTPException(400, f"Invalid window: {window}. Only '5m' is currently supported")
|
||||
|
||||
try:
|
||||
monitor = get_monitor()
|
||||
return monitor.get_timeline_data(metric, window)
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting timeline: {e}")
|
||||
raise HTTPException(500, str(e))
|
||||
|
||||
|
||||
@router.get("/logs/janitor")
|
||||
async def get_janitor_log(limit: int = 100):
|
||||
"""Get recent janitor cleanup events."""
|
||||
# Input validation
|
||||
if limit < 1 or limit > 1000:
|
||||
raise HTTPException(400, f"Invalid limit: {limit}. Must be between 1 and 1000")
|
||||
|
||||
try:
|
||||
monitor = get_monitor()
|
||||
return {"events": monitor.get_janitor_log(limit)}
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting janitor log: {e}")
|
||||
raise HTTPException(500, str(e))
|
||||
|
||||
|
||||
@router.get("/logs/errors")
|
||||
async def get_errors_log(limit: int = 100):
|
||||
"""Get recent errors."""
|
||||
# Input validation
|
||||
if limit < 1 or limit > 1000:
|
||||
raise HTTPException(400, f"Invalid limit: {limit}. Must be between 1 and 1000")
|
||||
|
||||
try:
|
||||
monitor = get_monitor()
|
||||
return {"errors": monitor.get_errors_log(limit)}
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting errors log: {e}")
|
||||
raise HTTPException(500, str(e))
|
||||
|
||||
|
||||
# ========== Control Actions ==========
|
||||
|
||||
class KillBrowserRequest(BaseModel):
|
||||
sig: str
|
||||
|
||||
|
||||
@router.post("/actions/cleanup")
|
||||
async def force_cleanup():
|
||||
"""Force immediate janitor cleanup (kills idle cold pool browsers)."""
|
||||
try:
|
||||
from crawler_pool import COLD_POOL, LAST_USED, USAGE_COUNT, LOCK
|
||||
import time
|
||||
from contextlib import suppress
|
||||
|
||||
killed_count = 0
|
||||
now = time.time()
|
||||
|
||||
async with LOCK:
|
||||
for sig in list(COLD_POOL.keys()):
|
||||
# Kill all cold pool browsers immediately
|
||||
logger.info(f"🧹 Force cleanup: closing cold browser (sig={sig[:8]})")
|
||||
with suppress(Exception):
|
||||
await COLD_POOL[sig].close()
|
||||
COLD_POOL.pop(sig, None)
|
||||
LAST_USED.pop(sig, None)
|
||||
USAGE_COUNT.pop(sig, None)
|
||||
killed_count += 1
|
||||
|
||||
monitor = get_monitor()
|
||||
await monitor.track_janitor_event("force_cleanup", "manual", {"killed": killed_count})
|
||||
|
||||
return {"success": True, "killed_browsers": killed_count}
|
||||
except Exception as e:
|
||||
logger.error(f"Error during force cleanup: {e}")
|
||||
raise HTTPException(500, str(e))
|
||||
|
||||
|
||||
@router.post("/actions/kill_browser")
|
||||
async def kill_browser(req: KillBrowserRequest):
|
||||
"""Kill a specific browser by signature (hot or cold only).
|
||||
|
||||
Args:
|
||||
sig: Browser config signature (first 8 chars)
|
||||
"""
|
||||
try:
|
||||
from crawler_pool import HOT_POOL, COLD_POOL, LAST_USED, USAGE_COUNT, LOCK, DEFAULT_CONFIG_SIG
|
||||
from contextlib import suppress
|
||||
|
||||
# Find full signature matching prefix
|
||||
target_sig = None
|
||||
pool_type = None
|
||||
|
||||
async with LOCK:
|
||||
# Check hot pool
|
||||
for sig in HOT_POOL.keys():
|
||||
if sig.startswith(req.sig):
|
||||
target_sig = sig
|
||||
pool_type = "hot"
|
||||
break
|
||||
|
||||
# Check cold pool
|
||||
if not target_sig:
|
||||
for sig in COLD_POOL.keys():
|
||||
if sig.startswith(req.sig):
|
||||
target_sig = sig
|
||||
pool_type = "cold"
|
||||
break
|
||||
|
||||
# Check if trying to kill permanent
|
||||
if DEFAULT_CONFIG_SIG and DEFAULT_CONFIG_SIG.startswith(req.sig):
|
||||
raise HTTPException(403, "Cannot kill permanent browser. Use restart instead.")
|
||||
|
||||
if not target_sig:
|
||||
raise HTTPException(404, f"Browser with sig={req.sig} not found")
|
||||
|
||||
# Warn if there are active requests (browser might be in use)
|
||||
monitor = get_monitor()
|
||||
active_count = len(monitor.get_active_requests())
|
||||
if active_count > 0:
|
||||
logger.warning(f"Killing browser {target_sig[:8]} while {active_count} requests are active - may cause failures")
|
||||
|
||||
# Kill the browser
|
||||
if pool_type == "hot":
|
||||
browser = HOT_POOL.pop(target_sig)
|
||||
else:
|
||||
browser = COLD_POOL.pop(target_sig)
|
||||
|
||||
with suppress(Exception):
|
||||
await browser.close()
|
||||
|
||||
LAST_USED.pop(target_sig, None)
|
||||
USAGE_COUNT.pop(target_sig, None)
|
||||
|
||||
logger.info(f"🔪 Killed {pool_type} browser (sig={target_sig[:8]})")
|
||||
|
||||
monitor = get_monitor()
|
||||
await monitor.track_janitor_event("kill_browser", target_sig, {"pool": pool_type, "manual": True})
|
||||
|
||||
return {"success": True, "killed_sig": target_sig[:8], "pool_type": pool_type}
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"Error killing browser: {e}")
|
||||
raise HTTPException(500, str(e))
|
||||
|
||||
|
||||
@router.post("/actions/restart_browser")
|
||||
async def restart_browser(req: KillBrowserRequest):
|
||||
"""Restart a browser (kill + recreate). Works for permanent too.
|
||||
|
||||
Args:
|
||||
sig: Browser config signature (first 8 chars), or "permanent"
|
||||
"""
|
||||
try:
|
||||
from crawler_pool import (PERMANENT, HOT_POOL, COLD_POOL, LAST_USED,
|
||||
USAGE_COUNT, LOCK, DEFAULT_CONFIG_SIG, init_permanent)
|
||||
from crawl4ai import AsyncWebCrawler, BrowserConfig
|
||||
from contextlib import suppress
|
||||
import time
|
||||
|
||||
# Handle permanent browser restart
|
||||
if req.sig == "permanent" or (DEFAULT_CONFIG_SIG and DEFAULT_CONFIG_SIG.startswith(req.sig)):
|
||||
async with LOCK:
|
||||
if PERMANENT:
|
||||
with suppress(Exception):
|
||||
await PERMANENT.close()
|
||||
|
||||
# Reinitialize permanent
|
||||
from utils import load_config
|
||||
config = load_config()
|
||||
await init_permanent(BrowserConfig(
|
||||
extra_args=config["crawler"]["browser"].get("extra_args", []),
|
||||
**config["crawler"]["browser"].get("kwargs", {}),
|
||||
))
|
||||
|
||||
logger.info("🔄 Restarted permanent browser")
|
||||
return {"success": True, "restarted": "permanent"}
|
||||
|
||||
# Handle hot/cold browser restart
|
||||
target_sig = None
|
||||
pool_type = None
|
||||
browser_config = None
|
||||
|
||||
async with LOCK:
|
||||
# Find browser
|
||||
for sig in HOT_POOL.keys():
|
||||
if sig.startswith(req.sig):
|
||||
target_sig = sig
|
||||
pool_type = "hot"
|
||||
# Would need to reconstruct config (not stored currently)
|
||||
break
|
||||
|
||||
if not target_sig:
|
||||
for sig in COLD_POOL.keys():
|
||||
if sig.startswith(req.sig):
|
||||
target_sig = sig
|
||||
pool_type = "cold"
|
||||
break
|
||||
|
||||
if not target_sig:
|
||||
raise HTTPException(404, f"Browser with sig={req.sig} not found")
|
||||
|
||||
# Kill existing
|
||||
if pool_type == "hot":
|
||||
browser = HOT_POOL.pop(target_sig)
|
||||
else:
|
||||
browser = COLD_POOL.pop(target_sig)
|
||||
|
||||
with suppress(Exception):
|
||||
await browser.close()
|
||||
|
||||
# Note: We can't easily recreate with same config without storing it
|
||||
# For now, just kill and let new requests create fresh ones
|
||||
LAST_USED.pop(target_sig, None)
|
||||
USAGE_COUNT.pop(target_sig, None)
|
||||
|
||||
logger.info(f"🔄 Restarted {pool_type} browser (sig={target_sig[:8]})")
|
||||
|
||||
monitor = get_monitor()
|
||||
await monitor.track_janitor_event("restart_browser", target_sig, {"pool": pool_type})
|
||||
|
||||
return {"success": True, "restarted_sig": target_sig[:8], "note": "Browser will be recreated on next request"}
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"Error restarting browser: {e}")
|
||||
raise HTTPException(500, str(e))
|
||||
|
||||
|
||||
@router.post("/stats/reset")
|
||||
async def reset_stats():
|
||||
"""Reset today's endpoint counters."""
|
||||
try:
|
||||
monitor = get_monitor()
|
||||
monitor.endpoint_stats.clear()
|
||||
await monitor._persist_endpoint_stats()
|
||||
|
||||
return {"success": True, "message": "Endpoint stats reset"}
|
||||
except Exception as e:
|
||||
logger.error(f"Error resetting stats: {e}")
|
||||
raise HTTPException(500, str(e))
|
||||
|
||||
|
||||
@router.websocket("/ws")
|
||||
async def websocket_endpoint(websocket: WebSocket):
|
||||
"""WebSocket endpoint for real-time monitoring updates.
|
||||
|
||||
Sends updates every 2 seconds with:
|
||||
- Health stats
|
||||
- Active/completed requests
|
||||
- Browser pool status
|
||||
- Timeline data
|
||||
"""
|
||||
await websocket.accept()
|
||||
logger.info("WebSocket client connected")
|
||||
|
||||
try:
|
||||
while True:
|
||||
try:
|
||||
# Gather all monitoring data
|
||||
monitor = get_monitor()
|
||||
|
||||
data = {
|
||||
"timestamp": asyncio.get_event_loop().time(),
|
||||
"health": await monitor.get_health_summary(),
|
||||
"requests": {
|
||||
"active": monitor.get_active_requests(),
|
||||
"completed": monitor.get_completed_requests(limit=10)
|
||||
},
|
||||
"browsers": await monitor.get_browser_list(),
|
||||
"timeline": {
|
||||
"memory": monitor.get_timeline_data("memory", "5m"),
|
||||
"requests": monitor.get_timeline_data("requests", "5m"),
|
||||
"browsers": monitor.get_timeline_data("browsers", "5m")
|
||||
},
|
||||
"janitor": monitor.get_janitor_log(limit=10),
|
||||
"errors": monitor.get_errors_log(limit=10)
|
||||
}
|
||||
|
||||
# Send update to client
|
||||
await websocket.send_json(data)
|
||||
|
||||
# Wait 2 seconds before next update
|
||||
await asyncio.sleep(2)
|
||||
|
||||
except WebSocketDisconnect:
|
||||
logger.info("WebSocket client disconnected")
|
||||
break
|
||||
except Exception as e:
|
||||
logger.error(f"WebSocket error: {e}", exc_info=True)
|
||||
await asyncio.sleep(2) # Continue trying
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"WebSocket connection error: {e}", exc_info=True)
|
||||
finally:
|
||||
logger.info("WebSocket connection closed")
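Since the `/monitor/ws` endpoint above pushes a full snapshot every two seconds, a dashboard can simply read JSON frames in a loop. A small client sketch using the `websockets` package (pinned in the requirements below); the host and port are assumptions:

```python
# Minimal /monitor/ws consumer; prints memory percent from each 2-second update.
import asyncio
import json
import websockets

async def watch(url: str = "ws://localhost:11235/monitor/ws"):
    async with websockets.connect(url) as ws:
        async for frame in ws:
            data = json.loads(frame)
            mem = data["health"]["container"]["memory_percent"]
            active = len(data["requests"]["active"])
            print(f"mem={mem}%  active_requests={active}")

if __name__ == "__main__":
    asyncio.run(watch())
```
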
@@ -12,6 +12,6 @@ pydantic>=2.11
rank-bm25==0.2.2
anyio==4.9.0
PyJWT==2.10.1
mcp>=1.18.0
mcp>=1.6.0
websockets>=15.0.1
httpx[http2]>=0.27.2

0 deploy/docker/routers/__init__.py Normal file
270 deploy/docker/routers/adaptive.py Normal file
@@ -0,0 +1,270 @@
|
||||
import uuid
|
||||
from typing import Any, Dict
|
||||
|
||||
from fastapi import APIRouter, BackgroundTasks, HTTPException
|
||||
from schemas import AdaptiveConfigPayload, AdaptiveCrawlRequest, AdaptiveJobStatus
|
||||
|
||||
from crawl4ai import AsyncWebCrawler
|
||||
from crawl4ai.adaptive_crawler import AdaptiveConfig, AdaptiveCrawler
|
||||
from crawl4ai.utils import get_error_context
|
||||
|
||||
# --- In-memory storage for job statuses. For production, use Redis or a database. ---
|
||||
ADAPTIVE_JOBS: Dict[str, Dict[str, Any]] = {}
|
||||
|
||||
# --- APIRouter for Adaptive Crawling Endpoints ---
|
||||
router = APIRouter(
|
||||
prefix="/adaptive/digest",
|
||||
tags=["Adaptive Crawling"],
|
||||
)
|
||||
|
||||
# --- Background Worker Function ---
|
||||
|
||||
|
||||
async def run_adaptive_digest(task_id: str, request: AdaptiveCrawlRequest):
|
||||
"""The actual async worker that performs the adaptive crawl."""
|
||||
try:
|
||||
# Update job status to RUNNING
|
||||
ADAPTIVE_JOBS[task_id]["status"] = "RUNNING"
|
||||
|
||||
# Create AdaptiveConfig from payload or use default
|
||||
if request.config:
|
||||
adaptive_config = AdaptiveConfig(**request.config.model_dump())
|
||||
else:
|
||||
adaptive_config = AdaptiveConfig()
|
||||
|
||||
# The adaptive crawler needs an instance of the web crawler
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
adaptive_crawler = AdaptiveCrawler(crawler, config=adaptive_config)
|
||||
|
||||
# This is the long-running operation
|
||||
final_state = await adaptive_crawler.digest(
|
||||
start_url=request.start_url, query=request.query
|
||||
)
|
||||
|
||||
# Process the final state into a clean result
|
||||
result_data = {
|
||||
"confidence": final_state.metrics.get("confidence", 0.0),
|
||||
"is_sufficient": adaptive_crawler.is_sufficient,
|
||||
"coverage_stats": adaptive_crawler.coverage_stats,
|
||||
"relevant_content": adaptive_crawler.get_relevant_content(top_k=5),
|
||||
}
|
||||
|
||||
# Update job with the final result
|
||||
ADAPTIVE_JOBS[task_id].update(
|
||||
{
|
||||
"status": "COMPLETED",
|
||||
"result": result_data,
|
||||
"metrics": final_state.metrics,
|
||||
}
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
# On failure, update the job with an error message
|
||||
import sys
|
||||
|
||||
error_context = get_error_context(sys.exc_info())
|
||||
error_message = f"Adaptive crawl failed: {str(e)}\nContext: {error_context}"
|
||||
|
||||
ADAPTIVE_JOBS[task_id].update({"status": "FAILED", "error": error_message})
|
||||
|
||||
|
||||
# --- API Endpoints ---
|
||||
|
||||
|
||||
@router.post("/job",
|
||||
summary="Submit Adaptive Crawl Job",
|
||||
description="Start a long-running adaptive crawling job that intelligently discovers relevant content.",
|
||||
response_description="Job ID for status polling",
|
||||
response_model=AdaptiveJobStatus,
|
||||
status_code=202
|
||||
)
|
||||
async def submit_adaptive_digest_job(
|
||||
request: AdaptiveCrawlRequest,
|
||||
background_tasks: BackgroundTasks,
|
||||
):
|
||||
"""
|
||||
Submit a new adaptive crawling job.
|
||||
|
||||
This endpoint starts an intelligent, long-running crawl that automatically
|
||||
discovers and extracts relevant content based on your query. Returns
|
||||
immediately with a task ID for polling.
|
||||
|
||||
**Request Body:**
|
||||
```json
|
||||
{
|
||||
"start_url": "https://example.com",
|
||||
"query": "Find all product documentation",
|
||||
"config": {
|
||||
"max_depth": 3,
|
||||
"max_pages": 50,
|
||||
"confidence_threshold": 0.7,
|
||||
"timeout": 300
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Parameters:**
|
||||
- `start_url`: Starting URL for the crawl
|
||||
- `query`: Natural language query describing what to find
|
||||
- `config`: Optional adaptive configuration (max_depth, max_pages, etc.)
|
||||
|
||||
**Response:**
|
||||
```json
|
||||
{
|
||||
"task_id": "550e8400-e29b-41d4-a716-446655440000",
|
||||
"status": "PENDING",
|
||||
"metrics": null,
|
||||
"result": null,
|
||||
"error": null
|
||||
}
|
||||
```
|
||||
|
||||
**Usage:**
|
||||
```python
|
||||
# Submit job
|
||||
response = requests.post(
|
||||
"http://localhost:11235/adaptive/digest/job",
|
||||
headers={"Authorization": f"Bearer {token}"},
|
||||
json={
|
||||
"start_url": "https://example.com",
|
||||
"query": "Find all API documentation"
|
||||
}
|
||||
)
|
||||
task_id = response.json()["task_id"]
|
||||
|
||||
# Poll for results
|
||||
while True:
|
||||
status_response = requests.get(
|
||||
f"http://localhost:11235/adaptive/digest/job/{task_id}",
|
||||
headers={"Authorization": f"Bearer {token}"}
|
||||
)
|
||||
status = status_response.json()
|
||||
if status["status"] in ["COMPLETED", "FAILED"]:
|
||||
print(status["result"])
|
||||
break
|
||||
time.sleep(2)
|
||||
```
|
||||
|
||||
**Notes:**
|
||||
- Job runs in background, returns immediately
|
||||
- Use task_id to poll status with GET /adaptive/digest/job/{task_id}
|
||||
- Adaptive crawler intelligently follows links based on relevance
|
||||
- Automatically stops when sufficient content found
|
||||
- Returns HTTP 202 Accepted
|
||||
"""
|
||||
|
||||
print("Received adaptive crawl request:", request)
|
||||
task_id = str(uuid.uuid4())
|
||||
|
||||
# Initialize the job in our in-memory store
|
||||
ADAPTIVE_JOBS[task_id] = {
|
||||
"task_id": task_id,
|
||||
"status": "PENDING",
|
||||
"metrics": None,
|
||||
"result": None,
|
||||
"error": None,
|
||||
}
|
||||
|
||||
# Add the long-running task to the background
|
||||
background_tasks.add_task(run_adaptive_digest, task_id, request)
|
||||
|
||||
return ADAPTIVE_JOBS[task_id]
|
||||
|
||||
|
||||
@router.get("/job/{task_id}",
|
||||
summary="Get Adaptive Job Status",
|
||||
description="Poll the status and results of an adaptive crawling job.",
|
||||
response_description="Job status, metrics, and results",
|
||||
response_model=AdaptiveJobStatus
|
||||
)
|
||||
async def get_adaptive_digest_status(task_id: str):
|
||||
"""
|
||||
Get the status and result of an adaptive crawling job.
|
||||
|
||||
Poll this endpoint with the task_id returned from the submission endpoint
|
||||
until the status is 'COMPLETED' or 'FAILED'.
|
||||
|
||||
**Parameters:**
|
||||
- `task_id`: Job ID from POST /adaptive/digest/job
|
||||
|
||||
**Response (Running):**
|
||||
```json
|
||||
{
|
||||
"task_id": "550e8400-e29b-41d4-a716-446655440000",
|
||||
"status": "RUNNING",
|
||||
"metrics": {
|
||||
"confidence": 0.45,
|
||||
"pages_crawled": 15,
|
||||
"relevant_pages": 8
|
||||
},
|
||||
"result": null,
|
||||
"error": null
|
||||
}
|
||||
```
|
||||
|
||||
**Response (Completed):**
|
||||
```json
|
||||
{
|
||||
"task_id": "550e8400-e29b-41d4-a716-446655440000",
|
||||
"status": "COMPLETED",
|
||||
"metrics": {
|
||||
"confidence": 0.85,
|
||||
"pages_crawled": 42,
|
||||
"relevant_pages": 28
|
||||
},
|
||||
"result": {
|
||||
"confidence": 0.85,
|
||||
"is_sufficient": true,
|
||||
"coverage_stats": {...},
|
||||
"relevant_content": [...]
|
||||
},
|
||||
"error": null
|
||||
}
|
||||
```
|
||||
|
||||
**Status Values:**
|
||||
- `PENDING`: Job queued, not started yet
|
||||
- `RUNNING`: Job actively crawling
|
||||
- `COMPLETED`: Job finished successfully
|
||||
- `FAILED`: Job encountered an error
|
||||
|
||||
**Usage:**
|
||||
```python
|
||||
import time
|
||||
|
||||
# Poll until complete
|
||||
while True:
|
||||
response = requests.get(
|
||||
f"http://localhost:11235/adaptive/digest/job/{task_id}",
|
||||
headers={"Authorization": f"Bearer {token}"}
|
||||
)
|
||||
job = response.json()
|
||||
|
||||
print(f"Status: {job['status']}")
|
||||
if job['status'] == 'RUNNING':
|
||||
print(f"Progress: {job['metrics']['pages_crawled']} pages")
|
||||
elif job['status'] == 'COMPLETED':
|
||||
print(f"Found {len(job['result']['relevant_content'])} relevant items")
|
||||
break
|
||||
elif job['status'] == 'FAILED':
|
||||
print(f"Error: {job['error']}")
|
||||
break
|
||||
|
||||
time.sleep(2)
|
||||
```
|
||||
|
||||
**Notes:**
|
||||
- Poll every 1-5 seconds
|
||||
- Metrics updated in real-time while running
|
||||
- Returns 404 if task_id not found
|
||||
- Results include top relevant content and statistics
|
||||
"""
|
||||
job = ADAPTIVE_JOBS.get(task_id)
|
||||
if not job:
|
||||
raise HTTPException(status_code=404, detail="Job not found")
|
||||
|
||||
# If the job is running, update the metrics from the live state
|
||||
if job["status"] == "RUNNING" and job.get("live_state"):
|
||||
job["metrics"] = job["live_state"].metrics
|
||||
|
||||
return job
|
||||
259 deploy/docker/routers/dispatchers.py Normal file
@@ -0,0 +1,259 @@
|
||||
"""
|
||||
Router for dispatcher management endpoints.
|
||||
|
||||
Provides endpoints to:
|
||||
- List available dispatchers
|
||||
- Get default dispatcher info
|
||||
- Get dispatcher statistics
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import Dict, List
|
||||
|
||||
from fastapi import APIRouter, HTTPException, Request
|
||||
from schemas import DispatcherInfo, DispatcherStatsResponse, DispatcherType
|
||||
from utils import get_available_dispatchers, get_dispatcher_config
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# --- APIRouter for Dispatcher Endpoints ---
|
||||
router = APIRouter(
|
||||
prefix="/dispatchers",
|
||||
tags=["Dispatchers"],
|
||||
)
|
||||
|
||||
|
||||
@router.get("",
|
||||
summary="List Dispatchers",
|
||||
description="Get information about all available dispatcher types.",
|
||||
response_description="List of dispatcher configurations and features",
|
||||
response_model=List[DispatcherInfo]
|
||||
)
|
||||
async def list_dispatchers(request: Request):
|
||||
"""
|
||||
List all available dispatcher types.
|
||||
|
||||
Returns information about each dispatcher type including name, description,
|
||||
configuration parameters, and key features.
|
||||
|
||||
**Dispatchers:**
|
||||
- `memory_adaptive`: Automatically manages crawler instances based on memory
|
||||
- `semaphore`: Simple semaphore-based concurrency control
|
||||
|
||||
**Response:**
|
||||
```json
|
||||
[
|
||||
{
|
||||
"type": "memory_adaptive",
|
||||
"name": "Memory Adaptive Dispatcher",
|
||||
"description": "Automatically adjusts crawler pool based on memory usage",
|
||||
"config": {...},
|
||||
"features": ["Auto-scaling", "Memory monitoring", "Smart throttling"]
|
||||
},
|
||||
{
|
||||
"type": "semaphore",
|
||||
"name": "Semaphore Dispatcher",
|
||||
"description": "Simple semaphore-based concurrency control",
|
||||
"config": {...},
|
||||
"features": ["Fixed concurrency", "Simple queue"]
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
**Usage:**
|
||||
```python
|
||||
response = requests.get(
|
||||
"http://localhost:11235/dispatchers",
|
||||
headers={"Authorization": f"Bearer {token}"}
|
||||
)
|
||||
dispatchers = response.json()
|
||||
for dispatcher in dispatchers:
|
||||
print(f"{dispatcher['type']}: {dispatcher['description']}")
|
||||
```
|
||||
|
||||
**Notes:**
|
||||
- Lists all registered dispatcher types
|
||||
- Shows configuration options for each
|
||||
- Use with /crawl endpoint's `dispatcher` parameter
|
||||
"""
|
||||
try:
|
||||
dispatchers_info = get_available_dispatchers()
|
||||
|
||||
result = []
|
||||
for dispatcher_type, info in dispatchers_info.items():
|
||||
result.append(
|
||||
DispatcherInfo(
|
||||
type=DispatcherType(dispatcher_type),
|
||||
name=info["name"],
|
||||
description=info["description"],
|
||||
config=info["config"],
|
||||
features=info["features"],
|
||||
)
|
||||
)
|
||||
|
||||
return result
|
||||
except Exception as e:
|
||||
logger.error(f"Error listing dispatchers: {e}")
|
||||
raise HTTPException(status_code=500, detail=f"Failed to list dispatchers: {str(e)}")
|
||||
|
||||
|
||||
@router.get("/default",
|
||||
summary="Get Default Dispatcher",
|
||||
description="Get information about the currently configured default dispatcher.",
|
||||
response_description="Default dispatcher information",
|
||||
response_model=Dict
|
||||
)
|
||||
async def get_default_dispatcher(request: Request):
|
||||
"""
|
||||
Get information about the current default dispatcher.
|
||||
|
||||
Returns the dispatcher type, configuration, and status for the default
|
||||
dispatcher used when no specific dispatcher is requested.
|
||||
|
||||
**Response:**
|
||||
```json
|
||||
{
|
||||
"type": "memory_adaptive",
|
||||
"config": {
|
||||
"max_memory_percent": 80,
|
||||
"check_interval": 10,
|
||||
"min_instances": 1,
|
||||
"max_instances": 10
|
||||
},
|
||||
"active": true
|
||||
}
|
||||
```
|
||||
|
||||
**Usage:**
|
||||
```python
|
||||
response = requests.get(
|
||||
"http://localhost:11235/dispatchers/default",
|
||||
headers={"Authorization": f"Bearer {token}"}
|
||||
)
|
||||
default_dispatcher = response.json()
|
||||
print(f"Default: {default_dispatcher['type']}")
|
||||
```
|
||||
|
||||
**Notes:**
|
||||
- Shows which dispatcher is used by default
|
||||
- Default can be configured via server settings
|
||||
- Override with `dispatcher` parameter in /crawl requests
|
||||
"""
|
||||
try:
|
||||
default_type = request.app.state.default_dispatcher_type
|
||||
dispatcher = request.app.state.dispatchers.get(default_type)
|
||||
|
||||
if not dispatcher:
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail=f"Default dispatcher '{default_type}' not initialized"
|
||||
)
|
||||
|
||||
return {
|
||||
"type": default_type,
|
||||
"config": get_dispatcher_config(default_type),
|
||||
"active": True,
|
||||
}
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting default dispatcher: {e}")
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail=f"Failed to get default dispatcher: {str(e)}"
|
||||
)
|
||||
|
||||
|
||||
@router.get("/{dispatcher_type}/stats",
|
||||
summary="Get Dispatcher Statistics",
|
||||
description="Get runtime statistics for a specific dispatcher.",
|
||||
response_description="Dispatcher statistics and metrics",
|
||||
response_model=DispatcherStatsResponse
|
||||
)
|
||||
async def get_dispatcher_stats(dispatcher_type: DispatcherType, request: Request):
|
||||
"""
|
||||
Get runtime statistics for a specific dispatcher.
|
||||
|
||||
Returns active sessions, configuration, and dispatcher-specific metrics.
|
||||
Useful for monitoring and debugging dispatcher performance.
|
||||
|
||||
**Parameters:**
|
||||
- `dispatcher_type`: Dispatcher type (memory_adaptive, semaphore)
|
||||
|
||||
**Response:**
|
||||
```json
|
||||
{
|
||||
"type": "memory_adaptive",
|
||||
"active_sessions": 3,
|
||||
"config": {
|
||||
"max_memory_percent": 80,
|
||||
"check_interval": 10
|
||||
},
|
||||
"stats": {
|
||||
"current_memory_percent": 45.2,
|
||||
"active_instances": 3,
|
||||
"max_instances": 10,
|
||||
"throttled_count": 0
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Usage:**
|
||||
```python
|
||||
response = requests.get(
|
||||
"http://localhost:11235/dispatchers/memory_adaptive/stats",
|
||||
headers={"Authorization": f"Bearer {token}"}
|
||||
)
|
||||
stats = response.json()
|
||||
print(f"Active sessions: {stats['active_sessions']}")
|
||||
print(f"Memory usage: {stats['stats']['current_memory_percent']}%")
|
||||
```
|
||||
|
||||
**Notes:**
|
||||
- Real-time statistics
|
||||
- Stats vary by dispatcher type
|
||||
- Use for monitoring and capacity planning
|
||||
- Returns 404 if dispatcher type not found
|
||||
"""
|
||||
try:
|
||||
dispatcher_name = dispatcher_type.value
|
||||
dispatcher = request.app.state.dispatchers.get(dispatcher_name)
|
||||
|
||||
if not dispatcher:
|
||||
raise HTTPException(
|
||||
status_code=404,
|
||||
detail=f"Dispatcher '{dispatcher_name}' not found or not initialized"
|
||||
)
|
||||
|
||||
# Get basic stats
|
||||
stats = {
|
||||
"type": dispatcher_type,
|
||||
"active_sessions": dispatcher.concurrent_sessions,
|
||||
"config": get_dispatcher_config(dispatcher_name),
|
||||
"stats": {}
|
||||
}
|
||||
|
||||
# Add dispatcher-specific stats
|
||||
if dispatcher_name == "memory_adaptive":
|
||||
stats["stats"] = {
|
||||
"current_memory_percent": getattr(dispatcher, "current_memory_percent", 0.0),
|
||||
"memory_pressure_mode": getattr(dispatcher, "memory_pressure_mode", False),
|
||||
"task_queue_size": dispatcher.task_queue.qsize() if hasattr(dispatcher, "task_queue") else 0,
|
||||
}
|
||||
elif dispatcher_name == "semaphore":
|
||||
# For semaphore dispatcher, show semaphore availability
|
||||
if hasattr(dispatcher, "semaphore_count"):
|
||||
stats["stats"] = {
|
||||
"max_concurrent": dispatcher.semaphore_count,
|
||||
}
|
||||
|
||||
return DispatcherStatsResponse(**stats)
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting dispatcher stats for '{dispatcher_type}': {e}")
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail=f"Failed to get dispatcher stats: {str(e)}"
|
||||
)
|
||||
746 deploy/docker/routers/monitoring.py Normal file
@@ -0,0 +1,746 @@
|
||||
"""
|
||||
Monitoring and Profiling Router
|
||||
|
||||
Provides endpoints for:
|
||||
- Browser performance profiling
|
||||
- Real-time crawler statistics
|
||||
- System resource monitoring
|
||||
- Session management
|
||||
"""
|
||||
|
||||
from fastapi import APIRouter, HTTPException, BackgroundTasks, Query
|
||||
from fastapi.responses import StreamingResponse
|
||||
from pydantic import BaseModel, Field
|
||||
from typing import Dict, List, Optional, Any, AsyncGenerator
|
||||
from datetime import datetime, timedelta
|
||||
import uuid
|
||||
import asyncio
|
||||
import json
|
||||
import time
|
||||
import psutil
|
||||
import logging
|
||||
from collections import defaultdict
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
router = APIRouter(
|
||||
prefix="/monitoring",
|
||||
tags=["Monitoring & Profiling"],
|
||||
responses={
|
||||
404: {"description": "Session not found"},
|
||||
500: {"description": "Internal server error"}
|
||||
}
|
||||
)
|
||||
|
||||
# ============================================================================
|
||||
# Data Structures
|
||||
# ============================================================================
|
||||
|
||||
# In-memory storage for profiling sessions
|
||||
PROFILING_SESSIONS: Dict[str, Dict[str, Any]] = {}
|
||||
|
||||
# Real-time crawler statistics
|
||||
CRAWLER_STATS = {
|
||||
"active_crawls": 0,
|
||||
"total_crawls": 0,
|
||||
"successful_crawls": 0,
|
||||
"failed_crawls": 0,
|
||||
"total_bytes_processed": 0,
|
||||
"average_response_time_ms": 0.0,
|
||||
"last_updated": datetime.now().isoformat(),
|
||||
}
|
||||
|
||||
# Per-URL statistics
|
||||
URL_STATS: Dict[str, Dict[str, Any]] = defaultdict(lambda: {
|
||||
"total_requests": 0,
|
||||
"success_count": 0,
|
||||
"failure_count": 0,
|
||||
"average_time_ms": 0.0,
|
||||
"last_accessed": None,
|
||||
})
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Pydantic Models
|
||||
# ============================================================================
|
||||
|
||||
class ProfilingStartRequest(BaseModel):
|
||||
"""Request to start a profiling session."""
|
||||
url: str = Field(..., description="URL to profile")
|
||||
browser_config: Optional[Dict[str, Any]] = Field(
|
||||
default_factory=dict,
|
||||
description="Browser configuration"
|
||||
)
|
||||
crawler_config: Optional[Dict[str, Any]] = Field(
|
||||
default_factory=dict,
|
||||
description="Crawler configuration"
|
||||
)
|
||||
profile_duration: Optional[int] = Field(
|
||||
default=30,
|
||||
ge=5,
|
||||
le=300,
|
||||
description="Maximum profiling duration in seconds"
|
||||
)
|
||||
collect_network: bool = Field(
|
||||
default=True,
|
||||
description="Collect network performance data"
|
||||
)
|
||||
collect_memory: bool = Field(
|
||||
default=True,
|
||||
description="Collect memory usage data"
|
||||
)
|
||||
collect_cpu: bool = Field(
|
||||
default=True,
|
||||
description="Collect CPU usage data"
|
||||
)
|
||||
|
||||
class Config:
|
||||
schema_extra = {
|
||||
"example": {
|
||||
"url": "https://example.com",
|
||||
"profile_duration": 30,
|
||||
"collect_network": True,
|
||||
"collect_memory": True,
|
||||
"collect_cpu": True
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
class ProfilingSession(BaseModel):
|
||||
"""Profiling session information."""
|
||||
session_id: str = Field(..., description="Unique session identifier")
|
||||
status: str = Field(..., description="Session status: running, completed, failed")
|
||||
url: str = Field(..., description="URL being profiled")
|
||||
start_time: str = Field(..., description="Session start time (ISO format)")
|
||||
end_time: Optional[str] = Field(None, description="Session end time (ISO format)")
|
||||
duration_seconds: Optional[float] = Field(None, description="Total duration in seconds")
|
||||
results: Optional[Dict[str, Any]] = Field(None, description="Profiling results")
|
||||
error: Optional[str] = Field(None, description="Error message if failed")
|
||||
|
||||
class Config:
|
||||
schema_extra = {
|
||||
"example": {
|
||||
"session_id": "abc123",
|
||||
"status": "completed",
|
||||
"url": "https://example.com",
|
||||
"start_time": "2025-10-16T10:30:00",
|
||||
"end_time": "2025-10-16T10:30:30",
|
||||
"duration_seconds": 30.5,
|
||||
"results": {
|
||||
"performance": {
|
||||
"page_load_time_ms": 1234,
|
||||
"dom_content_loaded_ms": 890,
|
||||
"first_paint_ms": 567
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
class CrawlerStats(BaseModel):
|
||||
"""Current crawler statistics."""
|
||||
active_crawls: int = Field(..., description="Number of currently active crawls")
|
||||
total_crawls: int = Field(..., description="Total crawls since server start")
|
||||
successful_crawls: int = Field(..., description="Number of successful crawls")
|
||||
failed_crawls: int = Field(..., description="Number of failed crawls")
|
||||
success_rate: float = Field(..., description="Success rate percentage")
|
||||
total_bytes_processed: int = Field(..., description="Total bytes processed")
|
||||
average_response_time_ms: float = Field(..., description="Average response time")
|
||||
uptime_seconds: float = Field(..., description="Server uptime in seconds")
|
||||
memory_usage_mb: float = Field(..., description="Current memory usage in MB")
|
||||
cpu_percent: float = Field(..., description="Current CPU usage percentage")
|
||||
last_updated: str = Field(..., description="Last update timestamp")
|
||||
|
||||
|
||||
class URLStatistics(BaseModel):
|
||||
"""Statistics for a specific URL pattern."""
|
||||
url_pattern: str
|
||||
total_requests: int
|
||||
success_count: int
|
||||
failure_count: int
|
||||
success_rate: float
|
||||
average_time_ms: float
|
||||
last_accessed: Optional[str]
|
||||
|
||||
|
||||
class SessionListResponse(BaseModel):
|
||||
"""List of profiling sessions."""
|
||||
total: int
|
||||
sessions: List[ProfilingSession]
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Helper Functions
|
||||
# ============================================================================
|
||||
|
||||
def get_system_stats() -> Dict[str, Any]:
|
||||
"""Get current system resource usage."""
|
||||
try:
|
||||
process = psutil.Process()
|
||||
|
||||
return {
|
||||
"memory_usage_mb": process.memory_info().rss / 1024 / 1024,
|
||||
"cpu_percent": process.cpu_percent(interval=0.1),
|
||||
"num_threads": process.num_threads(),
|
||||
"open_files": len(process.open_files()),
|
||||
"connections": len(process.connections()),
|
||||
}
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting system stats: {e}")
|
||||
return {
|
||||
"memory_usage_mb": 0.0,
|
||||
"cpu_percent": 0.0,
|
||||
"num_threads": 0,
|
||||
"open_files": 0,
|
||||
"connections": 0,
|
||||
}
|
||||
|
||||
|
||||
def cleanup_old_sessions(max_age_hours: int = 24):
|
||||
"""Remove old profiling sessions to prevent memory leaks."""
|
||||
cutoff = datetime.now() - timedelta(hours=max_age_hours)
|
||||
|
||||
to_remove = []
|
||||
for session_id, session in PROFILING_SESSIONS.items():
|
||||
try:
|
||||
start_time = datetime.fromisoformat(session["start_time"])
|
||||
if start_time < cutoff:
|
||||
to_remove.append(session_id)
|
||||
except (ValueError, KeyError):
|
||||
continue
|
||||
|
||||
for session_id in to_remove:
|
||||
del PROFILING_SESSIONS[session_id]
|
||||
logger.info(f"Cleaned up old session: {session_id}")
|
||||
|
||||
return len(to_remove)
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Profiling Endpoints
|
||||
# ============================================================================
|
||||
|
||||
@router.post(
|
||||
"/profile/start",
|
||||
response_model=ProfilingSession,
|
||||
summary="Start profiling session",
|
||||
description="Start a new browser profiling session for performance analysis"
|
||||
)
|
||||
async def start_profiling_session(
|
||||
request: ProfilingStartRequest,
|
||||
background_tasks: BackgroundTasks
|
||||
):
|
||||
"""
|
||||
Start a new profiling session.
|
||||
|
||||
Returns a session ID that can be used to retrieve results later.
|
||||
The profiling runs in the background and collects:
|
||||
- Page load performance metrics
|
||||
- Network requests and timing
|
||||
- Memory usage patterns
|
||||
- CPU utilization
|
||||
- Browser-specific metrics
|
||||
"""
|
||||
session_id = str(uuid.uuid4())
|
||||
start_time = datetime.now()
|
||||
|
||||
session_data = {
|
||||
"session_id": session_id,
|
||||
"status": "running",
|
||||
"url": request.url,
|
||||
"start_time": start_time.isoformat(),
|
||||
"end_time": None,
|
||||
"duration_seconds": None,
|
||||
"results": None,
|
||||
"error": None,
|
||||
"config": {
|
||||
"profile_duration": request.profile_duration,
|
||||
"collect_network": request.collect_network,
|
||||
"collect_memory": request.collect_memory,
|
||||
"collect_cpu": request.collect_cpu,
|
||||
}
|
||||
}
|
||||
|
||||
PROFILING_SESSIONS[session_id] = session_data
|
||||
|
||||
# Add background task to run profiling
|
||||
background_tasks.add_task(
|
||||
run_profiling_session,
|
||||
session_id,
|
||||
request
|
||||
)
|
||||
|
||||
logger.info(f"Started profiling session {session_id} for {request.url}")
|
||||
|
||||
return ProfilingSession(**session_data)
|
||||
|
||||
|
||||
@router.get(
|
||||
"/profile/{session_id}",
|
||||
response_model=ProfilingSession,
|
||||
summary="Get profiling results",
|
||||
description="Retrieve results from a profiling session"
|
||||
)
|
||||
async def get_profiling_results(session_id: str):
|
||||
"""
|
||||
Get profiling session results.
|
||||
|
||||
Returns the current status and results of a profiling session.
|
||||
If the session is still running, results will be None.
|
||||
"""
|
||||
if session_id not in PROFILING_SESSIONS:
|
||||
raise HTTPException(
|
||||
status_code=404,
|
||||
detail=f"Profiling session '{session_id}' not found"
|
||||
)
|
||||
|
||||
session = PROFILING_SESSIONS[session_id]
|
||||
return ProfilingSession(**session)
|
||||
|
||||
|
||||
@router.get(
|
||||
"/profile",
|
||||
response_model=SessionListResponse,
|
||||
summary="List profiling sessions",
|
||||
description="List all profiling sessions with optional filtering"
|
||||
)
|
||||
async def list_profiling_sessions(
|
||||
status: Optional[str] = Query(None, description="Filter by status: running, completed, failed"),
|
||||
limit: int = Query(50, ge=1, le=500, description="Maximum number of sessions to return")
|
||||
):
|
||||
"""
|
||||
List all profiling sessions.
|
||||
|
||||
Can be filtered by status and limited in number.
|
||||
"""
|
||||
sessions = list(PROFILING_SESSIONS.values())
|
||||
|
||||
# Filter by status if provided
|
||||
if status:
|
||||
sessions = [s for s in sessions if s["status"] == status]
|
||||
|
||||
# Sort by start time (newest first)
|
||||
sessions.sort(key=lambda x: x["start_time"], reverse=True)
|
||||
|
||||
# Limit results
|
||||
sessions = sessions[:limit]
|
||||
|
||||
return SessionListResponse(
|
||||
total=len(sessions),
|
||||
sessions=[ProfilingSession(**s) for s in sessions]
|
||||
)
|
||||
|
||||
|
||||
@router.delete(
|
||||
"/profile/{session_id}",
|
||||
summary="Delete profiling session",
|
||||
description="Delete a profiling session and its results"
|
||||
)
|
||||
async def delete_profiling_session(session_id: str):
|
||||
"""
|
||||
Delete a profiling session.
|
||||
|
||||
Removes the session and all associated data from memory.
|
||||
"""
|
||||
if session_id not in PROFILING_SESSIONS:
|
||||
raise HTTPException(
|
||||
status_code=404,
|
||||
detail=f"Profiling session '{session_id}' not found"
|
||||
)
|
||||
|
||||
session = PROFILING_SESSIONS.pop(session_id)
|
||||
logger.info(f"Deleted profiling session {session_id}")
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
"message": f"Session {session_id} deleted",
|
||||
"session": ProfilingSession(**session)
|
||||
}
|
||||
|
||||
|
||||
@router.post(
|
||||
"/profile/cleanup",
|
||||
summary="Cleanup old sessions",
|
||||
description="Remove old profiling sessions to free memory"
|
||||
)
|
||||
async def cleanup_sessions(
|
||||
max_age_hours: int = Query(24, ge=1, le=168, description="Maximum age in hours")
|
||||
):
|
||||
"""
|
||||
Cleanup old profiling sessions.
|
||||
|
||||
Removes sessions older than the specified age.
|
||||
"""
|
||||
removed = cleanup_old_sessions(max_age_hours)
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
"removed_count": removed,
|
||||
"remaining_count": len(PROFILING_SESSIONS),
|
||||
"message": f"Removed {removed} sessions older than {max_age_hours} hours"
|
||||
}
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Statistics Endpoints
|
||||
# ============================================================================
|
||||
|
||||
@router.get(
|
||||
"/stats",
|
||||
response_model=CrawlerStats,
|
||||
summary="Get crawler statistics",
|
||||
description="Get current crawler statistics and system metrics"
|
||||
)
|
||||
async def get_crawler_stats():
|
||||
"""
|
||||
Get current crawler statistics.
|
||||
|
||||
Returns real-time metrics about:
|
||||
- Active and total crawls
|
||||
- Success/failure rates
|
||||
- Response times
|
||||
- System resource usage
|
||||
"""
|
||||
system_stats = get_system_stats()
|
||||
|
||||
total = CRAWLER_STATS["successful_crawls"] + CRAWLER_STATS["failed_crawls"]
|
||||
success_rate = (
|
||||
(CRAWLER_STATS["successful_crawls"] / total * 100)
|
||||
if total > 0 else 0.0
|
||||
)
|
||||
|
||||
# Calculate uptime
|
||||
# In a real implementation, you'd track server start time
|
||||
uptime_seconds = 0.0 # Placeholder
|
||||
|
||||
stats = CrawlerStats(
|
||||
active_crawls=CRAWLER_STATS["active_crawls"],
|
||||
total_crawls=CRAWLER_STATS["total_crawls"],
|
||||
successful_crawls=CRAWLER_STATS["successful_crawls"],
|
||||
failed_crawls=CRAWLER_STATS["failed_crawls"],
|
||||
success_rate=success_rate,
|
||||
total_bytes_processed=CRAWLER_STATS["total_bytes_processed"],
|
||||
average_response_time_ms=CRAWLER_STATS["average_response_time_ms"],
|
||||
uptime_seconds=uptime_seconds,
|
||||
memory_usage_mb=system_stats["memory_usage_mb"],
|
||||
cpu_percent=system_stats["cpu_percent"],
|
||||
last_updated=datetime.now().isoformat()
|
||||
)
|
||||
|
||||
return stats
|
||||
|
||||
|
||||
@router.get(
|
||||
"/stats/stream",
|
||||
summary="Stream crawler statistics",
|
||||
description="Server-Sent Events stream of real-time crawler statistics"
|
||||
)
|
||||
async def stream_crawler_stats(
|
||||
interval: int = Query(2, ge=1, le=60, description="Update interval in seconds")
|
||||
):
|
||||
"""
|
||||
Stream real-time crawler statistics.
|
||||
|
||||
Returns an SSE (Server-Sent Events) stream that pushes
|
||||
statistics updates at the specified interval.
|
||||
|
||||
Example:
|
||||
```javascript
|
||||
const eventSource = new EventSource('/monitoring/stats/stream?interval=2');
|
||||
eventSource.onmessage = (event) => {
|
||||
const stats = JSON.parse(event.data);
|
||||
console.log('Stats:', stats);
|
||||
};
|
||||
```
|
||||
"""
|
||||
|
||||
async def generate_stats() -> AsyncGenerator[str, None]:
|
||||
"""Generate stats stream."""
|
||||
try:
|
||||
while True:
|
||||
# Get current stats
|
||||
stats = await get_crawler_stats()
|
||||
|
||||
# Format as SSE
|
||||
data = json.dumps(stats.dict())
|
||||
yield f"data: {data}\n\n"
|
||||
|
||||
# Wait for next interval
|
||||
await asyncio.sleep(interval)
|
||||
|
||||
except asyncio.CancelledError:
|
||||
logger.info("Stats stream cancelled by client")
|
||||
except Exception as e:
|
||||
logger.error(f"Error in stats stream: {e}")
|
||||
yield f"event: error\ndata: {json.dumps({'error': str(e)})}\n\n"
|
||||
|
||||
return StreamingResponse(
|
||||
generate_stats(),
|
||||
media_type="text/event-stream",
|
||||
headers={
|
||||
"Cache-Control": "no-cache",
|
||||
"Connection": "keep-alive",
|
||||
"X-Accel-Buffering": "no",
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
@router.get(
|
||||
"/stats/urls",
|
||||
response_model=List[URLStatistics],
|
||||
summary="Get URL statistics",
|
||||
description="Get statistics for crawled URLs"
|
||||
)
|
||||
async def get_url_statistics(
|
||||
limit: int = Query(100, ge=1, le=1000, description="Maximum number of URLs to return"),
|
||||
sort_by: str = Query("total_requests", description="Sort field: total_requests, success_rate, average_time_ms")
|
||||
):
|
||||
"""
|
||||
Get statistics for crawled URLs.
|
||||
|
||||
Returns metrics for each URL that has been crawled,
|
||||
including request counts, success rates, and timing.
|
||||
"""
|
||||
stats_list = []
|
||||
|
||||
for url, stats in URL_STATS.items():
|
||||
total = stats["total_requests"]
|
||||
success_rate = (stats["success_count"] / total * 100) if total > 0 else 0.0
|
||||
|
||||
stats_list.append(URLStatistics(
|
||||
url_pattern=url,
|
||||
total_requests=stats["total_requests"],
|
||||
success_count=stats["success_count"],
|
||||
failure_count=stats["failure_count"],
|
||||
success_rate=success_rate,
|
||||
average_time_ms=stats["average_time_ms"],
|
||||
last_accessed=stats["last_accessed"]
|
||||
))
|
||||
|
||||
# Sort
|
||||
if sort_by == "success_rate":
|
||||
stats_list.sort(key=lambda x: x.success_rate, reverse=True)
|
||||
elif sort_by == "average_time_ms":
|
||||
stats_list.sort(key=lambda x: x.average_time_ms)
|
||||
else: # total_requests
|
||||
stats_list.sort(key=lambda x: x.total_requests, reverse=True)
|
||||
|
||||
return stats_list[:limit]
|
||||
|
||||
|
||||
@router.post(
|
||||
"/stats/reset",
|
||||
summary="Reset statistics",
|
||||
description="Reset all crawler statistics to zero"
|
||||
)
|
||||
async def reset_statistics():
|
||||
"""
|
||||
Reset all statistics.
|
||||
|
||||
Clears all accumulated statistics but keeps the server running.
|
||||
Useful for testing or starting fresh measurements.
|
||||
"""
|
||||
global CRAWLER_STATS, URL_STATS
|
||||
|
||||
CRAWLER_STATS = {
|
||||
"active_crawls": 0,
|
||||
"total_crawls": 0,
|
||||
"successful_crawls": 0,
|
||||
"failed_crawls": 0,
|
||||
"total_bytes_processed": 0,
|
||||
"average_response_time_ms": 0.0,
|
||||
"last_updated": datetime.now().isoformat(),
|
||||
}
|
||||
|
||||
URL_STATS.clear()
|
||||
|
||||
logger.info("All statistics reset")
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
"message": "All statistics have been reset",
|
||||
"timestamp": datetime.now().isoformat()
|
||||
}
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Background Tasks
|
||||
# ============================================================================
|
||||
|
||||
async def run_profiling_session(session_id: str, request: ProfilingStartRequest):
|
||||
"""
|
||||
Background task to run profiling session.
|
||||
|
||||
This performs the actual profiling work:
|
||||
1. Creates a crawler with profiling enabled
|
||||
2. Crawls the target URL
|
||||
3. Collects performance metrics
|
||||
4. Stores results in the session
|
||||
"""
|
||||
start_time = time.time()
|
||||
|
||||
try:
|
||||
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig
|
||||
from crawl4ai.browser_profiler import BrowserProfiler
|
||||
|
||||
logger.info(f"Starting profiling for session {session_id}")
|
||||
|
||||
# Create profiler
|
||||
profiler = BrowserProfiler()
|
||||
|
||||
# Configure browser and crawler
|
||||
browser_config = BrowserConfig.load(request.browser_config)
|
||||
crawler_config = CrawlerRunConfig.load(request.crawler_config)
|
||||
|
||||
# Enable profiling options
|
||||
browser_config.profiling_enabled = True
|
||||
|
||||
results = {}
|
||||
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
# Start profiling
|
||||
profiler.start()
|
||||
|
||||
# Collect system stats before
|
||||
stats_before = get_system_stats()
|
||||
|
||||
# Crawl with timeout
|
||||
try:
|
||||
result = await asyncio.wait_for(
|
||||
crawler.arun(request.url, config=crawler_config),
|
||||
timeout=request.profile_duration
|
||||
)
|
||||
|
||||
crawl_success = result.success
|
||||
|
||||
except asyncio.TimeoutError:
|
||||
logger.warning(f"Profiling session {session_id} timed out")
|
||||
crawl_success = False
|
||||
result = None
|
||||
|
||||
# Stop profiling
|
||||
profiler_results = profiler.stop()
|
||||
|
||||
# Collect system stats after
|
||||
stats_after = get_system_stats()
|
||||
|
||||
# Build results
|
||||
results = {
|
||||
"crawl_success": crawl_success,
|
||||
"url": request.url,
|
||||
"performance": profiler_results if profiler_results else {},
|
||||
"system": {
|
||||
"before": stats_before,
|
||||
"after": stats_after,
|
||||
"delta": {
|
||||
"memory_mb": stats_after["memory_usage_mb"] - stats_before["memory_usage_mb"],
|
||||
"cpu_percent": stats_after["cpu_percent"] - stats_before["cpu_percent"],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if result:
|
||||
results["content"] = {
|
||||
"markdown_length": len(result.markdown) if result.markdown else 0,
|
||||
"html_length": len(result.html) if result.html else 0,
|
||||
"links_count": len(result.links["internal"]) + len(result.links["external"]),
|
||||
"media_count": len(result.media["images"]) + len(result.media["videos"]),
|
||||
}
|
||||
|
||||
# Update session with results
|
||||
end_time = time.time()
|
||||
duration = end_time - start_time
|
||||
|
||||
PROFILING_SESSIONS[session_id].update({
|
||||
"status": "completed",
|
||||
"end_time": datetime.now().isoformat(),
|
||||
"duration_seconds": duration,
|
||||
"results": results
|
||||
})
|
||||
|
||||
logger.info(f"Profiling session {session_id} completed in {duration:.2f}s")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Profiling session {session_id} failed: {str(e)}")
|
||||
|
||||
PROFILING_SESSIONS[session_id].update({
|
||||
"status": "failed",
|
||||
"end_time": datetime.now().isoformat(),
|
||||
"duration_seconds": time.time() - start_time,
|
||||
"error": str(e)
|
||||
})
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Middleware Integration Points
|
||||
# ============================================================================
|
||||
|
||||
def track_crawl_start():
|
||||
"""Call this when a crawl starts."""
|
||||
CRAWLER_STATS["active_crawls"] += 1
|
||||
CRAWLER_STATS["total_crawls"] += 1
|
||||
CRAWLER_STATS["last_updated"] = datetime.now().isoformat()
|
||||
|
||||
|
||||
def track_crawl_end(url: str, success: bool, duration_ms: float, bytes_processed: int = 0):
|
||||
"""Call this when a crawl ends."""
|
||||
CRAWLER_STATS["active_crawls"] = max(0, CRAWLER_STATS["active_crawls"] - 1)
|
||||
|
||||
if success:
|
||||
CRAWLER_STATS["successful_crawls"] += 1
|
||||
else:
|
||||
CRAWLER_STATS["failed_crawls"] += 1
|
||||
|
||||
CRAWLER_STATS["total_bytes_processed"] += bytes_processed
|
||||
|
||||
# Update average response time (running average)
|
||||
total = CRAWLER_STATS["successful_crawls"] + CRAWLER_STATS["failed_crawls"]
|
||||
current_avg = CRAWLER_STATS["average_response_time_ms"]
|
||||
CRAWLER_STATS["average_response_time_ms"] = (
|
||||
(current_avg * (total - 1) + duration_ms) / total
|
||||
)
|
||||
|
||||
# Update URL stats
|
||||
url_stat = URL_STATS[url]
|
||||
url_stat["total_requests"] += 1
|
||||
|
||||
if success:
|
||||
url_stat["success_count"] += 1
|
||||
else:
|
||||
url_stat["failure_count"] += 1
|
||||
|
||||
# Update average time for this URL
|
||||
total_url = url_stat["total_requests"]
|
||||
current_avg_url = url_stat["average_time_ms"]
|
||||
url_stat["average_time_ms"] = (
|
||||
(current_avg_url * (total_url - 1) + duration_ms) / total_url
|
||||
)
|
||||
url_stat["last_accessed"] = datetime.now().isoformat()
|
||||
|
||||
CRAWLER_STATS["last_updated"] = datetime.now().isoformat()
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Health Check
|
||||
# ============================================================================
|
||||
|
||||
@router.get(
|
||||
"/health",
|
||||
summary="Health check",
|
||||
description="Check if monitoring system is operational"
|
||||
)
|
||||
async def health_check():
|
||||
"""
|
||||
Health check endpoint.
|
||||
|
||||
Returns status of the monitoring system.
|
||||
"""
|
||||
system_stats = get_system_stats()
|
||||
|
||||
return {
|
||||
"status": "healthy",
|
||||
"timestamp": datetime.now().isoformat(),
|
||||
"active_sessions": len([s for s in PROFILING_SESSIONS.values() if s["status"] == "running"]),
|
||||
"total_sessions": len(PROFILING_SESSIONS),
|
||||
"system": system_stats
|
||||
}
|
||||
306
deploy/docker/routers/scripts.py
Normal file
306
deploy/docker/routers/scripts.py
Normal file
@@ -0,0 +1,306 @@
|
||||
from typing import Optional
|
||||
|
||||
from fastapi import APIRouter, File, Form, HTTPException, UploadFile
|
||||
from schemas import C4AScriptPayload
|
||||
|
||||
from crawl4ai.script import (
|
||||
CompilationResult,
|
||||
ValidationResult,
|
||||
# ErrorDetail
|
||||
)
|
||||
|
||||
# Import all necessary components from the crawl4ai library
|
||||
# C4A Script Language Support
|
||||
from crawl4ai.script import (
|
||||
compile as c4a_compile,
|
||||
)
|
||||
from crawl4ai.script import (
|
||||
validate as c4a_validate,
|
||||
)
|
||||
|
||||
# --- APIRouter for c4a Scripts Endpoints ---
|
||||
router = APIRouter(
|
||||
prefix="/c4a",
|
||||
tags=["c4a Scripts"],
|
||||
)
|
||||
|
||||
# --- Background Worker Function ---
|
||||
|
||||
|
||||
@router.post("/validate",
|
||||
summary="Validate C4A-Script",
|
||||
description="Validate the syntax of a C4A-Script without compiling it.",
|
||||
response_description="Validation result with errors if any",
|
||||
response_model=ValidationResult
|
||||
)
|
||||
async def validate_c4a_script_endpoint(payload: C4AScriptPayload):
|
||||
"""
|
||||
Validate the syntax of a C4A-Script.
|
||||
|
||||
Checks the script syntax without compiling to executable JavaScript.
|
||||
Returns detailed error information if validation fails.
|
||||
|
||||
**Request Body:**
|
||||
```json
|
||||
{
|
||||
"script": "NAVIGATE https://example.com\\nWAIT 2\\nCLICK button.submit"
|
||||
}
|
||||
```
|
||||
|
||||
**Response (Valid):**
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"errors": []
|
||||
}
|
||||
```
|
||||
|
||||
**Response (Invalid):**
|
||||
```json
|
||||
{
|
||||
"success": false,
|
||||
"errors": [
|
||||
{
|
||||
"line": 3,
|
||||
"message": "Unknown command: CLCK",
|
||||
"type": "SyntaxError"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
**Usage:**
|
||||
```python
|
||||
response = requests.post(
|
||||
"http://localhost:11235/c4a/validate",
|
||||
headers={"Authorization": f"Bearer {token}"},
|
||||
json={
|
||||
"script": "NAVIGATE https://example.com\\nWAIT 2"
|
||||
}
|
||||
)
|
||||
result = response.json()
|
||||
if result["success"]:
|
||||
print("Script is valid!")
|
||||
else:
|
||||
for error in result["errors"]:
|
||||
print(f"Line {error['line']}: {error['message']}")
|
||||
```
|
||||
|
||||
**Notes:**
|
||||
- Validates syntax only, doesn't execute
|
||||
- Returns detailed error locations
|
||||
- Use before compiling to check for issues
|
||||
"""
|
||||
# The validate function is designed not to raise exceptions
|
||||
validation_result = c4a_validate(payload.script)
|
||||
return validation_result
|
||||
|
||||
|
||||
@router.post("/compile",
|
||||
summary="Compile C4A-Script",
|
||||
description="Compile a C4A-Script into executable JavaScript code.",
|
||||
response_description="Compiled JavaScript code or compilation errors",
|
||||
response_model=CompilationResult
|
||||
)
|
||||
async def compile_c4a_script_endpoint(payload: C4AScriptPayload):
|
||||
"""
|
||||
Compile a C4A-Script into executable JavaScript.
|
||||
|
||||
Transforms high-level C4A-Script commands into JavaScript that can be
|
||||
executed in a browser context.
|
||||
|
||||
**Request Body:**
|
||||
```json
|
||||
{
|
||||
"script": "NAVIGATE https://example.com\\nWAIT 2\\nCLICK button.submit"
|
||||
}
|
||||
```
|
||||
|
||||
**Response (Success):**
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"javascript": "await page.goto('https://example.com');\\nawait page.waitForTimeout(2000);\\nawait page.click('button.submit');",
|
||||
"errors": []
|
||||
}
|
||||
```
|
||||
|
||||
**Response (Error):**
|
||||
```json
|
||||
{
|
||||
"success": false,
|
||||
"javascript": null,
|
||||
"errors": [
|
||||
{
|
||||
"line": 2,
|
||||
"message": "Invalid WAIT duration",
|
||||
"type": "CompilationError"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
**Usage:**
|
||||
```python
|
||||
response = requests.post(
|
||||
"http://localhost:11235/c4a/compile",
|
||||
headers={"Authorization": f"Bearer {token}"},
|
||||
json={
|
||||
"script": "NAVIGATE https://example.com\\nCLICK .login-button"
|
||||
}
|
||||
)
|
||||
result = response.json()
|
||||
if result["success"]:
|
||||
print("Compiled JavaScript:")
|
||||
print(result["javascript"])
|
||||
else:
|
||||
print("Compilation failed:", result["errors"])
|
||||
```
|
||||
|
||||
**C4A-Script Commands:**
|
||||
- `NAVIGATE <url>` - Navigate to URL
|
||||
- `WAIT <seconds>` - Wait for specified time
|
||||
- `CLICK <selector>` - Click element
|
||||
- `TYPE <selector> <text>` - Type text into element
|
||||
- `SCROLL <direction>` - Scroll page
|
||||
- And many more...
|
||||
|
||||
**Notes:**
|
||||
- Returns HTTP 400 if compilation fails
|
||||
- JavaScript can be used with /execute_js endpoint
|
||||
- Simplifies browser automation scripting
|
||||
"""
|
||||
# The compile function also returns a result object instead of raising
|
||||
compilation_result = c4a_compile(payload.script)
|
||||
|
||||
if not compilation_result.success:
|
||||
# You can optionally raise an HTTP exception for failed compilations
|
||||
# This makes it clearer on the client-side that it was a bad request
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail=compilation_result.to_dict(), # FastAPI will serialize this
|
||||
)
|
||||
|
||||
return compilation_result
|
||||
|
||||
|
||||
@router.post("/compile-file",
|
||||
summary="Compile C4A-Script from File",
|
||||
description="Compile a C4A-Script from an uploaded file or form string.",
|
||||
response_description="Compiled JavaScript code or compilation errors",
|
||||
response_model=CompilationResult
|
||||
)
|
||||
async def compile_c4a_script_file_endpoint(
|
||||
file: Optional[UploadFile] = File(None), script: Optional[str] = Form(None)
|
||||
):
|
||||
"""
|
||||
Compile a C4A-Script from file upload or form data.
|
||||
|
||||
Accepts either a file upload or a string parameter. Useful for uploading
|
||||
C4A-Script files or sending multipart form data.
|
||||
|
||||
**Parameters:**
|
||||
- `file`: C4A-Script file upload (multipart/form-data)
|
||||
- `script`: C4A-Script content as string (form field)
|
||||
|
||||
**Note:** Provide either file OR script, not both.
|
||||
|
||||
**Request (File Upload):**
|
||||
```bash
|
||||
curl -X POST "http://localhost:11235/c4a/compile-file" \\
|
||||
-H "Authorization: Bearer YOUR_TOKEN" \\
|
||||
-F "file=@myscript.c4a"
|
||||
```
|
||||
|
||||
**Request (Form String):**
|
||||
```bash
|
||||
curl -X POST "http://localhost:11235/c4a/compile-file" \\
|
||||
-H "Authorization: Bearer YOUR_TOKEN" \\
|
||||
-F "script=NAVIGATE https://example.com"
|
||||
```
|
||||
|
||||
**Response:**
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"javascript": "await page.goto('https://example.com');",
|
||||
"errors": []
|
||||
}
|
||||
```
|
||||
|
||||
**Usage (Python with file):**
|
||||
```python
|
||||
with open('script.c4a', 'rb') as f:
|
||||
response = requests.post(
|
||||
"http://localhost:11235/c4a/compile-file",
|
||||
headers={"Authorization": f"Bearer {token}"},
|
||||
files={"file": f}
|
||||
)
|
||||
result = response.json()
|
||||
print(result["javascript"])
|
||||
```
|
||||
|
||||
**Usage (Python with string):**
|
||||
```python
|
||||
response = requests.post(
|
||||
"http://localhost:11235/c4a/compile-file",
|
||||
headers={"Authorization": f"Bearer {token}"},
|
||||
data={"script": "NAVIGATE https://example.com"}
|
||||
)
|
||||
result = response.json()
|
||||
print(result["javascript"])
|
||||
```
|
||||
|
||||
**Notes:**
|
||||
- File must be UTF-8 encoded text
|
||||
- Use for batch script compilation
|
||||
- Returns HTTP 400 if both or neither parameter provided
|
||||
- Returns HTTP 400 if compilation fails
|
||||
"""
|
||||
script_content = None
|
||||
|
||||
# Validate that at least one input is provided
|
||||
if not file and not script:
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail={"error": "Either 'file' or 'script' parameter must be provided"},
|
||||
)
|
||||
|
||||
# If both are provided, prioritize the file
|
||||
if file and script:
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail={"error": "Please provide either 'file' or 'script', not both"},
|
||||
)
|
||||
|
||||
# Handle file upload
|
||||
if file:
|
||||
try:
|
||||
file_content = await file.read()
|
||||
script_content = file_content.decode("utf-8")
|
||||
except UnicodeDecodeError as exc:
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail={"error": "File must be a valid UTF-8 text file"},
|
||||
) from exc
|
||||
except Exception as e:
|
||||
raise HTTPException(
|
||||
status_code=400, detail={"error": f"Error reading file: {str(e)}"}
|
||||
) from e
|
||||
|
||||
# Handle string content
|
||||
elif script:
|
||||
script_content = script
|
||||
|
||||
# Compile the script content
|
||||
compilation_result = c4a_compile(script_content)
|
||||
|
||||
if not compilation_result.success:
|
||||
# You can optionally raise an HTTP exception for failed compilations
|
||||
# This makes it clearer on the client-side that it was a bad request
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail=compilation_result.to_dict(), # FastAPI will serialize this
|
||||
)
|
||||
|
||||
return compilation_result
|
||||
301
deploy/docker/routers/tables.py
Normal file
301
deploy/docker/routers/tables.py
Normal file
@@ -0,0 +1,301 @@
|
||||
"""
|
||||
Table Extraction Router for Crawl4AI Docker Server
|
||||
|
||||
This module provides dedicated endpoints for table extraction from HTML or URLs,
|
||||
separate from the main crawling functionality.
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import List, Dict, Any
|
||||
from fastapi import APIRouter, HTTPException
|
||||
from fastapi.responses import JSONResponse
|
||||
|
||||
# Import crawler pool for browser reuse
|
||||
from crawler_pool import get_crawler
|
||||
|
||||
# Import schemas
|
||||
from schemas import (
|
||||
TableExtractionRequest,
|
||||
TableExtractionBatchRequest,
|
||||
TableExtractionConfig,
|
||||
)
|
||||
|
||||
# Import utilities
|
||||
from utils import (
|
||||
extract_tables_from_html,
|
||||
format_table_response,
|
||||
create_table_extraction_strategy,
|
||||
)
|
||||
|
||||
# Configure logger
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Create router
|
||||
router = APIRouter(prefix="/tables", tags=["Table Extraction"])
|
||||
|
||||
|
||||
@router.post(
|
||||
"/extract",
|
||||
summary="Extract Tables from HTML or URL",
|
||||
description="""
|
||||
Extract tables from HTML content or by fetching a URL.
|
||||
Supports multiple extraction strategies: default, LLM-based, or financial.
|
||||
|
||||
**Input Options:**
|
||||
- Provide `html` for direct HTML content extraction
|
||||
- Provide `url` to fetch and extract from a live page
|
||||
- Cannot provide both `html` and `url` simultaneously
|
||||
|
||||
**Strategies:**
|
||||
- `default`: Fast regex and HTML structure-based extraction
|
||||
- `llm`: AI-powered extraction with semantic understanding (requires LLM config)
|
||||
- `financial`: Specialized extraction for financial tables with numerical formatting
|
||||
|
||||
**Returns:**
|
||||
- List of extracted tables with headers, rows, and metadata
|
||||
- Each table includes cell-level details and formatting information
|
||||
""",
|
||||
response_description="Extracted tables with metadata",
|
||||
)
|
||||
async def extract_tables(request: TableExtractionRequest) -> JSONResponse:
|
||||
"""
|
||||
Extract tables from HTML content or URL.
|
||||
|
||||
Args:
|
||||
request: TableExtractionRequest with html/url and extraction config
|
||||
|
||||
Returns:
|
||||
JSONResponse with extracted tables and metadata
|
||||
|
||||
Raises:
|
||||
HTTPException: If validation fails or extraction errors occur
|
||||
"""
|
||||
try:
|
||||
# Validate input
|
||||
if request.html and request.url:
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail="Cannot provide both 'html' and 'url'. Choose one input method."
|
||||
)
|
||||
|
||||
if not request.html and not request.url:
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail="Must provide either 'html' or 'url' for table extraction."
|
||||
)
|
||||
|
||||
# Handle URL-based extraction
|
||||
if request.url:
|
||||
# Import crawler configs
|
||||
from async_configs import BrowserConfig, CrawlerRunConfig
|
||||
|
||||
try:
|
||||
# Create minimal browser config
|
||||
browser_config = BrowserConfig(
|
||||
headless=True,
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
# Create crawler config with table extraction
|
||||
table_strategy = create_table_extraction_strategy(request.config)
|
||||
crawler_config = CrawlerRunConfig(
|
||||
table_extraction_strategy=table_strategy,
|
||||
)
|
||||
|
||||
# Get crawler from pool (browser reuse for memory efficiency)
|
||||
crawler = await get_crawler(browser_config, adapter=None)
|
||||
|
||||
# Crawl the URL
|
||||
result = await crawler.arun(
|
||||
url=request.url,
|
||||
config=crawler_config,
|
||||
)
|
||||
|
||||
if not result.success:
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail=f"Failed to fetch URL: {result.error_message}"
|
||||
)
|
||||
|
||||
# Extract HTML
|
||||
html_content = result.html
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching URL {request.url}: {e}")
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail=f"Failed to fetch and extract from URL: {str(e)}"
|
||||
)
|
||||
|
||||
else:
|
||||
# Use provided HTML
|
||||
html_content = request.html
|
||||
|
||||
# Extract tables from HTML
|
||||
tables = await extract_tables_from_html(html_content, request.config)
|
||||
|
||||
# Format response
|
||||
formatted_tables = format_table_response(tables)
|
||||
|
||||
return JSONResponse({
|
||||
"success": True,
|
||||
"table_count": len(formatted_tables),
|
||||
"tables": formatted_tables,
|
||||
"strategy": request.config.strategy.value,
|
||||
})
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"Error extracting tables: {e}", exc_info=True)
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail=f"Table extraction failed: {str(e)}"
|
||||
)
|
||||
|
||||
|
||||
@router.post(
|
||||
"/extract/batch",
|
||||
summary="Extract Tables from Multiple Sources (Batch)",
|
||||
description="""
|
||||
Extract tables from multiple HTML contents or URLs in a single request.
|
||||
Processes each input independently and returns results for all.
|
||||
|
||||
**Batch Processing:**
|
||||
- Provide list of HTML contents and/or URLs
|
||||
- Each input is processed with the same extraction strategy
|
||||
- Partial failures are allowed (returns results for successful extractions)
|
||||
|
||||
**Use Cases:**
|
||||
- Extracting tables from multiple pages simultaneously
|
||||
- Bulk financial data extraction
|
||||
- Comparing table structures across multiple sources
|
||||
""",
|
||||
response_description="Batch extraction results with per-item success status",
|
||||
)
|
||||
async def extract_tables_batch(request: TableExtractionBatchRequest) -> JSONResponse:
|
||||
"""
|
||||
Extract tables from multiple HTML contents or URLs in batch.
|
||||
|
||||
Args:
|
||||
request: TableExtractionBatchRequest with list of html/url and config
|
||||
|
||||
Returns:
|
||||
JSONResponse with batch results
|
||||
|
||||
Raises:
|
||||
HTTPException: If validation fails
|
||||
"""
|
||||
try:
|
||||
# Validate batch request
|
||||
total_items = len(request.html_list or []) + len(request.url_list or [])
|
||||
|
||||
if total_items == 0:
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail="Must provide at least one HTML content or URL in batch request."
|
||||
)
|
||||
|
||||
if total_items > 50: # Reasonable batch limit
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail=f"Batch size ({total_items}) exceeds maximum allowed (50)."
|
||||
)
|
||||
|
||||
results = []
|
||||
|
||||
# Process HTML list
|
||||
if request.html_list:
|
||||
for idx, html_content in enumerate(request.html_list):
|
||||
try:
|
||||
tables = await extract_tables_from_html(html_content, request.config)
|
||||
formatted_tables = format_table_response(tables)
|
||||
|
||||
results.append({
|
||||
"success": True,
|
||||
"source": f"html_{idx}",
|
||||
"table_count": len(formatted_tables),
|
||||
"tables": formatted_tables,
|
||||
})
|
||||
except Exception as e:
|
||||
logger.error(f"Error extracting tables from html_{idx}: {e}")
|
||||
results.append({
|
||||
"success": False,
|
||||
"source": f"html_{idx}",
|
||||
"error": str(e),
|
||||
})
|
||||
|
||||
# Process URL list
|
||||
if request.url_list:
|
||||
from async_configs import BrowserConfig, CrawlerRunConfig
|
||||
|
||||
browser_config = BrowserConfig(
|
||||
headless=True,
|
||||
verbose=False,
|
||||
)
|
||||
table_strategy = create_table_extraction_strategy(request.config)
|
||||
crawler_config = CrawlerRunConfig(
|
||||
table_extraction_strategy=table_strategy,
|
||||
)
|
||||
|
||||
# Get crawler from pool (reuse browser for all URLs in batch)
|
||||
crawler = await get_crawler(browser_config, adapter=None)
|
||||
|
||||
for url in request.url_list:
|
||||
try:
|
||||
result = await crawler.arun(
|
||||
url=url,
|
||||
config=crawler_config,
|
||||
)
|
||||
|
||||
if result.success:
|
||||
html_content = result.html
|
||||
tables = await extract_tables_from_html(html_content, request.config)
|
||||
formatted_tables = format_table_response(tables)
|
||||
|
||||
results.append({
|
||||
"success": True,
|
||||
"source": url,
|
||||
"table_count": len(formatted_tables),
|
||||
"tables": formatted_tables,
|
||||
})
|
||||
else:
|
||||
results.append({
|
||||
"success": False,
|
||||
"source": url,
|
||||
"error": result.error_message,
|
||||
})
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error extracting tables from {url}: {e}")
|
||||
results.append({
|
||||
"success": False,
|
||||
"source": url,
|
||||
"error": str(e),
|
||||
})
|
||||
|
||||
# Calculate summary
|
||||
successful = sum(1 for r in results if r["success"])
|
||||
failed = len(results) - successful
|
||||
total_tables = sum(r.get("table_count", 0) for r in results if r["success"])
|
||||
|
||||
return JSONResponse({
|
||||
"success": True,
|
||||
"summary": {
|
||||
"total_processed": len(results),
|
||||
"successful": successful,
|
||||
"failed": failed,
|
||||
"total_tables_extracted": total_tables,
|
||||
},
|
||||
"results": results,
|
||||
"strategy": request.config.strategy.value,
|
||||
})
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"Error in batch table extraction: {e}", exc_info=True)
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail=f"Batch table extraction failed: {str(e)}"
|
||||
)
|
||||
@@ -1,28 +1,249 @@
|
||||
from typing import List, Optional, Dict
|
||||
from enum import Enum
|
||||
from pydantic import BaseModel, Field, HttpUrl
|
||||
from typing import Any, Dict, List, Literal, Optional
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
from utils import FilterType
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Dispatcher Schemas
|
||||
# ============================================================================
|
||||
|
||||
class DispatcherType(str, Enum):
|
||||
"""Available dispatcher types for crawling."""
|
||||
MEMORY_ADAPTIVE = "memory_adaptive"
|
||||
SEMAPHORE = "semaphore"
|
||||
|
||||
|
||||
class DispatcherInfo(BaseModel):
|
||||
"""Information about a dispatcher type."""
|
||||
type: DispatcherType
|
||||
name: str
|
||||
description: str
|
||||
config: Dict[str, Any]
|
||||
features: List[str]
|
||||
|
||||
|
||||
class DispatcherStatsResponse(BaseModel):
|
||||
"""Response model for dispatcher statistics."""
|
||||
type: DispatcherType
|
||||
active_sessions: int
|
||||
config: Dict[str, Any]
|
||||
stats: Optional[Dict[str, Any]] = Field(
|
||||
None,
|
||||
description="Additional dispatcher-specific statistics"
|
||||
)
|
||||
|
||||
|
||||
class DispatcherSelection(BaseModel):
|
||||
"""Model for selecting a dispatcher in crawl requests."""
|
||||
dispatcher: Optional[DispatcherType] = Field(
|
||||
None,
|
||||
description="Dispatcher type to use. Defaults to memory_adaptive if not specified."
|
||||
)
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# End Dispatcher Schemas
|
||||
# ============================================================================
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Table Extraction Schemas
|
||||
# ============================================================================
|
||||
|
||||
class TableExtractionStrategy(str, Enum):
|
||||
"""Available table extraction strategies."""
|
||||
NONE = "none"
|
||||
DEFAULT = "default"
|
||||
LLM = "llm"
|
||||
FINANCIAL = "financial"
|
||||
|
||||
|
||||
class TableExtractionConfig(BaseModel):
|
||||
"""Configuration for table extraction."""
|
||||
|
||||
strategy: TableExtractionStrategy = Field(
|
||||
default=TableExtractionStrategy.DEFAULT,
|
||||
description="Table extraction strategy to use"
|
||||
)
|
||||
|
||||
# Common configuration for all strategies
|
||||
table_score_threshold: int = Field(
|
||||
default=7,
|
||||
ge=0,
|
||||
le=100,
|
||||
description="Minimum score for a table to be considered a data table (default strategy)"
|
||||
)
|
||||
min_rows: int = Field(
|
||||
default=0,
|
||||
ge=0,
|
||||
description="Minimum number of rows for a valid table"
|
||||
)
|
||||
min_cols: int = Field(
|
||||
default=0,
|
||||
ge=0,
|
||||
description="Minimum number of columns for a valid table"
|
||||
)
|
||||
|
||||
# LLM-specific configuration
|
||||
llm_provider: Optional[str] = Field(
|
||||
None,
|
||||
description="LLM provider for LLM strategy (e.g., 'openai/gpt-4')"
|
||||
)
|
||||
llm_model: Optional[str] = Field(
|
||||
None,
|
||||
description="Specific LLM model to use"
|
||||
)
|
||||
llm_api_key: Optional[str] = Field(
|
||||
None,
|
||||
description="API key for LLM provider (if not in environment)"
|
||||
)
|
||||
llm_base_url: Optional[str] = Field(
|
||||
None,
|
||||
description="Custom base URL for LLM API"
|
||||
)
|
||||
extraction_prompt: Optional[str] = Field(
|
||||
None,
|
||||
description="Custom prompt for LLM table extraction"
|
||||
)
|
||||
|
||||
# Financial-specific configuration
|
||||
decimal_separator: str = Field(
|
||||
default=".",
|
||||
description="Decimal separator for financial tables (e.g., '.' or ',')"
|
||||
)
|
||||
thousand_separator: str = Field(
|
||||
default=",",
|
||||
description="Thousand separator for financial tables (e.g., ',' or '.')"
|
||||
)
|
||||
|
||||
# General options
|
||||
verbose: bool = Field(
|
||||
default=False,
|
||||
description="Enable verbose logging for table extraction"
|
||||
)
|
||||
|
||||
class Config:
|
||||
schema_extra = {
|
||||
"example": {
|
||||
"strategy": "default",
|
||||
"table_score_threshold": 7,
|
||||
"min_rows": 2,
|
||||
"min_cols": 2
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
class TableExtractionRequest(BaseModel):
|
||||
"""Request for dedicated table extraction endpoint."""
|
||||
|
||||
url: Optional[str] = Field(
|
||||
None,
|
||||
description="URL to crawl and extract tables from"
|
||||
)
|
||||
html: Optional[str] = Field(
|
||||
None,
|
||||
description="Raw HTML content to extract tables from"
|
||||
)
|
||||
config: TableExtractionConfig = Field(
|
||||
default_factory=lambda: TableExtractionConfig(),
|
||||
description="Table extraction configuration"
|
||||
)
|
||||
|
||||
# Browser config (only used if URL is provided)
|
||||
browser_config: Optional[Dict] = Field(
|
||||
default_factory=dict,
|
||||
description="Browser configuration for URL crawling"
|
||||
)
|
||||
|
||||
class Config:
|
||||
schema_extra = {
|
||||
"example": {
|
||||
"url": "https://example.com/data-table",
|
||||
"config": {
|
||||
"strategy": "default",
|
||||
"min_rows": 2
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
class TableExtractionBatchRequest(BaseModel):
|
||||
"""Request for batch table extraction."""
|
||||
|
||||
html_list: Optional[List[str]] = Field(
|
||||
None,
|
||||
description="List of HTML contents to extract tables from"
|
||||
)
|
||||
url_list: Optional[List[str]] = Field(
|
||||
None,
|
||||
description="List of URLs to extract tables from"
|
||||
)
|
||||
config: TableExtractionConfig = Field(
|
||||
default_factory=lambda: TableExtractionConfig(),
|
||||
description="Table extraction configuration"
|
||||
)
|
||||
browser_config: Optional[Dict] = Field(
|
||||
default_factory=dict,
|
||||
description="Browser configuration"
|
||||
)
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# End Table Extraction Schemas
|
||||
# ============================================================================
|
||||
|
||||
|
||||
class CrawlRequest(BaseModel):
|
||||
urls: List[str] = Field(min_length=1, max_length=100)
|
||||
browser_config: Optional[Dict] = Field(default_factory=dict)
|
||||
crawler_config: Optional[Dict] = Field(default_factory=dict)
|
||||
|
||||
anti_bot_strategy: Literal["default", "stealth", "undetected", "max_evasion"] = (
|
||||
Field("default", description="The anti-bot strategy to use for the crawl.")
|
||||
)
|
||||
headless: bool = Field(True, description="Run the browser in headless mode.")
|
||||
|
||||
# Dispatcher selection
|
||||
dispatcher: Optional[DispatcherType] = Field(
|
||||
None,
|
||||
description="Dispatcher type to use for crawling. Defaults to memory_adaptive if not specified."
|
||||
)
|
||||
|
||||
# Proxy rotation configuration
|
||||
proxy_rotation_strategy: Optional[Literal["round_robin", "random", "least_used", "failure_aware"]] = Field(
|
||||
None, description="Proxy rotation strategy to use for the crawl."
|
||||
)
|
||||
proxies: Optional[List[Dict[str, Any]]] = Field(
|
||||
None, description="List of proxy configurations (dicts with server, username, password, etc.)"
|
||||
)
|
||||
proxy_failure_threshold: Optional[int] = Field(
|
||||
3, ge=1, le=10, description="Failure threshold for failure_aware strategy"
|
||||
)
|
||||
proxy_recovery_time: Optional[int] = Field(
|
||||
300, ge=60, le=3600, description="Recovery time in seconds for failure_aware strategy"
|
||||
)
|
||||
|
||||
# Table extraction configuration
|
||||
table_extraction: Optional[TableExtractionConfig] = Field(
|
||||
None, description="Optional table extraction configuration to extract tables during crawl"
|
||||
)
|
||||
|
||||
|
||||
class HookConfig(BaseModel):
|
||||
"""Configuration for user-provided hooks"""
|
||||
|
||||
code: Dict[str, str] = Field(
|
||||
default_factory=dict,
|
||||
description="Map of hook points to Python code strings"
|
||||
default_factory=dict, description="Map of hook points to Python code strings"
|
||||
)
|
||||
timeout: int = Field(
|
||||
default=30,
|
||||
ge=1,
|
||||
le=120,
|
||||
description="Timeout in seconds for each hook execution"
|
||||
description="Timeout in seconds for each hook execution",
|
||||
)
|
||||
|
||||
|
||||
class Config:
|
||||
schema_extra = {
|
||||
"example": {
|
||||
@@ -39,42 +260,81 @@ async def hook(page, context, **kwargs):
|
||||
await page.evaluate("window.scrollTo(0, document.body.scrollHeight)")
|
||||
await page.wait_for_timeout(2000)
|
||||
return page
|
||||
"""
|
||||
""",
|
||||
},
|
||||
"timeout": 30
|
||||
"timeout": 30,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
class CrawlRequestWithHooks(CrawlRequest):
|
||||
"""Extended crawl request with hooks support"""
|
||||
|
||||
hooks: Optional[HookConfig] = Field(
|
||||
default=None,
|
||||
description="Optional user-provided hook functions"
|
||||
default=None, description="Optional user-provided hook functions"
|
||||
)
|
||||
|
||||
|
||||
class HTTPCrawlRequest(BaseModel):
|
||||
"""Request model for HTTP-only crawling endpoints."""
|
||||
|
||||
urls: List[str] = Field(min_length=1, max_length=100, description="List of URLs to crawl")
|
||||
http_config: Optional[Dict] = Field(
|
||||
default_factory=dict,
|
||||
description="HTTP crawler configuration (method, headers, timeout, etc.)"
|
||||
)
|
||||
crawler_config: Optional[Dict] = Field(
|
||||
default_factory=dict,
|
||||
description="Crawler run configuration (extraction, filtering, etc.)"
|
||||
)
|
||||
|
||||
# Dispatcher selection (same as browser crawling)
|
||||
dispatcher: Optional[DispatcherType] = Field(
|
||||
None,
|
||||
description="Dispatcher type to use. Defaults to memory_adaptive if not specified."
|
||||
)
|
||||
|
||||
|
||||
class HTTPCrawlRequestWithHooks(HTTPCrawlRequest):
|
||||
"""Extended HTTP crawl request with hooks support"""
|
||||
|
||||
hooks: Optional[HookConfig] = Field(
|
||||
default=None, description="Optional user-provided hook functions"
|
||||
)
|
||||
|
||||
|
||||
class MarkdownRequest(BaseModel):
|
||||
"""Request body for the /md endpoint."""
|
||||
url: str = Field(..., description="Absolute http/https URL to fetch")
|
||||
f: FilterType = Field(FilterType.FIT, description="Content‑filter strategy: fit, raw, bm25, or llm")
|
||||
q: Optional[str] = Field(None, description="Query string used by BM25/LLM filters")
|
||||
c: Optional[str] = Field("0", description="Cache‑bust / revision counter")
|
||||
provider: Optional[str] = Field(None, description="LLM provider override (e.g., 'anthropic/claude-3-opus')")
|
||||
temperature: Optional[float] = Field(None, description="LLM temperature override (0.0-2.0)")
|
||||
|
||||
url: str = Field(..., description="Absolute http/https URL to fetch")
|
||||
f: FilterType = Field(
|
||||
FilterType.FIT, description="Content‑filter strategy: fit, raw, bm25, or llm"
|
||||
)
|
||||
q: Optional[str] = Field(None, description="Query string used by BM25/LLM filters")
|
||||
c: Optional[str] = Field("0", description="Cache‑bust / revision counter")
|
||||
provider: Optional[str] = Field(
|
||||
None, description="LLM provider override (e.g., 'anthropic/claude-3-opus')"
|
||||
)
|
||||
temperature: Optional[float] = Field(
|
||||
None, description="LLM temperature override (0.0-2.0)"
|
||||
)
|
||||
base_url: Optional[str] = Field(None, description="LLM API base URL override")
|
||||
|
||||
|
||||
class RawCode(BaseModel):
|
||||
code: str
|
||||
|
||||
|
||||
class HTMLRequest(BaseModel):
|
||||
url: str
|
||||
|
||||
|
||||
|
||||
class ScreenshotRequest(BaseModel):
|
||||
url: str
|
||||
screenshot_wait_for: Optional[float] = 2
|
||||
output_path: Optional[str] = None
|
||||
|
||||
|
||||
class PDFRequest(BaseModel):
|
||||
url: str
|
||||
output_path: Optional[str] = None
|
||||
@@ -83,24 +343,89 @@ class PDFRequest(BaseModel):
|
||||
class JSEndpointRequest(BaseModel):
|
||||
url: str
|
||||
scripts: List[str] = Field(
|
||||
...,
|
||||
description="List of separated JavaScript snippets to execute"
|
||||
..., description="List of separated JavaScript snippets to execute"
|
||||
)
|
||||
|
||||
|
||||
class WebhookConfig(BaseModel):
|
||||
"""Configuration for webhook notifications."""
|
||||
webhook_url: HttpUrl
|
||||
webhook_data_in_payload: bool = False
|
||||
webhook_headers: Optional[Dict[str, str]] = None
|
||||
class SeedRequest(BaseModel):
|
||||
"""Request model for URL seeding endpoint."""
|
||||
|
||||
url: str = Field(..., example="https://docs.crawl4ai.com")
|
||||
config: Dict[str, Any] = Field(default_factory=dict)
|
||||
|
||||
|
||||
class WebhookPayload(BaseModel):
|
||||
"""Payload sent to webhook endpoints."""
|
||||
class URLDiscoveryRequest(BaseModel):
|
||||
"""Request model for URL discovery endpoint."""
|
||||
|
||||
domain: str = Field(..., example="docs.crawl4ai.com", description="Domain to discover URLs from")
|
||||
seeding_config: Dict[str, Any] = Field(
|
||||
default_factory=dict,
|
||||
description="Configuration for URL discovery using AsyncUrlSeeder",
|
||||
example={
|
||||
"source": "sitemap+cc",
|
||||
"pattern": "*",
|
||||
"live_check": False,
|
||||
"extract_head": False,
|
||||
"max_urls": -1,
|
||||
"concurrency": 1000,
|
||||
"hits_per_sec": 5,
|
||||
"force": False,
|
||||
"verbose": False,
|
||||
"query": None,
|
||||
"score_threshold": None,
|
||||
"scoring_method": "bm25",
|
||||
"filter_nonsense_urls": True
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
# --- C4A Script Schemas ---
|
||||
|
||||
|
||||
class C4AScriptPayload(BaseModel):
|
||||
"""Input model for receiving a C4A-Script."""
|
||||
|
||||
script: str = Field(..., description="The C4A-Script content to process.")
|
||||
|
||||
|
||||
# --- Adaptive Crawling Schemas ---
|
||||
|
||||
|
||||
class AdaptiveConfigPayload(BaseModel):
|
||||
"""Pydantic model for receiving AdaptiveConfig parameters."""
|
||||
|
||||
confidence_threshold: float = 0.7
|
||||
max_pages: int = 20
|
||||
top_k_links: int = 3
|
||||
strategy: str = "statistical" # "statistical" or "embedding"
|
||||
embedding_model: Optional[str] = "sentence-transformers/all-MiniLM-L6-v2"
|
||||
# Add any other AdaptiveConfig fields you want to expose
|
||||
|
||||
|
||||
class AdaptiveCrawlRequest(BaseModel):
|
||||
"""Input model for the adaptive digest job."""
|
||||
|
||||
start_url: str = Field(..., description="The starting URL for the adaptive crawl.")
|
||||
query: str = Field(..., description="The user query to guide the crawl.")
|
||||
config: Optional[AdaptiveConfigPayload] = Field(
|
||||
None, description="Optional adaptive crawler configuration."
|
||||
)
|
||||
|
||||
|
||||
class AdaptiveJobStatus(BaseModel):
|
||||
"""Output model for the job status."""
|
||||
|
||||
task_id: str
|
||||
task_type: str # "crawl", "llm_extraction", etc.
|
||||
status: str # "completed" or "failed"
|
||||
timestamp: str # ISO 8601 format
|
||||
urls: List[str]
|
||||
status: str
|
||||
metrics: Optional[Dict[str, Any]] = None
|
||||
result: Optional[Dict[str, Any]] = None
|
||||
error: Optional[str] = None
|
||||
data: Optional[Dict] = None # Included only if webhook_data_in_payload=True
|
||||
|
||||
|
||||
class LinkAnalysisRequest(BaseModel):
|
||||
"""Request body for the /links/analyze endpoint."""
|
||||
url: str = Field(..., description="URL to analyze for links")
|
||||
config: Optional[Dict] = Field(
|
||||
default_factory=dict,
|
||||
description="Optional LinkPreviewConfig dictionary"
|
||||
)
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
Binary file not shown.
|
Before Width: | Height: | Size: 5.8 KiB |
Binary file not shown.
|
Before Width: | Height: | Size: 1.6 KiB |
Binary file not shown.
|
Before Width: | Height: | Size: 11 KiB |
File diff suppressed because it is too large
Load Diff
@@ -167,14 +167,11 @@
    </a>
</h1>

<div class="ml-auto flex items-center space-x-4">
    <a href="/dashboard" class="text-xs text-secondary hover:text-primary underline">Monitor</a>
    <div class="flex space-x-2">
        <button id="play-tab"
            class="px-3 py-1 rounded-t bg-surface border border-b-0 border-border text-primary">Playground</button>
        <button id="stress-tab" class="px-3 py-1 rounded-t border border-border hover:bg-surface">Stress
            Test</button>
    </div>
<div class="ml-auto flex space-x-2">
    <button id="play-tab"
        class="px-3 py-1 rounded-t bg-surface border border-b-0 border-border text-primary">Playground</button>
    <button id="stress-tab" class="px-3 py-1 rounded-t border border-border hover:bg-surface">Stress
        Test</button>
</div>
</header>

@@ -1,34 +0,0 @@
#!/usr/bin/env python3
"""
Quick WebSocket test - Connect to monitor WebSocket and print updates
"""
import asyncio
import websockets
import json


async def test_websocket():
    uri = "ws://localhost:11235/monitor/ws"
    print(f"Connecting to {uri}...")

    try:
        async with websockets.connect(uri) as websocket:
            print("✅ Connected!")

            # Receive and print 5 updates
            for i in range(5):
                message = await websocket.recv()
                data = json.loads(message)
                print(f"\n📊 Update #{i+1}:")
                print(f"  - Health: CPU {data['health']['container']['cpu_percent']}%, Memory {data['health']['container']['memory_percent']}%")
                print(f"  - Active Requests: {len(data['requests']['active'])}")
                print(f"  - Browsers: {len(data['browsers'])}")

    except Exception as e:
        print(f"❌ Error: {e}")
        return 1

    print("\n✅ WebSocket test passed!")
    return 0


if __name__ == "__main__":
    exit(asyncio.run(test_websocket()))
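For environments where a WebSocket client is inconvenient, the same snapshot can be polled over plain HTTP; /monitor/health and the two fields printed below are taken from the demo and quick-test scripts elsewhere in this changeset, while anything beyond that is an assumption:

# Hedged polling sketch using the /monitor/health endpoint seen in the scripts below.
import httpx

health = httpx.get("http://localhost:11235/monitor/health", timeout=10.0).json()
print(health["container"]["memory_percent"], health["pool"]["permanent"]["active"])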
@@ -1,164 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Monitor Dashboard Demo Script
|
||||
Generates varied activity to showcase all monitoring features for video recording.
|
||||
"""
|
||||
import httpx
|
||||
import asyncio
|
||||
import time
|
||||
from datetime import datetime
|
||||
|
||||
BASE_URL = "http://localhost:11235"
|
||||
|
||||
async def demo_dashboard():
|
||||
print("🎬 Monitor Dashboard Demo - Starting...\n")
|
||||
print(f"📊 Dashboard: {BASE_URL}/dashboard")
|
||||
print("=" * 60)
|
||||
|
||||
async with httpx.AsyncClient(timeout=60.0) as client:
|
||||
|
||||
# Phase 1: Simple requests (permanent browser)
|
||||
print("\n🔷 Phase 1: Testing permanent browser pool")
|
||||
print("-" * 60)
|
||||
for i in range(5):
|
||||
print(f" {i+1}/5 Request to /crawl (default config)...")
|
||||
try:
|
||||
r = await client.post(
|
||||
f"{BASE_URL}/crawl",
|
||||
json={"urls": [f"https://httpbin.org/html?req={i}"], "crawler_config": {}}
|
||||
)
|
||||
print(f" ✅ Status: {r.status_code}, Time: {r.elapsed.total_seconds():.2f}s")
|
||||
except Exception as e:
|
||||
print(f" ❌ Error: {e}")
|
||||
await asyncio.sleep(1) # Small delay between requests
|
||||
|
||||
# Phase 2: Create variant browsers (different configs)
|
||||
print("\n🔶 Phase 2: Testing cold→hot pool promotion")
|
||||
print("-" * 60)
|
||||
viewports = [
|
||||
{"width": 1920, "height": 1080},
|
||||
{"width": 1280, "height": 720},
|
||||
{"width": 800, "height": 600}
|
||||
]
|
||||
|
||||
for idx, viewport in enumerate(viewports):
|
||||
print(f" Viewport {viewport['width']}x{viewport['height']}:")
|
||||
for i in range(4): # 4 requests each to trigger promotion at 3
|
||||
try:
|
||||
r = await client.post(
|
||||
f"{BASE_URL}/crawl",
|
||||
json={
|
||||
"urls": [f"https://httpbin.org/json?v={idx}&r={i}"],
|
||||
"browser_config": {"viewport": viewport},
|
||||
"crawler_config": {}
|
||||
}
|
||||
)
|
||||
print(f" {i+1}/4 ✅ {r.status_code} - Should see cold→hot after 3 uses")
|
||||
except Exception as e:
|
||||
print(f" {i+1}/4 ❌ {e}")
|
||||
await asyncio.sleep(0.5)
|
||||
|
||||
# Phase 3: Concurrent burst (stress pool)
|
||||
print("\n🔷 Phase 3: Concurrent burst (10 parallel)")
|
||||
print("-" * 60)
|
||||
tasks = []
|
||||
for i in range(10):
|
||||
tasks.append(
|
||||
client.post(
|
||||
f"{BASE_URL}/crawl",
|
||||
json={"urls": [f"https://httpbin.org/delay/2?burst={i}"], "crawler_config": {}}
|
||||
)
|
||||
)
|
||||
|
||||
print(" Sending 10 concurrent requests...")
|
||||
start = time.time()
|
||||
results = await asyncio.gather(*tasks, return_exceptions=True)
|
||||
elapsed = time.time() - start
|
||||
|
||||
successes = sum(1 for r in results if not isinstance(r, Exception) and r.status_code == 200)
|
||||
print(f" ✅ {successes}/10 succeeded in {elapsed:.2f}s")
|
||||
|
||||
# Phase 4: Multi-endpoint coverage
|
||||
print("\n🔶 Phase 4: Testing multiple endpoints")
|
||||
print("-" * 60)
|
||||
endpoints = [
|
||||
("/md", {"url": "https://httpbin.org/html", "f": "fit", "c": "0"}),
|
||||
("/screenshot", {"url": "https://httpbin.org/html"}),
|
||||
("/pdf", {"url": "https://httpbin.org/html"}),
|
||||
]
|
||||
|
||||
for endpoint, payload in endpoints:
|
||||
print(f" Testing {endpoint}...")
|
||||
try:
|
||||
if endpoint == "/md":
|
||||
r = await client.post(f"{BASE_URL}{endpoint}", json=payload)
|
||||
else:
|
||||
r = await client.post(f"{BASE_URL}{endpoint}", json=payload)
|
||||
print(f" ✅ {r.status_code}")
|
||||
except Exception as e:
|
||||
print(f" ❌ {e}")
|
||||
await asyncio.sleep(1)
|
||||
|
||||
# Phase 5: Intentional error (to populate errors tab)
|
||||
print("\n🔷 Phase 5: Generating error examples")
|
||||
print("-" * 60)
|
||||
print(" Triggering invalid URL error...")
|
||||
try:
|
||||
r = await client.post(
|
||||
f"{BASE_URL}/crawl",
|
||||
json={"urls": ["invalid://bad-url"], "crawler_config": {}}
|
||||
)
|
||||
print(f" Response: {r.status_code}")
|
||||
except Exception as e:
|
||||
print(f" ✅ Error captured: {type(e).__name__}")
|
||||
|
||||
# Phase 6: Wait for janitor activity
|
||||
print("\n🔶 Phase 6: Waiting for janitor cleanup...")
|
||||
print("-" * 60)
|
||||
print(" Idle for 40s to allow janitor to clean cold pool browsers...")
|
||||
for i in range(40, 0, -10):
|
||||
print(f" {i}s remaining... (Check dashboard for cleanup events)")
|
||||
await asyncio.sleep(10)
|
||||
|
||||
# Phase 7: Final stats check
|
||||
print("\n🔷 Phase 7: Final dashboard state")
|
||||
print("-" * 60)
|
||||
|
||||
r = await client.get(f"{BASE_URL}/monitor/health")
|
||||
health = r.json()
|
||||
print(f" Memory: {health['container']['memory_percent']:.1f}%")
|
||||
print(f" Browsers: Perm={health['pool']['permanent']['active']}, "
|
||||
f"Hot={health['pool']['hot']['count']}, Cold={health['pool']['cold']['count']}")
|
||||
|
||||
r = await client.get(f"{BASE_URL}/monitor/endpoints/stats")
|
||||
stats = r.json()
|
||||
print(f"\n Endpoint Stats:")
|
||||
for endpoint, data in stats.items():
|
||||
print(f" {endpoint}: {data['count']} req, "
|
||||
f"{data['avg_latency_ms']:.0f}ms avg, "
|
||||
f"{data['success_rate_percent']:.1f}% success")
|
||||
|
||||
r = await client.get(f"{BASE_URL}/monitor/browsers")
|
||||
browsers = r.json()
|
||||
print(f"\n Pool Efficiency:")
|
||||
print(f" Total browsers: {browsers['summary']['total_count']}")
|
||||
print(f" Memory usage: {browsers['summary']['total_memory_mb']} MB")
|
||||
print(f" Reuse rate: {browsers['summary']['reuse_rate_percent']:.1f}%")
|
||||
|
||||
print("\n" + "=" * 60)
|
||||
print("✅ Demo complete! Dashboard is now populated with rich data.")
|
||||
print(f"\n📹 Recording tip: Refresh {BASE_URL}/dashboard")
|
||||
print(" You should see:")
|
||||
print(" • Active & completed requests")
|
||||
print(" • Browser pool (permanent + hot/cold)")
|
||||
print(" • Janitor cleanup events")
|
||||
print(" • Endpoint analytics")
|
||||
print(" • Memory timeline")
|
||||
|
||||
if __name__ == "__main__":
|
||||
try:
|
||||
asyncio.run(demo_dashboard())
|
||||
except KeyboardInterrupt:
|
||||
print("\n\n⚠️ Demo interrupted by user")
|
||||
except Exception as e:
|
||||
print(f"\n\n❌ Demo failed: {e}")
|
||||
@@ -1,2 +0,0 @@
httpx>=0.25.0
docker>=7.0.0
@@ -1,138 +0,0 @@
#!/usr/bin/env python3
"""
Test 1: Basic Container Health + Single Endpoint
- Starts container
- Hits /health endpoint 10 times
- Reports success rate and basic latency
"""
import asyncio
import time
import docker
import httpx

# Config
IMAGE = "crawl4ai-local:latest"
CONTAINER_NAME = "crawl4ai-test"
PORT = 11235
REQUESTS = 10


async def test_endpoint(url: str, count: int):
    """Hit endpoint multiple times, return stats."""
    results = []
    async with httpx.AsyncClient(timeout=30.0) as client:
        for i in range(count):
            start = time.time()
            try:
                resp = await client.get(url)
                elapsed = (time.time() - start) * 1000  # ms
                results.append({
                    "success": resp.status_code == 200,
                    "latency_ms": elapsed,
                    "status": resp.status_code
                })
                print(f"  [{i+1}/{count}] ✓ {resp.status_code} - {elapsed:.0f}ms")
            except Exception as e:
                results.append({
                    "success": False,
                    "latency_ms": None,
                    "error": str(e)
                })
                print(f"  [{i+1}/{count}] ✗ Error: {e}")
    return results


def start_container(client, image: str, name: str, port: int):
    """Start container, return container object."""
    # Clean up existing
    try:
        old = client.containers.get(name)
        print(f"🧹 Stopping existing container '{name}'...")
        old.stop()
        old.remove()
    except docker.errors.NotFound:
        pass

    print(f"🚀 Starting container '{name}' from image '{image}'...")
    container = client.containers.run(
        image,
        name=name,
        ports={f"{port}/tcp": port},
        detach=True,
        shm_size="1g",
        environment={"PYTHON_ENV": "production"}
    )

    # Wait for health
    print(f"⏳ Waiting for container to be healthy...")
    for _ in range(30):  # 30s timeout
        time.sleep(1)
        container.reload()
        if container.status == "running":
            try:
                # Quick health check
                import requests
                resp = requests.get(f"http://localhost:{port}/health", timeout=2)
                if resp.status_code == 200:
                    print(f"✅ Container healthy!")
                    return container
            except:
                pass
    raise TimeoutError("Container failed to start")


def stop_container(container):
    """Stop and remove container."""
    print(f"🛑 Stopping container...")
    container.stop()
    container.remove()
    print(f"✅ Container removed")


async def main():
    print("="*60)
    print("TEST 1: Basic Container Health + Single Endpoint")
    print("="*60)

    client = docker.from_env()
    container = None

    try:
        # Start container
        container = start_container(client, IMAGE, CONTAINER_NAME, PORT)

        # Test /health endpoint
        print(f"\n📊 Testing /health endpoint ({REQUESTS} requests)...")
        url = f"http://localhost:{PORT}/health"
        results = await test_endpoint(url, REQUESTS)

        # Calculate stats
        successes = sum(1 for r in results if r["success"])
        success_rate = (successes / len(results)) * 100
        latencies = [r["latency_ms"] for r in results if r["latency_ms"] is not None]
        avg_latency = sum(latencies) / len(latencies) if latencies else 0

        # Print results
        print(f"\n{'='*60}")
        print(f"RESULTS:")
        print(f"  Success Rate: {success_rate:.1f}% ({successes}/{len(results)})")
        print(f"  Avg Latency: {avg_latency:.0f}ms")
        if latencies:
            print(f"  Min Latency: {min(latencies):.0f}ms")
            print(f"  Max Latency: {max(latencies):.0f}ms")
        print(f"{'='*60}")

        # Pass/Fail
        if success_rate >= 100:
            print(f"✅ TEST PASSED")
            return 0
        else:
            print(f"❌ TEST FAILED (expected 100% success rate)")
            return 1

    except Exception as e:
        print(f"\n❌ TEST ERROR: {e}")
        return 1
    finally:
        if container:
            stop_container(container)


if __name__ == "__main__":
    exit_code = asyncio.run(main())
    exit(exit_code)
@@ -1,205 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Test 2: Docker Stats Monitoring
|
||||
- Extends Test 1 with real-time container stats
|
||||
- Monitors memory % and CPU during requests
|
||||
- Reports baseline, peak, and final memory
|
||||
"""
|
||||
import asyncio
|
||||
import time
|
||||
import docker
|
||||
import httpx
|
||||
from threading import Thread, Event
|
||||
|
||||
# Config
|
||||
IMAGE = "crawl4ai-local:latest"
|
||||
CONTAINER_NAME = "crawl4ai-test"
|
||||
PORT = 11235
|
||||
REQUESTS = 20 # More requests to see memory usage
|
||||
|
||||
# Stats tracking
|
||||
stats_history = []
|
||||
stop_monitoring = Event()
|
||||
|
||||
def monitor_stats(container):
|
||||
"""Background thread to collect container stats."""
|
||||
for stat in container.stats(decode=True, stream=True):
|
||||
if stop_monitoring.is_set():
|
||||
break
|
||||
|
||||
try:
|
||||
# Extract memory stats
|
||||
mem_usage = stat['memory_stats'].get('usage', 0) / (1024 * 1024) # MB
|
||||
mem_limit = stat['memory_stats'].get('limit', 1) / (1024 * 1024)
|
||||
mem_percent = (mem_usage / mem_limit * 100) if mem_limit > 0 else 0
|
||||
|
||||
# Extract CPU stats (handle missing fields on Mac)
|
||||
cpu_percent = 0
|
||||
try:
|
||||
cpu_delta = stat['cpu_stats']['cpu_usage']['total_usage'] - \
|
||||
stat['precpu_stats']['cpu_usage']['total_usage']
|
||||
system_delta = stat['cpu_stats'].get('system_cpu_usage', 0) - \
|
||||
stat['precpu_stats'].get('system_cpu_usage', 0)
|
||||
if system_delta > 0:
|
||||
num_cpus = stat['cpu_stats'].get('online_cpus', 1)
|
||||
cpu_percent = (cpu_delta / system_delta * num_cpus * 100.0)
|
||||
except (KeyError, ZeroDivisionError):
|
||||
pass
|
||||
|
||||
stats_history.append({
|
||||
'timestamp': time.time(),
|
||||
'memory_mb': mem_usage,
|
||||
'memory_percent': mem_percent,
|
||||
'cpu_percent': cpu_percent
|
||||
})
|
||||
except Exception as e:
|
||||
# Skip malformed stats
|
||||
pass
|
||||
|
||||
time.sleep(0.5) # Sample every 500ms
|
||||
|
||||
async def test_endpoint(url: str, count: int):
|
||||
"""Hit endpoint, return stats."""
|
||||
results = []
|
||||
async with httpx.AsyncClient(timeout=30.0) as client:
|
||||
for i in range(count):
|
||||
start = time.time()
|
||||
try:
|
||||
resp = await client.get(url)
|
||||
elapsed = (time.time() - start) * 1000
|
||||
results.append({
|
||||
"success": resp.status_code == 200,
|
||||
"latency_ms": elapsed,
|
||||
})
|
||||
if (i + 1) % 5 == 0: # Print every 5 requests
|
||||
print(f" [{i+1}/{count}] ✓ {resp.status_code} - {elapsed:.0f}ms")
|
||||
except Exception as e:
|
||||
results.append({"success": False, "error": str(e)})
|
||||
print(f" [{i+1}/{count}] ✗ Error: {e}")
|
||||
return results
|
||||
|
||||
def start_container(client, image: str, name: str, port: int):
|
||||
"""Start container."""
|
||||
try:
|
||||
old = client.containers.get(name)
|
||||
print(f"🧹 Stopping existing container '{name}'...")
|
||||
old.stop()
|
||||
old.remove()
|
||||
except docker.errors.NotFound:
|
||||
pass
|
||||
|
||||
print(f"🚀 Starting container '{name}'...")
|
||||
container = client.containers.run(
|
||||
image,
|
||||
name=name,
|
||||
ports={f"{port}/tcp": port},
|
||||
detach=True,
|
||||
shm_size="1g",
|
||||
mem_limit="4g", # Set explicit memory limit
|
||||
)
|
||||
|
||||
print(f"⏳ Waiting for health...")
|
||||
for _ in range(30):
|
||||
time.sleep(1)
|
||||
container.reload()
|
||||
if container.status == "running":
|
||||
try:
|
||||
import requests
|
||||
resp = requests.get(f"http://localhost:{port}/health", timeout=2)
|
||||
if resp.status_code == 200:
|
||||
print(f"✅ Container healthy!")
|
||||
return container
|
||||
except:
|
||||
pass
|
||||
raise TimeoutError("Container failed to start")
|
||||
|
||||
def stop_container(container):
|
||||
"""Stop container."""
|
||||
print(f"🛑 Stopping container...")
|
||||
container.stop()
|
||||
container.remove()
|
||||
|
||||
async def main():
|
||||
print("="*60)
|
||||
print("TEST 2: Docker Stats Monitoring")
|
||||
print("="*60)
|
||||
|
||||
client = docker.from_env()
|
||||
container = None
|
||||
monitor_thread = None
|
||||
|
||||
try:
|
||||
# Start container
|
||||
container = start_container(client, IMAGE, CONTAINER_NAME, PORT)
|
||||
|
||||
# Start stats monitoring in background
|
||||
print(f"\n📊 Starting stats monitor...")
|
||||
stop_monitoring.clear()
|
||||
stats_history.clear()
|
||||
monitor_thread = Thread(target=monitor_stats, args=(container,), daemon=True)
|
||||
monitor_thread.start()
|
||||
|
||||
# Wait a bit for baseline
|
||||
await asyncio.sleep(2)
|
||||
baseline_mem = stats_history[-1]['memory_mb'] if stats_history else 0
|
||||
print(f"📏 Baseline memory: {baseline_mem:.1f} MB")
|
||||
|
||||
# Test /health endpoint
|
||||
print(f"\n🔄 Running {REQUESTS} requests to /health...")
|
||||
url = f"http://localhost:{PORT}/health"
|
||||
results = await test_endpoint(url, REQUESTS)
|
||||
|
||||
# Wait a bit to capture peak
|
||||
await asyncio.sleep(1)
|
||||
|
||||
# Stop monitoring
|
||||
stop_monitoring.set()
|
||||
if monitor_thread:
|
||||
monitor_thread.join(timeout=2)
|
||||
|
||||
# Calculate stats
|
||||
successes = sum(1 for r in results if r.get("success"))
|
||||
success_rate = (successes / len(results)) * 100
|
||||
latencies = [r["latency_ms"] for r in results if "latency_ms" in r]
|
||||
avg_latency = sum(latencies) / len(latencies) if latencies else 0
|
||||
|
||||
# Memory stats
|
||||
memory_samples = [s['memory_mb'] for s in stats_history]
|
||||
peak_mem = max(memory_samples) if memory_samples else 0
|
||||
final_mem = memory_samples[-1] if memory_samples else 0
|
||||
mem_delta = final_mem - baseline_mem
|
||||
|
||||
# Print results
|
||||
print(f"\n{'='*60}")
|
||||
print(f"RESULTS:")
|
||||
print(f" Success Rate: {success_rate:.1f}% ({successes}/{len(results)})")
|
||||
print(f" Avg Latency: {avg_latency:.0f}ms")
|
||||
print(f"\n Memory Stats:")
|
||||
print(f" Baseline: {baseline_mem:.1f} MB")
|
||||
print(f" Peak: {peak_mem:.1f} MB")
|
||||
print(f" Final: {final_mem:.1f} MB")
|
||||
print(f" Delta: {mem_delta:+.1f} MB")
|
||||
print(f"{'='*60}")
|
||||
|
||||
# Pass/Fail
|
||||
if success_rate >= 100 and mem_delta < 100: # No significant memory growth
|
||||
print(f"✅ TEST PASSED")
|
||||
return 0
|
||||
else:
|
||||
if success_rate < 100:
|
||||
print(f"❌ TEST FAILED (success rate < 100%)")
|
||||
if mem_delta >= 100:
|
||||
print(f"⚠️ WARNING: Memory grew by {mem_delta:.1f} MB")
|
||||
return 1
|
||||
|
||||
except Exception as e:
|
||||
print(f"\n❌ TEST ERROR: {e}")
|
||||
return 1
|
||||
finally:
|
||||
stop_monitoring.set()
|
||||
if container:
|
||||
stop_container(container)
|
||||
|
||||
if __name__ == "__main__":
|
||||
exit_code = asyncio.run(main())
|
||||
exit(exit_code)
|
||||
@@ -1,229 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Test 3: Pool Validation - Permanent Browser Reuse
|
||||
- Tests /html endpoint (should use permanent browser)
|
||||
- Monitors container logs for pool hit markers
|
||||
- Validates browser reuse rate
|
||||
- Checks memory after browser creation
|
||||
"""
|
||||
import asyncio
|
||||
import time
|
||||
import docker
|
||||
import httpx
|
||||
from threading import Thread, Event
|
||||
|
||||
# Config
|
||||
IMAGE = "crawl4ai-local:latest"
|
||||
CONTAINER_NAME = "crawl4ai-test"
|
||||
PORT = 11235
|
||||
REQUESTS = 30
|
||||
|
||||
# Stats tracking
|
||||
stats_history = []
|
||||
stop_monitoring = Event()
|
||||
|
||||
def monitor_stats(container):
|
||||
"""Background stats collector."""
|
||||
for stat in container.stats(decode=True, stream=True):
|
||||
if stop_monitoring.is_set():
|
||||
break
|
||||
try:
|
||||
mem_usage = stat['memory_stats'].get('usage', 0) / (1024 * 1024)
|
||||
stats_history.append({
|
||||
'timestamp': time.time(),
|
||||
'memory_mb': mem_usage,
|
||||
})
|
||||
except:
|
||||
pass
|
||||
time.sleep(0.5)
|
||||
|
||||
def count_log_markers(container):
|
||||
"""Extract pool usage markers from logs."""
|
||||
logs = container.logs().decode('utf-8')
|
||||
|
||||
permanent_hits = logs.count("🔥 Using permanent browser")
|
||||
hot_hits = logs.count("♨️ Using hot pool browser")
|
||||
cold_hits = logs.count("❄️ Using cold pool browser")
|
||||
new_created = logs.count("🆕 Creating new browser")
|
||||
|
||||
return {
|
||||
'permanent_hits': permanent_hits,
|
||||
'hot_hits': hot_hits,
|
||||
'cold_hits': cold_hits,
|
||||
'new_created': new_created,
|
||||
'total_hits': permanent_hits + hot_hits + cold_hits
|
||||
}
|
||||
|
||||
async def test_endpoint(url: str, count: int):
|
||||
"""Hit endpoint multiple times."""
|
||||
results = []
|
||||
async with httpx.AsyncClient(timeout=60.0) as client:
|
||||
for i in range(count):
|
||||
start = time.time()
|
||||
try:
|
||||
resp = await client.post(url, json={"url": "https://httpbin.org/html"})
|
||||
elapsed = (time.time() - start) * 1000
|
||||
results.append({
|
||||
"success": resp.status_code == 200,
|
||||
"latency_ms": elapsed,
|
||||
})
|
||||
if (i + 1) % 10 == 0:
|
||||
print(f" [{i+1}/{count}] ✓ {resp.status_code} - {elapsed:.0f}ms")
|
||||
except Exception as e:
|
||||
results.append({"success": False, "error": str(e)})
|
||||
print(f" [{i+1}/{count}] ✗ Error: {e}")
|
||||
return results
|
||||
|
||||
def start_container(client, image: str, name: str, port: int):
|
||||
"""Start container."""
|
||||
try:
|
||||
old = client.containers.get(name)
|
||||
print(f"🧹 Stopping existing container...")
|
||||
old.stop()
|
||||
old.remove()
|
||||
except docker.errors.NotFound:
|
||||
pass
|
||||
|
||||
print(f"🚀 Starting container...")
|
||||
container = client.containers.run(
|
||||
image,
|
||||
name=name,
|
||||
ports={f"{port}/tcp": port},
|
||||
detach=True,
|
||||
shm_size="1g",
|
||||
mem_limit="4g",
|
||||
)
|
||||
|
||||
print(f"⏳ Waiting for health...")
|
||||
for _ in range(30):
|
||||
time.sleep(1)
|
||||
container.reload()
|
||||
if container.status == "running":
|
||||
try:
|
||||
import requests
|
||||
resp = requests.get(f"http://localhost:{port}/health", timeout=2)
|
||||
if resp.status_code == 200:
|
||||
print(f"✅ Container healthy!")
|
||||
return container
|
||||
except:
|
||||
pass
|
||||
raise TimeoutError("Container failed to start")
|
||||
|
||||
def stop_container(container):
|
||||
"""Stop container."""
|
||||
print(f"🛑 Stopping container...")
|
||||
container.stop()
|
||||
container.remove()
|
||||
|
||||
async def main():
|
||||
print("="*60)
|
||||
print("TEST 3: Pool Validation - Permanent Browser Reuse")
|
||||
print("="*60)
|
||||
|
||||
client = docker.from_env()
|
||||
container = None
|
||||
monitor_thread = None
|
||||
|
||||
try:
|
||||
# Start container
|
||||
container = start_container(client, IMAGE, CONTAINER_NAME, PORT)
|
||||
|
||||
# Wait for permanent browser initialization
|
||||
print(f"\n⏳ Waiting for permanent browser init (3s)...")
|
||||
await asyncio.sleep(3)
|
||||
|
||||
# Start stats monitoring
|
||||
print(f"📊 Starting stats monitor...")
|
||||
stop_monitoring.clear()
|
||||
stats_history.clear()
|
||||
monitor_thread = Thread(target=monitor_stats, args=(container,), daemon=True)
|
||||
monitor_thread.start()
|
||||
|
||||
await asyncio.sleep(1)
|
||||
baseline_mem = stats_history[-1]['memory_mb'] if stats_history else 0
|
||||
print(f"📏 Baseline (with permanent browser): {baseline_mem:.1f} MB")
|
||||
|
||||
# Test /html endpoint (uses permanent browser for default config)
|
||||
print(f"\n🔄 Running {REQUESTS} requests to /html...")
|
||||
url = f"http://localhost:{PORT}/html"
|
||||
results = await test_endpoint(url, REQUESTS)
|
||||
|
||||
# Wait a bit
|
||||
await asyncio.sleep(1)
|
||||
|
||||
# Stop monitoring
|
||||
stop_monitoring.set()
|
||||
if monitor_thread:
|
||||
monitor_thread.join(timeout=2)
|
||||
|
||||
# Analyze logs for pool markers
|
||||
print(f"\n📋 Analyzing pool usage...")
|
||||
pool_stats = count_log_markers(container)
|
||||
|
||||
# Calculate request stats
|
||||
successes = sum(1 for r in results if r.get("success"))
|
||||
success_rate = (successes / len(results)) * 100
|
||||
latencies = [r["latency_ms"] for r in results if "latency_ms" in r]
|
||||
avg_latency = sum(latencies) / len(latencies) if latencies else 0
|
||||
|
||||
# Memory stats
|
||||
memory_samples = [s['memory_mb'] for s in stats_history]
|
||||
peak_mem = max(memory_samples) if memory_samples else 0
|
||||
final_mem = memory_samples[-1] if memory_samples else 0
|
||||
mem_delta = final_mem - baseline_mem
|
||||
|
||||
# Calculate reuse rate
|
||||
total_requests = len(results)
|
||||
total_pool_hits = pool_stats['total_hits']
|
||||
reuse_rate = (total_pool_hits / total_requests * 100) if total_requests > 0 else 0
|
||||
|
||||
# Print results
|
||||
print(f"\n{'='*60}")
|
||||
print(f"RESULTS:")
|
||||
print(f" Success Rate: {success_rate:.1f}% ({successes}/{len(results)})")
|
||||
print(f" Avg Latency: {avg_latency:.0f}ms")
|
||||
print(f"\n Pool Stats:")
|
||||
print(f" 🔥 Permanent Hits: {pool_stats['permanent_hits']}")
|
||||
print(f" ♨️ Hot Pool Hits: {pool_stats['hot_hits']}")
|
||||
print(f" ❄️ Cold Pool Hits: {pool_stats['cold_hits']}")
|
||||
print(f" 🆕 New Created: {pool_stats['new_created']}")
|
||||
print(f" 📊 Reuse Rate: {reuse_rate:.1f}%")
|
||||
print(f"\n Memory Stats:")
|
||||
print(f" Baseline: {baseline_mem:.1f} MB")
|
||||
print(f" Peak: {peak_mem:.1f} MB")
|
||||
print(f" Final: {final_mem:.1f} MB")
|
||||
print(f" Delta: {mem_delta:+.1f} MB")
|
||||
print(f"{'='*60}")
|
||||
|
||||
# Pass/Fail
|
||||
passed = True
|
||||
if success_rate < 100:
|
||||
print(f"❌ FAIL: Success rate {success_rate:.1f}% < 100%")
|
||||
passed = False
|
||||
if reuse_rate < 80:
|
||||
print(f"❌ FAIL: Reuse rate {reuse_rate:.1f}% < 80% (expected high permanent browser usage)")
|
||||
passed = False
|
||||
if pool_stats['permanent_hits'] < (total_requests * 0.8):
|
||||
print(f"⚠️ WARNING: Only {pool_stats['permanent_hits']} permanent hits out of {total_requests} requests")
|
||||
if mem_delta > 200:
|
||||
print(f"⚠️ WARNING: Memory grew by {mem_delta:.1f} MB (possible browser leak)")
|
||||
|
||||
if passed:
|
||||
print(f"✅ TEST PASSED")
|
||||
return 0
|
||||
else:
|
||||
return 1
|
||||
|
||||
except Exception as e:
|
||||
print(f"\n❌ TEST ERROR: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
return 1
|
||||
finally:
|
||||
stop_monitoring.set()
|
||||
if container:
|
||||
stop_container(container)
|
||||
|
||||
if __name__ == "__main__":
|
||||
exit_code = asyncio.run(main())
|
||||
exit(exit_code)
|
||||
@@ -1,236 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Test 4: Concurrent Load Testing
|
||||
- Tests pool under concurrent load
|
||||
- Escalates: 10 → 50 → 100 concurrent requests
|
||||
- Validates latency distribution (P50, P95, P99)
|
||||
- Monitors memory stability
|
||||
"""
|
||||
import asyncio
|
||||
import time
|
||||
import docker
|
||||
import httpx
|
||||
from threading import Thread, Event
|
||||
from collections import defaultdict
|
||||
|
||||
# Config
|
||||
IMAGE = "crawl4ai-local:latest"
|
||||
CONTAINER_NAME = "crawl4ai-test"
|
||||
PORT = 11235
|
||||
LOAD_LEVELS = [
|
||||
{"name": "Light", "concurrent": 10, "requests": 20},
|
||||
{"name": "Medium", "concurrent": 50, "requests": 100},
|
||||
{"name": "Heavy", "concurrent": 100, "requests": 200},
|
||||
]
|
||||
|
||||
# Stats
|
||||
stats_history = []
|
||||
stop_monitoring = Event()
|
||||
|
||||
def monitor_stats(container):
|
||||
"""Background stats collector."""
|
||||
for stat in container.stats(decode=True, stream=True):
|
||||
if stop_monitoring.is_set():
|
||||
break
|
||||
try:
|
||||
mem_usage = stat['memory_stats'].get('usage', 0) / (1024 * 1024)
|
||||
stats_history.append({'timestamp': time.time(), 'memory_mb': mem_usage})
|
||||
except:
|
||||
pass
|
||||
time.sleep(0.5)
|
||||
|
||||
def count_log_markers(container):
|
||||
"""Extract pool markers."""
|
||||
logs = container.logs().decode('utf-8')
|
||||
return {
|
||||
'permanent': logs.count("🔥 Using permanent browser"),
|
||||
'hot': logs.count("♨️ Using hot pool browser"),
|
||||
'cold': logs.count("❄️ Using cold pool browser"),
|
||||
'new': logs.count("🆕 Creating new browser"),
|
||||
}
|
||||
|
||||
async def hit_endpoint(client, url, payload, semaphore):
|
||||
"""Single request with concurrency control."""
|
||||
async with semaphore:
|
||||
start = time.time()
|
||||
try:
|
||||
resp = await client.post(url, json=payload, timeout=60.0)
|
||||
elapsed = (time.time() - start) * 1000
|
||||
return {"success": resp.status_code == 200, "latency_ms": elapsed}
|
||||
except Exception as e:
|
||||
return {"success": False, "error": str(e)}
|
||||
|
||||
async def run_concurrent_test(url, payload, concurrent, total_requests):
|
||||
"""Run concurrent requests."""
|
||||
semaphore = asyncio.Semaphore(concurrent)
|
||||
async with httpx.AsyncClient() as client:
|
||||
tasks = [hit_endpoint(client, url, payload, semaphore) for _ in range(total_requests)]
|
||||
results = await asyncio.gather(*tasks)
|
||||
return results
|
||||
|
||||
def calculate_percentiles(latencies):
|
||||
"""Calculate P50, P95, P99."""
|
||||
if not latencies:
|
||||
return 0, 0, 0
|
||||
sorted_lat = sorted(latencies)
|
||||
n = len(sorted_lat)
|
||||
return (
|
||||
sorted_lat[int(n * 0.50)],
|
||||
sorted_lat[int(n * 0.95)],
|
||||
sorted_lat[int(n * 0.99)],
|
||||
)
|
||||
|
||||
def start_container(client, image, name, port):
|
||||
"""Start container."""
|
||||
try:
|
||||
old = client.containers.get(name)
|
||||
print(f"🧹 Stopping existing container...")
|
||||
old.stop()
|
||||
old.remove()
|
||||
except docker.errors.NotFound:
|
||||
pass
|
||||
|
||||
print(f"🚀 Starting container...")
|
||||
container = client.containers.run(
|
||||
image, name=name, ports={f"{port}/tcp": port},
|
||||
detach=True, shm_size="1g", mem_limit="4g",
|
||||
)
|
||||
|
||||
print(f"⏳ Waiting for health...")
|
||||
for _ in range(30):
|
||||
time.sleep(1)
|
||||
container.reload()
|
||||
if container.status == "running":
|
||||
try:
|
||||
import requests
|
||||
if requests.get(f"http://localhost:{port}/health", timeout=2).status_code == 200:
|
||||
print(f"✅ Container healthy!")
|
||||
return container
|
||||
except:
|
||||
pass
|
||||
raise TimeoutError("Container failed to start")
|
||||
|
||||
async def main():
|
||||
print("="*60)
|
||||
print("TEST 4: Concurrent Load Testing")
|
||||
print("="*60)
|
||||
|
||||
client = docker.from_env()
|
||||
container = None
|
||||
monitor_thread = None
|
||||
|
||||
try:
|
||||
container = start_container(client, IMAGE, CONTAINER_NAME, PORT)
|
||||
|
||||
print(f"\n⏳ Waiting for permanent browser init (3s)...")
|
||||
await asyncio.sleep(3)
|
||||
|
||||
# Start monitoring
|
||||
stop_monitoring.clear()
|
||||
stats_history.clear()
|
||||
monitor_thread = Thread(target=monitor_stats, args=(container,), daemon=True)
|
||||
monitor_thread.start()
|
||||
|
||||
await asyncio.sleep(1)
|
||||
baseline_mem = stats_history[-1]['memory_mb'] if stats_history else 0
|
||||
print(f"📏 Baseline: {baseline_mem:.1f} MB\n")
|
||||
|
||||
url = f"http://localhost:{PORT}/html"
|
||||
payload = {"url": "https://httpbin.org/html"}
|
||||
|
||||
all_results = []
|
||||
level_stats = []
|
||||
|
||||
# Run load levels
|
||||
for level in LOAD_LEVELS:
|
||||
print(f"{'='*60}")
|
||||
print(f"🔄 {level['name']} Load: {level['concurrent']} concurrent, {level['requests']} total")
|
||||
print(f"{'='*60}")
|
||||
|
||||
start_time = time.time()
|
||||
results = await run_concurrent_test(url, payload, level['concurrent'], level['requests'])
|
||||
duration = time.time() - start_time
|
||||
|
||||
successes = sum(1 for r in results if r.get("success"))
|
||||
success_rate = (successes / len(results)) * 100
|
||||
latencies = [r["latency_ms"] for r in results if "latency_ms" in r]
|
||||
p50, p95, p99 = calculate_percentiles(latencies)
|
||||
avg_lat = sum(latencies) / len(latencies) if latencies else 0
|
||||
|
||||
print(f" Duration: {duration:.1f}s")
|
||||
print(f" Success: {success_rate:.1f}% ({successes}/{len(results)})")
|
||||
print(f" Avg Latency: {avg_lat:.0f}ms")
|
||||
print(f" P50/P95/P99: {p50:.0f}ms / {p95:.0f}ms / {p99:.0f}ms")
|
||||
|
||||
level_stats.append({
|
||||
'name': level['name'],
|
||||
'concurrent': level['concurrent'],
|
||||
'success_rate': success_rate,
|
||||
'avg_latency': avg_lat,
|
||||
'p50': p50, 'p95': p95, 'p99': p99,
|
||||
})
|
||||
all_results.extend(results)
|
||||
|
||||
await asyncio.sleep(2) # Cool down between levels
|
||||
|
||||
# Stop monitoring
|
||||
await asyncio.sleep(1)
|
||||
stop_monitoring.set()
|
||||
if monitor_thread:
|
||||
monitor_thread.join(timeout=2)
|
||||
|
||||
# Final stats
|
||||
pool_stats = count_log_markers(container)
|
||||
memory_samples = [s['memory_mb'] for s in stats_history]
|
||||
peak_mem = max(memory_samples) if memory_samples else 0
|
||||
final_mem = memory_samples[-1] if memory_samples else 0
|
||||
|
||||
print(f"\n{'='*60}")
|
||||
print(f"FINAL RESULTS:")
|
||||
print(f"{'='*60}")
|
||||
print(f" Total Requests: {len(all_results)}")
|
||||
print(f"\n Pool Utilization:")
|
||||
print(f" 🔥 Permanent: {pool_stats['permanent']}")
|
||||
print(f" ♨️ Hot: {pool_stats['hot']}")
|
||||
print(f" ❄️ Cold: {pool_stats['cold']}")
|
||||
print(f" 🆕 New: {pool_stats['new']}")
|
||||
print(f"\n Memory:")
|
||||
print(f" Baseline: {baseline_mem:.1f} MB")
|
||||
print(f" Peak: {peak_mem:.1f} MB")
|
||||
print(f" Final: {final_mem:.1f} MB")
|
||||
print(f" Delta: {final_mem - baseline_mem:+.1f} MB")
|
||||
print(f"{'='*60}")
|
||||
|
||||
# Pass/Fail
|
||||
passed = True
|
||||
for ls in level_stats:
|
||||
if ls['success_rate'] < 99:
|
||||
print(f"❌ FAIL: {ls['name']} success rate {ls['success_rate']:.1f}% < 99%")
|
||||
passed = False
|
||||
if ls['p99'] > 10000: # 10s threshold
|
||||
print(f"⚠️ WARNING: {ls['name']} P99 latency {ls['p99']:.0f}ms very high")
|
||||
|
||||
if final_mem - baseline_mem > 300:
|
||||
print(f"⚠️ WARNING: Memory grew {final_mem - baseline_mem:.1f} MB")
|
||||
|
||||
if passed:
|
||||
print(f"✅ TEST PASSED")
|
||||
return 0
|
||||
else:
|
||||
return 1
|
||||
|
||||
except Exception as e:
|
||||
print(f"\n❌ TEST ERROR: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
return 1
|
||||
finally:
|
||||
stop_monitoring.set()
|
||||
if container:
|
||||
print(f"🛑 Stopping container...")
|
||||
container.stop()
|
||||
container.remove()
|
||||
|
||||
if __name__ == "__main__":
|
||||
exit_code = asyncio.run(main())
|
||||
exit(exit_code)
|
||||
@@ -1,267 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Test 5: Pool Stress - Mixed Configs
|
||||
- Tests hot/cold pool with different browser configs
|
||||
- Uses different viewports to create config variants
|
||||
- Validates cold → hot promotion after 3 uses
|
||||
- Monitors pool tier distribution
|
||||
"""
|
||||
import asyncio
|
||||
import time
|
||||
import docker
|
||||
import httpx
|
||||
from threading import Thread, Event
|
||||
import random
|
||||
|
||||
# Config
|
||||
IMAGE = "crawl4ai-local:latest"
|
||||
CONTAINER_NAME = "crawl4ai-test"
|
||||
PORT = 11235
|
||||
REQUESTS_PER_CONFIG = 5 # 5 requests per config variant
|
||||
|
||||
# Different viewport configs to test pool tiers
|
||||
VIEWPORT_CONFIGS = [
|
||||
None, # Default (permanent browser)
|
||||
{"width": 1920, "height": 1080}, # Desktop
|
||||
{"width": 1024, "height": 768}, # Tablet
|
||||
{"width": 375, "height": 667}, # Mobile
|
||||
]
|
||||
|
||||
# Stats
|
||||
stats_history = []
|
||||
stop_monitoring = Event()
|
||||
|
||||
def monitor_stats(container):
|
||||
"""Background stats collector."""
|
||||
for stat in container.stats(decode=True, stream=True):
|
||||
if stop_monitoring.is_set():
|
||||
break
|
||||
try:
|
||||
mem_usage = stat['memory_stats'].get('usage', 0) / (1024 * 1024)
|
||||
stats_history.append({'timestamp': time.time(), 'memory_mb': mem_usage})
|
||||
except:
|
||||
pass
|
||||
time.sleep(0.5)
|
||||
|
||||
def analyze_pool_logs(container):
|
||||
"""Extract detailed pool stats from logs."""
|
||||
logs = container.logs().decode('utf-8')
|
||||
|
||||
permanent = logs.count("🔥 Using permanent browser")
|
||||
hot = logs.count("♨️ Using hot pool browser")
|
||||
cold = logs.count("❄️ Using cold pool browser")
|
||||
new = logs.count("🆕 Creating new browser")
|
||||
promotions = logs.count("⬆️ Promoting to hot pool")
|
||||
|
||||
return {
|
||||
'permanent': permanent,
|
||||
'hot': hot,
|
||||
'cold': cold,
|
||||
'new': new,
|
||||
'promotions': promotions,
|
||||
'total': permanent + hot + cold
|
||||
}
|
||||
|
||||
async def crawl_with_viewport(client, url, viewport):
|
||||
"""Single request with specific viewport."""
|
||||
payload = {
|
||||
"urls": ["https://httpbin.org/html"],
|
||||
"browser_config": {},
|
||||
"crawler_config": {}
|
||||
}
|
||||
|
||||
# Add viewport if specified
|
||||
if viewport:
|
||||
payload["browser_config"] = {
|
||||
"type": "BrowserConfig",
|
||||
"params": {
|
||||
"viewport": {"type": "dict", "value": viewport},
|
||||
"headless": True,
|
||||
"text_mode": True,
|
||||
"extra_args": [
|
||||
"--no-sandbox",
|
||||
"--disable-dev-shm-usage",
|
||||
"--disable-gpu",
|
||||
"--disable-software-rasterizer",
|
||||
"--disable-web-security",
|
||||
"--allow-insecure-localhost",
|
||||
"--ignore-certificate-errors"
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
start = time.time()
|
||||
try:
|
||||
resp = await client.post(url, json=payload, timeout=60.0)
|
||||
elapsed = (time.time() - start) * 1000
|
||||
return {"success": resp.status_code == 200, "latency_ms": elapsed, "viewport": viewport}
|
||||
except Exception as e:
|
||||
return {"success": False, "error": str(e), "viewport": viewport}
|
||||
|
||||
def start_container(client, image, name, port):
|
||||
"""Start container."""
|
||||
try:
|
||||
old = client.containers.get(name)
|
||||
print(f"🧹 Stopping existing container...")
|
||||
old.stop()
|
||||
old.remove()
|
||||
except docker.errors.NotFound:
|
||||
pass
|
||||
|
||||
print(f"🚀 Starting container...")
|
||||
container = client.containers.run(
|
||||
image, name=name, ports={f"{port}/tcp": port},
|
||||
detach=True, shm_size="1g", mem_limit="4g",
|
||||
)
|
||||
|
||||
print(f"⏳ Waiting for health...")
|
||||
for _ in range(30):
|
||||
time.sleep(1)
|
||||
container.reload()
|
||||
if container.status == "running":
|
||||
try:
|
||||
import requests
|
||||
if requests.get(f"http://localhost:{port}/health", timeout=2).status_code == 200:
|
||||
print(f"✅ Container healthy!")
|
||||
return container
|
||||
except:
|
||||
pass
|
||||
raise TimeoutError("Container failed to start")
|
||||
|
||||
async def main():
|
||||
print("="*60)
|
||||
print("TEST 5: Pool Stress - Mixed Configs")
|
||||
print("="*60)
|
||||
|
||||
client = docker.from_env()
|
||||
container = None
|
||||
monitor_thread = None
|
||||
|
||||
try:
|
||||
container = start_container(client, IMAGE, CONTAINER_NAME, PORT)
|
||||
|
||||
print(f"\n⏳ Waiting for permanent browser init (3s)...")
|
||||
await asyncio.sleep(3)
|
||||
|
||||
# Start monitoring
|
||||
stop_monitoring.clear()
|
||||
stats_history.clear()
|
||||
monitor_thread = Thread(target=monitor_stats, args=(container,), daemon=True)
|
||||
monitor_thread.start()
|
||||
|
||||
await asyncio.sleep(1)
|
||||
baseline_mem = stats_history[-1]['memory_mb'] if stats_history else 0
|
||||
print(f"📏 Baseline: {baseline_mem:.1f} MB\n")
|
||||
|
||||
url = f"http://localhost:{PORT}/crawl"
|
||||
|
||||
print(f"Testing {len(VIEWPORT_CONFIGS)} different configs:")
|
||||
for i, vp in enumerate(VIEWPORT_CONFIGS):
|
||||
vp_str = "Default" if vp is None else f"{vp['width']}x{vp['height']}"
|
||||
print(f" {i+1}. {vp_str}")
|
||||
print()
|
||||
|
||||
# Run requests: repeat each config REQUESTS_PER_CONFIG times
|
||||
all_results = []
|
||||
config_sequence = []
|
||||
|
||||
for _ in range(REQUESTS_PER_CONFIG):
|
||||
for viewport in VIEWPORT_CONFIGS:
|
||||
config_sequence.append(viewport)
|
||||
|
||||
# Shuffle to mix configs
|
||||
random.shuffle(config_sequence)
|
||||
|
||||
print(f"🔄 Running {len(config_sequence)} requests with mixed configs...")
|
||||
|
||||
async with httpx.AsyncClient() as http_client:
|
||||
for i, viewport in enumerate(config_sequence):
|
||||
result = await crawl_with_viewport(http_client, url, viewport)
|
||||
all_results.append(result)
|
||||
|
||||
if (i + 1) % 5 == 0:
|
||||
vp_str = "default" if result['viewport'] is None else f"{result['viewport']['width']}x{result['viewport']['height']}"
|
||||
status = "✓" if result.get('success') else "✗"
|
||||
lat = f"{result.get('latency_ms', 0):.0f}ms" if 'latency_ms' in result else "error"
|
||||
print(f" [{i+1}/{len(config_sequence)}] {status} {vp_str} - {lat}")
|
||||
|
||||
# Stop monitoring
|
||||
await asyncio.sleep(2)
|
||||
stop_monitoring.set()
|
||||
if monitor_thread:
|
||||
monitor_thread.join(timeout=2)
|
||||
|
||||
# Analyze results
|
||||
pool_stats = analyze_pool_logs(container)
|
||||
|
||||
successes = sum(1 for r in all_results if r.get("success"))
|
||||
success_rate = (successes / len(all_results)) * 100
|
||||
latencies = [r["latency_ms"] for r in all_results if "latency_ms" in r]
|
||||
avg_lat = sum(latencies) / len(latencies) if latencies else 0
|
||||
|
||||
memory_samples = [s['memory_mb'] for s in stats_history]
|
||||
peak_mem = max(memory_samples) if memory_samples else 0
|
||||
final_mem = memory_samples[-1] if memory_samples else 0
|
||||
|
||||
print(f"\n{'='*60}")
|
||||
print(f"RESULTS:")
|
||||
print(f"{'='*60}")
|
||||
print(f" Requests: {len(all_results)}")
|
||||
print(f" Success Rate: {success_rate:.1f}% ({successes}/{len(all_results)})")
|
||||
print(f" Avg Latency: {avg_lat:.0f}ms")
|
||||
print(f"\n Pool Statistics:")
|
||||
print(f" 🔥 Permanent: {pool_stats['permanent']}")
|
||||
print(f" ♨️ Hot: {pool_stats['hot']}")
|
||||
print(f" ❄️ Cold: {pool_stats['cold']}")
|
||||
print(f" 🆕 New: {pool_stats['new']}")
|
||||
print(f" ⬆️ Promotions: {pool_stats['promotions']}")
|
||||
print(f" 📊 Reuse: {(pool_stats['total'] / len(all_results) * 100):.1f}%")
|
||||
print(f"\n Memory:")
|
||||
print(f" Baseline: {baseline_mem:.1f} MB")
|
||||
print(f" Peak: {peak_mem:.1f} MB")
|
||||
print(f" Final: {final_mem:.1f} MB")
|
||||
print(f" Delta: {final_mem - baseline_mem:+.1f} MB")
|
||||
print(f"{'='*60}")
|
||||
|
||||
# Pass/Fail
|
||||
passed = True
|
||||
|
||||
if success_rate < 99:
|
||||
print(f"❌ FAIL: Success rate {success_rate:.1f}% < 99%")
|
||||
passed = False
|
||||
|
||||
# Should see promotions since we repeat each config 5 times
|
||||
if pool_stats['promotions'] < (len(VIEWPORT_CONFIGS) - 1): # -1 for default
|
||||
print(f"⚠️ WARNING: Only {pool_stats['promotions']} promotions (expected ~{len(VIEWPORT_CONFIGS)-1})")
|
||||
|
||||
# Should have created some browsers for different configs
|
||||
if pool_stats['new'] == 0:
|
||||
print(f"⚠️ NOTE: No new browsers created (all used default?)")
|
||||
|
||||
if pool_stats['permanent'] == len(all_results):
|
||||
print(f"⚠️ NOTE: All requests used permanent browser (configs not varying enough?)")
|
||||
|
||||
if final_mem - baseline_mem > 500:
|
||||
print(f"⚠️ WARNING: Memory grew {final_mem - baseline_mem:.1f} MB")
|
||||
|
||||
if passed:
|
||||
print(f"✅ TEST PASSED")
|
||||
return 0
|
||||
else:
|
||||
return 1
|
||||
|
||||
except Exception as e:
|
||||
print(f"\n❌ TEST ERROR: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
return 1
|
||||
finally:
|
||||
stop_monitoring.set()
|
||||
if container:
|
||||
print(f"🛑 Stopping container...")
|
||||
container.stop()
|
||||
container.remove()
|
||||
|
||||
if __name__ == "__main__":
|
||||
exit_code = asyncio.run(main())
|
||||
exit(exit_code)
|
||||
@@ -1,234 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Test 6: Multi-Endpoint Testing
|
||||
- Tests multiple endpoints together: /html, /screenshot, /pdf, /crawl
|
||||
- Validates each endpoint works correctly
|
||||
- Monitors success rates per endpoint
|
||||
"""
|
||||
import asyncio
|
||||
import time
|
||||
import docker
|
||||
import httpx
|
||||
from threading import Thread, Event
|
||||
|
||||
# Config
|
||||
IMAGE = "crawl4ai-local:latest"
|
||||
CONTAINER_NAME = "crawl4ai-test"
|
||||
PORT = 11235
|
||||
REQUESTS_PER_ENDPOINT = 10
|
||||
|
||||
# Stats
|
||||
stats_history = []
|
||||
stop_monitoring = Event()
|
||||
|
||||
def monitor_stats(container):
|
||||
"""Background stats collector."""
|
||||
for stat in container.stats(decode=True, stream=True):
|
||||
if stop_monitoring.is_set():
|
||||
break
|
||||
try:
|
||||
mem_usage = stat['memory_stats'].get('usage', 0) / (1024 * 1024)
|
||||
stats_history.append({'timestamp': time.time(), 'memory_mb': mem_usage})
|
||||
except:
|
||||
pass
|
||||
time.sleep(0.5)
|
||||
|
||||
async def test_html(client, base_url, count):
|
||||
"""Test /html endpoint."""
|
||||
url = f"{base_url}/html"
|
||||
results = []
|
||||
for _ in range(count):
|
||||
start = time.time()
|
||||
try:
|
||||
resp = await client.post(url, json={"url": "https://httpbin.org/html"}, timeout=30.0)
|
||||
elapsed = (time.time() - start) * 1000
|
||||
results.append({"success": resp.status_code == 200, "latency_ms": elapsed})
|
||||
except Exception as e:
|
||||
results.append({"success": False, "error": str(e)})
|
||||
return results
|
||||
|
||||
async def test_screenshot(client, base_url, count):
|
||||
"""Test /screenshot endpoint."""
|
||||
url = f"{base_url}/screenshot"
|
||||
results = []
|
||||
for _ in range(count):
|
||||
start = time.time()
|
||||
try:
|
||||
resp = await client.post(url, json={"url": "https://httpbin.org/html"}, timeout=30.0)
|
||||
elapsed = (time.time() - start) * 1000
|
||||
results.append({"success": resp.status_code == 200, "latency_ms": elapsed})
|
||||
except Exception as e:
|
||||
results.append({"success": False, "error": str(e)})
|
||||
return results
|
||||
|
||||
async def test_pdf(client, base_url, count):
|
||||
"""Test /pdf endpoint."""
|
||||
url = f"{base_url}/pdf"
|
||||
results = []
|
||||
for _ in range(count):
|
||||
start = time.time()
|
||||
try:
|
||||
resp = await client.post(url, json={"url": "https://httpbin.org/html"}, timeout=30.0)
|
||||
elapsed = (time.time() - start) * 1000
|
||||
results.append({"success": resp.status_code == 200, "latency_ms": elapsed})
|
||||
except Exception as e:
|
||||
results.append({"success": False, "error": str(e)})
|
||||
return results
|
||||
|
||||
async def test_crawl(client, base_url, count):
|
||||
"""Test /crawl endpoint."""
|
||||
url = f"{base_url}/crawl"
|
||||
results = []
|
||||
payload = {
|
||||
"urls": ["https://httpbin.org/html"],
|
||||
"browser_config": {},
|
||||
"crawler_config": {}
|
||||
}
|
||||
for _ in range(count):
|
||||
start = time.time()
|
||||
try:
|
||||
resp = await client.post(url, json=payload, timeout=30.0)
|
||||
elapsed = (time.time() - start) * 1000
|
||||
results.append({"success": resp.status_code == 200, "latency_ms": elapsed})
|
||||
except Exception as e:
|
||||
results.append({"success": False, "error": str(e)})
|
||||
return results
|
||||
|
||||
def start_container(client, image, name, port):
|
||||
"""Start container."""
|
||||
try:
|
||||
old = client.containers.get(name)
|
||||
print(f"🧹 Stopping existing container...")
|
||||
old.stop()
|
||||
old.remove()
|
||||
except docker.errors.NotFound:
|
||||
pass
|
||||
|
||||
print(f"🚀 Starting container...")
|
||||
container = client.containers.run(
|
||||
image, name=name, ports={f"{port}/tcp": port},
|
||||
detach=True, shm_size="1g", mem_limit="4g",
|
||||
)
|
||||
|
||||
print(f"⏳ Waiting for health...")
|
||||
for _ in range(30):
|
||||
time.sleep(1)
|
||||
container.reload()
|
||||
if container.status == "running":
|
||||
try:
|
||||
import requests
|
||||
if requests.get(f"http://localhost:{port}/health", timeout=2).status_code == 200:
|
||||
print(f"✅ Container healthy!")
|
||||
return container
|
||||
except:
|
||||
pass
|
||||
raise TimeoutError("Container failed to start")
|
||||
|
||||
async def main():
|
||||
print("="*60)
|
||||
print("TEST 6: Multi-Endpoint Testing")
|
||||
print("="*60)
|
||||
|
||||
client = docker.from_env()
|
||||
container = None
|
||||
monitor_thread = None
|
||||
|
||||
try:
|
||||
container = start_container(client, IMAGE, CONTAINER_NAME, PORT)
|
||||
|
||||
print(f"\n⏳ Waiting for permanent browser init (3s)...")
|
||||
await asyncio.sleep(3)
|
||||
|
||||
# Start monitoring
|
||||
stop_monitoring.clear()
|
||||
stats_history.clear()
|
||||
monitor_thread = Thread(target=monitor_stats, args=(container,), daemon=True)
|
||||
monitor_thread.start()
|
||||
|
||||
await asyncio.sleep(1)
|
||||
baseline_mem = stats_history[-1]['memory_mb'] if stats_history else 0
|
||||
print(f"📏 Baseline: {baseline_mem:.1f} MB\n")
|
||||
|
||||
base_url = f"http://localhost:{PORT}"
|
||||
|
||||
# Test each endpoint
|
||||
endpoints = {
|
||||
"/html": test_html,
|
||||
"/screenshot": test_screenshot,
|
||||
"/pdf": test_pdf,
|
||||
"/crawl": test_crawl,
|
||||
}
|
||||
|
||||
all_endpoint_stats = {}
|
||||
|
||||
async with httpx.AsyncClient() as http_client:
|
||||
for endpoint_name, test_func in endpoints.items():
|
||||
print(f"🔄 Testing {endpoint_name} ({REQUESTS_PER_ENDPOINT} requests)...")
|
||||
results = await test_func(http_client, base_url, REQUESTS_PER_ENDPOINT)
|
||||
|
||||
successes = sum(1 for r in results if r.get("success"))
|
||||
success_rate = (successes / len(results)) * 100
|
||||
latencies = [r["latency_ms"] for r in results if "latency_ms" in r]
|
||||
avg_lat = sum(latencies) / len(latencies) if latencies else 0
|
||||
|
||||
all_endpoint_stats[endpoint_name] = {
|
||||
'success_rate': success_rate,
|
||||
'avg_latency': avg_lat,
|
||||
'total': len(results),
|
||||
'successes': successes
|
||||
}
|
||||
|
||||
print(f" ✓ Success: {success_rate:.1f}% ({successes}/{len(results)}), Avg: {avg_lat:.0f}ms")
|
||||
|
||||
# Stop monitoring
|
||||
await asyncio.sleep(1)
|
||||
stop_monitoring.set()
|
||||
if monitor_thread:
|
||||
monitor_thread.join(timeout=2)
|
||||
|
||||
# Final stats
|
||||
memory_samples = [s['memory_mb'] for s in stats_history]
|
||||
peak_mem = max(memory_samples) if memory_samples else 0
|
||||
final_mem = memory_samples[-1] if memory_samples else 0
|
||||
|
||||
print(f"\n{'='*60}")
|
||||
print(f"RESULTS:")
|
||||
print(f"{'='*60}")
|
||||
for endpoint, stats in all_endpoint_stats.items():
|
||||
print(f" {endpoint:12} Success: {stats['success_rate']:5.1f}% Avg: {stats['avg_latency']:6.0f}ms")
|
||||
|
||||
print(f"\n Memory:")
|
||||
print(f" Baseline: {baseline_mem:.1f} MB")
|
||||
print(f" Peak: {peak_mem:.1f} MB")
|
||||
print(f" Final: {final_mem:.1f} MB")
|
||||
print(f" Delta: {final_mem - baseline_mem:+.1f} MB")
|
||||
print(f"{'='*60}")
|
||||
|
||||
# Pass/Fail
|
||||
passed = True
|
||||
for endpoint, stats in all_endpoint_stats.items():
|
||||
if stats['success_rate'] < 100:
|
||||
print(f"❌ FAIL: {endpoint} success rate {stats['success_rate']:.1f}% < 100%")
|
||||
passed = False
|
||||
|
||||
if passed:
|
||||
print(f"✅ TEST PASSED")
|
||||
return 0
|
||||
else:
|
||||
return 1
|
||||
|
||||
except Exception as e:
|
||||
print(f"\n❌ TEST ERROR: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
return 1
|
||||
finally:
|
||||
stop_monitoring.set()
|
||||
if container:
|
||||
print(f"🛑 Stopping container...")
|
||||
container.stop()
|
||||
container.remove()
|
||||
|
||||
if __name__ == "__main__":
|
||||
exit_code = asyncio.run(main())
|
||||
exit(exit_code)
|
||||
@@ -1,199 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Test 7: Cleanup Verification (Janitor)
|
||||
- Creates load spike then goes idle
|
||||
- Verifies memory returns to near baseline
|
||||
- Tests janitor cleanup of idle browsers
|
||||
- Monitors memory recovery time
|
||||
"""
|
||||
import asyncio
|
||||
import time
|
||||
import docker
|
||||
import httpx
|
||||
from threading import Thread, Event
|
||||
|
||||
# Config
|
||||
IMAGE = "crawl4ai-local:latest"
|
||||
CONTAINER_NAME = "crawl4ai-test"
|
||||
PORT = 11235
|
||||
SPIKE_REQUESTS = 20 # Create some browsers
|
||||
IDLE_TIME = 90 # Wait 90s for janitor (runs every 60s)
|
||||
|
||||
# Stats
|
||||
stats_history = []
|
||||
stop_monitoring = Event()
|
||||
|
||||
def monitor_stats(container):
|
||||
"""Background stats collector."""
|
||||
for stat in container.stats(decode=True, stream=True):
|
||||
if stop_monitoring.is_set():
|
||||
break
|
||||
try:
|
||||
mem_usage = stat['memory_stats'].get('usage', 0) / (1024 * 1024)
|
||||
stats_history.append({'timestamp': time.time(), 'memory_mb': mem_usage})
|
||||
except:
|
||||
pass
|
||||
time.sleep(1) # Sample every 1s for this test
|
||||
|
||||
def start_container(client, image, name, port):
|
||||
"""Start container."""
|
||||
try:
|
||||
old = client.containers.get(name)
|
||||
print(f"🧹 Stopping existing container...")
|
||||
old.stop()
|
||||
old.remove()
|
||||
except docker.errors.NotFound:
|
||||
pass
|
||||
|
||||
print(f"🚀 Starting container...")
|
||||
container = client.containers.run(
|
||||
image, name=name, ports={f"{port}/tcp": port},
|
||||
detach=True, shm_size="1g", mem_limit="4g",
|
||||
)
|
||||
|
||||
print(f"⏳ Waiting for health...")
|
||||
for _ in range(30):
|
||||
time.sleep(1)
|
||||
container.reload()
|
||||
if container.status == "running":
|
||||
try:
|
||||
import requests
|
||||
if requests.get(f"http://localhost:{port}/health", timeout=2).status_code == 200:
|
||||
print(f"✅ Container healthy!")
|
||||
return container
|
||||
except:
|
||||
pass
|
||||
raise TimeoutError("Container failed to start")
|
||||
|
||||
async def main():
|
||||
print("="*60)
|
||||
print("TEST 7: Cleanup Verification (Janitor)")
|
||||
print("="*60)
|
||||
|
||||
client = docker.from_env()
|
||||
container = None
|
||||
monitor_thread = None
|
||||
|
||||
try:
|
||||
container = start_container(client, IMAGE, CONTAINER_NAME, PORT)
|
||||
|
||||
print(f"\n⏳ Waiting for permanent browser init (3s)...")
|
||||
await asyncio.sleep(3)
|
||||
|
||||
# Start monitoring
|
||||
stop_monitoring.clear()
|
||||
stats_history.clear()
|
||||
monitor_thread = Thread(target=monitor_stats, args=(container,), daemon=True)
|
||||
monitor_thread.start()
|
||||
|
||||
await asyncio.sleep(2)
|
||||
baseline_mem = stats_history[-1]['memory_mb'] if stats_history else 0
|
||||
print(f"📏 Baseline: {baseline_mem:.1f} MB\n")
|
||||
|
||||
# Create load spike with different configs to populate pool
|
||||
print(f"🔥 Creating load spike ({SPIKE_REQUESTS} requests with varied configs)...")
|
||||
url = f"http://localhost:{PORT}/crawl"
|
||||
|
||||
viewports = [
|
||||
{"width": 1920, "height": 1080},
|
||||
{"width": 1024, "height": 768},
|
||||
{"width": 375, "height": 667},
|
||||
]
|
||||
|
||||
async with httpx.AsyncClient(timeout=60.0) as http_client:
|
||||
tasks = []
|
||||
for i in range(SPIKE_REQUESTS):
|
||||
vp = viewports[i % len(viewports)]
|
||||
payload = {
|
||||
"urls": ["https://httpbin.org/html"],
|
||||
"browser_config": {
|
||||
"type": "BrowserConfig",
|
||||
"params": {
|
||||
"viewport": {"type": "dict", "value": vp},
|
||||
"headless": True,
|
||||
"text_mode": True,
|
||||
"extra_args": [
|
||||
"--no-sandbox", "--disable-dev-shm-usage",
|
||||
"--disable-gpu", "--disable-software-rasterizer",
|
||||
"--disable-web-security", "--allow-insecure-localhost",
|
||||
"--ignore-certificate-errors"
|
||||
]
|
||||
}
|
||||
},
|
||||
"crawler_config": {}
|
||||
}
|
||||
tasks.append(http_client.post(url, json=payload))
|
||||
|
||||
results = await asyncio.gather(*tasks, return_exceptions=True)
|
||||
successes = sum(1 for r in results if hasattr(r, 'status_code') and r.status_code == 200)
|
||||
print(f" ✓ Spike completed: {successes}/{len(results)} successful")
|
||||
|
||||
# Measure peak
|
||||
await asyncio.sleep(2)
|
||||
peak_mem = max([s['memory_mb'] for s in stats_history]) if stats_history else baseline_mem
|
||||
print(f" 📊 Peak memory: {peak_mem:.1f} MB (+{peak_mem - baseline_mem:.1f} MB)")
|
||||
|
||||
# Now go idle and wait for janitor
|
||||
print(f"\n⏸️ Going idle for {IDLE_TIME}s (janitor cleanup)...")
|
||||
print(f" (Janitor runs every 60s, checking for idle browsers)")
|
||||
|
||||
for elapsed in range(0, IDLE_TIME, 10):
|
||||
await asyncio.sleep(10)
|
||||
current_mem = stats_history[-1]['memory_mb'] if stats_history else 0
|
||||
print(f" [{elapsed+10:3d}s] Memory: {current_mem:.1f} MB")
|
||||
|
||||
# Stop monitoring
|
||||
stop_monitoring.set()
|
||||
if monitor_thread:
|
||||
monitor_thread.join(timeout=2)
|
||||
|
||||
# Analyze memory recovery
|
||||
final_mem = stats_history[-1]['memory_mb'] if stats_history else 0
|
||||
recovery_mb = peak_mem - final_mem
|
||||
recovery_pct = (recovery_mb / (peak_mem - baseline_mem) * 100) if (peak_mem - baseline_mem) > 0 else 0
|
||||
|
||||
print(f"\n{'='*60}")
|
||||
print(f"RESULTS:")
|
||||
print(f"{'='*60}")
|
||||
print(f" Memory Journey:")
|
||||
print(f" Baseline: {baseline_mem:.1f} MB")
|
||||
print(f" Peak: {peak_mem:.1f} MB (+{peak_mem - baseline_mem:.1f} MB)")
|
||||
print(f" Final: {final_mem:.1f} MB (+{final_mem - baseline_mem:.1f} MB)")
|
||||
print(f" Recovered: {recovery_mb:.1f} MB ({recovery_pct:.1f}%)")
|
||||
print(f"{'='*60}")
|
||||
|
||||
# Pass/Fail
|
||||
passed = True
|
||||
|
||||
# Should have created some memory pressure
|
||||
if peak_mem - baseline_mem < 100:
|
||||
print(f"⚠️ WARNING: Peak increase only {peak_mem - baseline_mem:.1f} MB (expected more browsers)")
|
||||
|
||||
# Should recover most memory (within 100MB of baseline)
|
||||
if final_mem - baseline_mem > 100:
|
||||
print(f"⚠️ WARNING: Memory didn't recover well (still +{final_mem - baseline_mem:.1f} MB above baseline)")
|
||||
else:
|
||||
print(f"✅ Good memory recovery!")
|
||||
|
||||
# Baseline + 50MB tolerance
|
||||
if final_mem - baseline_mem < 50:
|
||||
print(f"✅ Excellent cleanup (within 50MB of baseline)")
|
||||
|
||||
print(f"✅ TEST PASSED")
|
||||
return 0
|
||||
|
||||
except Exception as e:
|
||||
print(f"\n❌ TEST ERROR: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
return 1
|
||||
finally:
|
||||
stop_monitoring.set()
|
||||
if container:
|
||||
print(f"🛑 Stopping container...")
|
||||
container.stop()
|
||||
container.remove()
|
||||
|
||||
if __name__ == "__main__":
|
||||
exit_code = asyncio.run(main())
|
||||
exit(exit_code)
|
||||
@@ -1,57 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Quick test to generate monitor dashboard activity"""
|
||||
import httpx
|
||||
import asyncio
|
||||
|
||||
async def test_dashboard():
|
||||
async with httpx.AsyncClient(timeout=30.0) as client:
|
||||
print("📊 Generating dashboard activity...")
|
||||
|
||||
# Test 1: Simple crawl
|
||||
print("\n1️⃣ Running simple crawl...")
|
||||
r1 = await client.post(
|
||||
"http://localhost:11235/crawl",
|
||||
json={"urls": ["https://httpbin.org/html"], "crawler_config": {}}
|
||||
)
|
||||
print(f" Status: {r1.status_code}")
|
||||
|
||||
# Test 2: Multiple URLs
|
||||
print("\n2️⃣ Running multi-URL crawl...")
|
||||
r2 = await client.post(
|
||||
"http://localhost:11235/crawl",
|
||||
json={
|
||||
"urls": [
|
||||
"https://httpbin.org/html",
|
||||
"https://httpbin.org/json"
|
||||
],
|
||||
"crawler_config": {}
|
||||
}
|
||||
)
|
||||
print(f" Status: {r2.status_code}")
|
||||
|
||||
# Test 3: Check monitor health
|
||||
print("\n3️⃣ Checking monitor health...")
|
||||
r3 = await client.get("http://localhost:11235/monitor/health")
|
||||
health = r3.json()
|
||||
print(f" Memory: {health['container']['memory_percent']}%")
|
||||
print(f" Browsers: {health['pool']['permanent']['active']}")
|
||||
|
||||
# Test 4: Check requests
|
||||
print("\n4️⃣ Checking request log...")
|
||||
r4 = await client.get("http://localhost:11235/monitor/requests")
|
||||
reqs = r4.json()
|
||||
print(f" Active: {len(reqs['active'])}")
|
||||
print(f" Completed: {len(reqs['completed'])}")
|
||||
|
||||
# Test 5: Check endpoint stats
|
||||
print("\n5️⃣ Checking endpoint stats...")
|
||||
r5 = await client.get("http://localhost:11235/monitor/endpoints/stats")
|
||||
stats = r5.json()
|
||||
for endpoint, data in stats.items():
|
||||
print(f" {endpoint}: {data['count']} requests, {data['avg_latency_ms']}ms avg")
|
||||
|
||||
print("\n✅ Dashboard should now show activity!")
|
||||
print(f"\n🌐 Open: http://localhost:11235/dashboard")
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(test_dashboard())
|
||||
@@ -6,7 +6,33 @@ from datetime import datetime
|
||||
from enum import Enum
|
||||
from pathlib import Path
|
||||
from fastapi import Request
|
||||
from typing import Dict, Optional
|
||||
from typing import Dict, Optional, Any, List
|
||||
|
||||
# Import dispatchers from crawl4ai
|
||||
from crawl4ai.async_dispatcher import (
|
||||
BaseDispatcher,
|
||||
MemoryAdaptiveDispatcher,
|
||||
SemaphoreDispatcher,
|
||||
)
|
||||
|
||||
# Import chunking strategies from crawl4ai
|
||||
from crawl4ai.chunking_strategy import (
|
||||
ChunkingStrategy,
|
||||
IdentityChunking,
|
||||
RegexChunking,
|
||||
NlpSentenceChunking,
|
||||
TopicSegmentationChunking,
|
||||
FixedLengthWordChunking,
|
||||
SlidingWindowChunking,
|
||||
OverlappingWindowChunking,
|
||||
)
|
||||
|
||||
# Import dispatchers from crawl4ai
|
||||
from crawl4ai.async_dispatcher import (
|
||||
BaseDispatcher,
|
||||
MemoryAdaptiveDispatcher,
|
||||
SemaphoreDispatcher,
|
||||
)
|
||||
|
||||
class TaskStatus(str, Enum):
|
||||
PROCESSING = "processing"
|
||||
@@ -19,6 +45,124 @@ class FilterType(str, Enum):
|
||||
BM25 = "bm25"
|
||||
LLM = "llm"
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Dispatcher Configuration and Factory
|
||||
# ============================================================================
|
||||
|
||||
# Default dispatcher configurations (hardcoded, no env variables)
|
||||
DISPATCHER_DEFAULTS = {
|
||||
"memory_adaptive": {
|
||||
"memory_threshold_percent": 70.0,
|
||||
"critical_threshold_percent": 85.0,
|
||||
"recovery_threshold_percent": 65.0,
|
||||
"check_interval": 1.0,
|
||||
"max_session_permit": 20,
|
||||
"fairness_timeout": 600.0,
|
||||
"memory_wait_timeout": None, # Disable memory timeout for testing
|
||||
},
|
||||
"semaphore": {
|
||||
"semaphore_count": 5,
|
||||
"max_session_permit": 10,
|
||||
}
|
||||
}
|
||||
|
||||
DEFAULT_DISPATCHER_TYPE = "memory_adaptive"
|
||||
|
||||
|
||||
def create_dispatcher(dispatcher_type: str) -> BaseDispatcher:
|
||||
"""
|
||||
Factory function to create dispatcher instances.
|
||||
|
||||
Args:
|
||||
dispatcher_type: Type of dispatcher to create ("memory_adaptive" or "semaphore")
|
||||
|
||||
Returns:
|
||||
BaseDispatcher instance
|
||||
|
||||
Raises:
|
||||
ValueError: If dispatcher type is unknown
|
||||
"""
|
||||
dispatcher_type = dispatcher_type.lower()
|
||||
|
||||
if dispatcher_type == "memory_adaptive":
|
||||
config = DISPATCHER_DEFAULTS["memory_adaptive"]
|
||||
return MemoryAdaptiveDispatcher(
|
||||
memory_threshold_percent=config["memory_threshold_percent"],
|
||||
critical_threshold_percent=config["critical_threshold_percent"],
|
||||
recovery_threshold_percent=config["recovery_threshold_percent"],
|
||||
check_interval=config["check_interval"],
|
||||
max_session_permit=config["max_session_permit"],
|
||||
fairness_timeout=config["fairness_timeout"],
|
||||
memory_wait_timeout=config["memory_wait_timeout"],
|
||||
)
|
||||
elif dispatcher_type == "semaphore":
|
||||
config = DISPATCHER_DEFAULTS["semaphore"]
|
||||
return SemaphoreDispatcher(
|
||||
semaphore_count=config["semaphore_count"],
|
||||
max_session_permit=config["max_session_permit"],
|
||||
)
|
||||
else:
|
||||
raise ValueError(f"Unknown dispatcher type: {dispatcher_type}")
|
||||
|
||||
|
||||
def get_dispatcher_config(dispatcher_type: str) -> Dict:
|
||||
"""
|
||||
Get configuration for a dispatcher type.
|
||||
|
||||
Args:
|
||||
dispatcher_type: Type of dispatcher ("memory_adaptive" or "semaphore")
|
||||
|
||||
Returns:
|
||||
Dictionary containing dispatcher configuration
|
||||
|
||||
Raises:
|
||||
ValueError: If dispatcher type is unknown
|
||||
"""
|
||||
dispatcher_type = dispatcher_type.lower()
|
||||
if dispatcher_type not in DISPATCHER_DEFAULTS:
|
||||
raise ValueError(f"Unknown dispatcher type: {dispatcher_type}")
|
||||
return DISPATCHER_DEFAULTS[dispatcher_type].copy()
|
||||
|
||||
|
||||
def get_available_dispatchers() -> Dict[str, Dict]:
|
||||
"""
|
||||
Get information about all available dispatchers.
|
||||
|
||||
Returns:
|
||||
Dictionary mapping dispatcher types to their metadata
|
||||
"""
|
||||
return {
|
||||
"memory_adaptive": {
|
||||
"name": "Memory Adaptive Dispatcher",
|
||||
"description": "Dynamically adjusts concurrency based on system memory usage. "
|
||||
"Monitors memory pressure and adapts crawl sessions accordingly.",
|
||||
"config": DISPATCHER_DEFAULTS["memory_adaptive"],
|
||||
"features": [
|
||||
"Dynamic concurrency adjustment",
|
||||
"Memory pressure monitoring",
|
||||
"Automatic task requeuing under high memory",
|
||||
"Fairness timeout for long-waiting URLs"
|
||||
]
|
||||
},
|
||||
"semaphore": {
|
||||
"name": "Semaphore Dispatcher",
|
||||
"description": "Fixed concurrency limit using semaphore-based control. "
|
||||
"Simple and predictable for controlled crawling.",
|
||||
"config": DISPATCHER_DEFAULTS["semaphore"],
|
||||
"features": [
|
||||
"Fixed concurrency limit",
|
||||
"Simple semaphore-based control",
|
||||
"Predictable resource usage"
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
# ============================================================================
|
||||
# End Dispatcher Configuration
|
||||
# ============================================================================
|
||||
|
||||
|
||||
def load_config() -> Dict:
|
||||
"""Load and return application configuration with environment variable overrides."""
|
||||
config_path = Path(__file__).parent / "config.yml"
|
||||
@@ -180,27 +324,236 @@ def verify_email_domain(email: str) -> bool:
|
||||
except Exception as e:
|
||||
return False
|
||||
|
||||
def get_container_memory_percent() -> float:
    """Get actual container memory usage vs limit (cgroup v1/v2 aware)."""
    try:
        # Try cgroup v2 first
        usage_path = Path("/sys/fs/cgroup/memory.current")
        limit_path = Path("/sys/fs/cgroup/memory.max")
        if not usage_path.exists():
            # Fall back to cgroup v1
            usage_path = Path("/sys/fs/cgroup/memory/memory.usage_in_bytes")
            limit_path = Path("/sys/fs/cgroup/memory/memory.limit_in_bytes")

        usage = int(usage_path.read_text())
        limit = int(limit_path.read_text())

        # Handle unlimited (v2: "max", v1: > 1e18)
        if limit > 1e18:
            import psutil
            limit = psutil.virtual_memory().total

        return (usage / limit) * 100
    except:
        # Non-container or unsupported: fallback to host
        import psutil
        return psutil.virtual_memory().percent


def create_chunking_strategy(config: Optional[Dict[str, Any]] = None) -> Optional[ChunkingStrategy]:
    """
    Factory function to create chunking strategy instances from configuration.

    Args:
        config: Dictionary containing 'type' and 'params' keys
                Example: {"type": "RegexChunking", "params": {"patterns": ["\\n\\n+"]}}

    Returns:
        ChunkingStrategy instance or None if config is None

    Raises:
        ValueError: If chunking strategy type is unknown or config is invalid
    """
    if config is None:
        return None

    if not isinstance(config, dict):
        raise ValueError(f"Chunking strategy config must be a dictionary, got {type(config)}")

    if "type" not in config:
        raise ValueError("Chunking strategy config must contain 'type' field")

    strategy_type = config["type"]
    params = config.get("params", {})

    # Validate params is a dict
    if not isinstance(params, dict):
        raise ValueError(f"Chunking strategy params must be a dictionary, got {type(params)}")

    # Strategy factory mapping
    strategies = {
        "IdentityChunking": IdentityChunking,
        "RegexChunking": RegexChunking,
        "NlpSentenceChunking": NlpSentenceChunking,
        "TopicSegmentationChunking": TopicSegmentationChunking,
        "FixedLengthWordChunking": FixedLengthWordChunking,
        "SlidingWindowChunking": SlidingWindowChunking,
        "OverlappingWindowChunking": OverlappingWindowChunking,
    }

    if strategy_type not in strategies:
        available = ", ".join(strategies.keys())
        raise ValueError(f"Unknown chunking strategy type: {strategy_type}. Available: {available}")

    try:
        return strategies[strategy_type](**params)
    except Exception as e:
        raise ValueError(f"Failed to create {strategy_type} with params {params}: {str(e)}")


# ============================================================================
# Table Extraction Utilities
# ============================================================================

def create_table_extraction_strategy(config):
|
||||
"""
|
||||
Create a table extraction strategy from configuration.
|
||||
|
||||
Args:
|
||||
config: TableExtractionConfig instance or dict
|
||||
|
||||
Returns:
|
||||
TableExtractionStrategy instance
|
||||
|
||||
Raises:
|
||||
ValueError: If strategy type is unknown or configuration is invalid
|
||||
"""
|
||||
from crawl4ai.table_extraction import (
|
||||
NoTableExtraction,
|
||||
DefaultTableExtraction,
|
||||
LLMTableExtraction
|
||||
)
|
||||
from schemas import TableExtractionStrategy
|
||||
|
||||
# Handle both Pydantic model and dict
|
||||
if hasattr(config, 'strategy'):
|
||||
strategy_type = config.strategy
|
||||
elif isinstance(config, dict):
|
||||
strategy_type = config.get('strategy', 'default')
|
||||
else:
|
||||
strategy_type = 'default'
|
||||
|
||||
# Convert string to enum if needed
|
||||
if isinstance(strategy_type, str):
|
||||
strategy_type = strategy_type.lower()
|
||||
|
||||
# Extract configuration values
|
||||
def get_config_value(key, default=None):
|
||||
if hasattr(config, key):
|
||||
return getattr(config, key)
|
||||
elif isinstance(config, dict):
|
||||
return config.get(key, default)
|
||||
return default
|
||||
|
||||
# Create strategy based on type
|
||||
if strategy_type in ['none', TableExtractionStrategy.NONE]:
|
||||
return NoTableExtraction()
|
||||
|
||||
elif strategy_type in ['default', TableExtractionStrategy.DEFAULT]:
|
||||
return DefaultTableExtraction(
|
||||
table_score_threshold=get_config_value('table_score_threshold', 7),
|
||||
min_rows=get_config_value('min_rows', 0),
|
||||
min_cols=get_config_value('min_cols', 0),
|
||||
verbose=get_config_value('verbose', False)
|
||||
)
|
||||
|
||||
elif strategy_type in ['llm', TableExtractionStrategy.LLM]:
|
||||
from crawl4ai.types import LLMConfig
|
||||
|
||||
# Build LLM config
|
||||
llm_config = None
|
||||
llm_provider = get_config_value('llm_provider')
|
||||
llm_api_key = get_config_value('llm_api_key')
|
||||
llm_model = get_config_value('llm_model')
|
||||
llm_base_url = get_config_value('llm_base_url')
|
||||
|
||||
if llm_provider or llm_api_key:
|
||||
llm_config = LLMConfig(
|
||||
provider=llm_provider or "openai/gpt-4",
|
||||
api_token=llm_api_key,
|
||||
model=llm_model,
|
||||
base_url=llm_base_url
|
||||
)
|
||||
|
||||
return LLMTableExtraction(
|
||||
llm_config=llm_config,
|
||||
extraction_prompt=get_config_value('extraction_prompt'),
|
||||
table_score_threshold=get_config_value('table_score_threshold', 7),
|
||||
min_rows=get_config_value('min_rows', 0),
|
||||
min_cols=get_config_value('min_cols', 0),
|
||||
verbose=get_config_value('verbose', False)
|
||||
)
|
||||
|
||||
elif strategy_type in ['financial', TableExtractionStrategy.FINANCIAL]:
|
||||
# Financial strategy uses DefaultTableExtraction with specialized settings
|
||||
# optimized for financial data (tables with currency, numbers, etc.)
|
||||
return DefaultTableExtraction(
|
||||
table_score_threshold=get_config_value('table_score_threshold', 10), # Higher threshold for financial
|
||||
min_rows=get_config_value('min_rows', 2), # Financial tables usually have at least 2 rows
|
||||
min_cols=get_config_value('min_cols', 2), # Financial tables usually have at least 2 columns
|
||||
verbose=get_config_value('verbose', False)
|
||||
)
|
||||
|
||||
else:
|
||||
raise ValueError(f"Unknown table extraction strategy: {strategy_type}")
|
||||
|
||||
|
||||
def format_table_response(tables: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Format extracted tables for API response.
|
||||
|
||||
Args:
|
||||
tables: List of table dictionaries from table extraction strategy
|
||||
|
||||
Returns:
|
||||
List of formatted table dictionaries with consistent structure
|
||||
"""
|
||||
if not tables:
|
||||
return []
|
||||
|
||||
formatted_tables = []
|
||||
for idx, table in enumerate(tables):
|
||||
formatted = {
|
||||
"table_index": idx,
|
||||
"headers": table.get("headers", []),
|
||||
"rows": table.get("rows", []),
|
||||
"caption": table.get("caption"),
|
||||
"summary": table.get("summary"),
|
||||
"metadata": table.get("metadata", {}),
|
||||
"row_count": len(table.get("rows", [])),
|
||||
"col_count": len(table.get("headers", [])),
|
||||
}
|
||||
|
||||
# Add score if available (from scoring strategies)
|
||||
if "score" in table:
|
||||
formatted["score"] = table["score"]
|
||||
|
||||
# Add position information if available
|
||||
if "position" in table:
|
||||
formatted["position"] = table["position"]
|
||||
|
||||
formatted_tables.append(formatted)
|
||||
|
||||
return formatted_tables
|
||||
|
||||
|
||||
async def extract_tables_from_html(html: str, config = None):
|
||||
"""
|
||||
Extract tables from HTML content (async wrapper for CPU-bound operation).
|
||||
|
||||
Args:
|
||||
html: HTML content as string
|
||||
config: TableExtractionConfig instance or dict
|
||||
|
||||
Returns:
|
||||
List of formatted table dictionaries
|
||||
|
||||
Raises:
|
||||
ValueError: If HTML parsing fails
|
||||
"""
|
||||
import asyncio
|
||||
from functools import partial
|
||||
from lxml import html as lxml_html
|
||||
from schemas import TableExtractionConfig
|
||||
|
||||
# Define sync extraction function
|
||||
def _sync_extract():
|
||||
try:
|
||||
# Parse HTML
|
||||
element = lxml_html.fromstring(html)
|
||||
except Exception as e:
|
||||
raise ValueError(f"Failed to parse HTML: {str(e)}")
|
||||
|
||||
# Create strategy
|
||||
cfg = config if config is not None else TableExtractionConfig()
|
||||
strategy = create_table_extraction_strategy(cfg)
|
||||
|
||||
# Extract tables
|
||||
tables = strategy.extract_tables(element)
|
||||
|
||||
# Format response
|
||||
return format_table_response(tables)
|
||||
|
||||
# Run in executor to avoid blocking the event loop
|
||||
loop = asyncio.get_event_loop()
|
||||
return await loop.run_in_executor(None, _sync_extract)
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# End Table Extraction Utilities
|
||||
# ============================================================================
|
||||
@@ -1,159 +0,0 @@
|
||||
"""
|
||||
Webhook delivery service for Crawl4AI.
|
||||
|
||||
This module provides webhook notification functionality with exponential backoff retry logic.
|
||||
"""
|
||||
import asyncio
|
||||
import httpx
|
||||
import logging
|
||||
from typing import Dict, Optional
|
||||
from datetime import datetime, timezone
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class WebhookDeliveryService:
|
||||
"""Handles webhook delivery with exponential backoff retry logic."""
|
||||
|
||||
def __init__(self, config: Dict):
|
||||
"""
|
||||
Initialize the webhook delivery service.
|
||||
|
||||
Args:
|
||||
config: Application configuration dictionary containing webhook settings
|
||||
"""
|
||||
self.config = config.get("webhooks", {})
|
||||
self.max_attempts = self.config.get("retry", {}).get("max_attempts", 5)
|
||||
self.initial_delay = self.config.get("retry", {}).get("initial_delay_ms", 1000) / 1000
|
||||
self.max_delay = self.config.get("retry", {}).get("max_delay_ms", 32000) / 1000
|
||||
self.timeout = self.config.get("retry", {}).get("timeout_ms", 30000) / 1000
|
||||
|
||||
async def send_webhook(
|
||||
self,
|
||||
webhook_url: str,
|
||||
payload: Dict,
|
||||
headers: Optional[Dict[str, str]] = None
|
||||
) -> bool:
|
||||
"""
|
||||
Send webhook with exponential backoff retry logic.
|
||||
|
||||
Args:
|
||||
webhook_url: The URL to send the webhook to
|
||||
payload: The JSON payload to send
|
||||
headers: Optional custom headers
|
||||
|
||||
Returns:
|
||||
bool: True if delivered successfully, False otherwise
|
||||
"""
|
||||
default_headers = self.config.get("headers", {})
|
||||
merged_headers = {**default_headers, **(headers or {})}
|
||||
merged_headers["Content-Type"] = "application/json"
|
||||
|
||||
async with httpx.AsyncClient(timeout=self.timeout) as client:
|
||||
for attempt in range(self.max_attempts):
|
||||
try:
|
||||
logger.info(
|
||||
f"Sending webhook (attempt {attempt + 1}/{self.max_attempts}) to {webhook_url}"
|
||||
)
|
||||
|
||||
response = await client.post(
|
||||
webhook_url,
|
||||
json=payload,
|
||||
headers=merged_headers
|
||||
)
|
||||
|
||||
# Success or client error (don't retry client errors)
|
||||
if response.status_code < 500:
|
||||
if 200 <= response.status_code < 300:
|
||||
logger.info(f"Webhook delivered successfully to {webhook_url}")
|
||||
return True
|
||||
else:
|
||||
logger.warning(
|
||||
f"Webhook rejected with status {response.status_code}: {response.text[:200]}"
|
||||
)
|
||||
return False # Client error - don't retry
|
||||
|
||||
# Server error - retry with backoff
|
||||
logger.warning(
|
||||
f"Webhook failed with status {response.status_code}, will retry"
|
||||
)
|
||||
|
||||
except httpx.TimeoutException as exc:
|
||||
logger.error(f"Webhook timeout (attempt {attempt + 1}): {exc}")
|
||||
except httpx.RequestError as exc:
|
||||
logger.error(f"Webhook request error (attempt {attempt + 1}): {exc}")
|
||||
except Exception as exc:
|
||||
logger.error(f"Webhook delivery error (attempt {attempt + 1}): {exc}")
|
||||
|
||||
# Calculate exponential backoff delay
|
||||
if attempt < self.max_attempts - 1:
|
||||
delay = min(self.initial_delay * (2 ** attempt), self.max_delay)
|
||||
logger.info(f"Retrying in {delay}s...")
|
||||
await asyncio.sleep(delay)
|
||||
|
||||
logger.error(
|
||||
f"Webhook delivery failed after {self.max_attempts} attempts to {webhook_url}"
|
||||
)
|
||||
return False
|
||||
|
||||
async def notify_job_completion(
|
||||
self,
|
||||
task_id: str,
|
||||
task_type: str,
|
||||
status: str,
|
||||
urls: list,
|
||||
webhook_config: Optional[Dict],
|
||||
result: Optional[Dict] = None,
|
||||
error: Optional[str] = None
|
||||
):
|
||||
"""
|
||||
Notify webhook of job completion.
|
||||
|
||||
Args:
|
||||
task_id: The task identifier
|
||||
task_type: Type of task (e.g., "crawl", "llm_extraction")
|
||||
status: Task status ("completed" or "failed")
|
||||
urls: List of URLs that were crawled
|
||||
webhook_config: Webhook configuration from the job request
|
||||
result: Optional crawl result data
|
||||
error: Optional error message if failed
|
||||
"""
|
||||
# Determine webhook URL
|
||||
webhook_url = None
|
||||
data_in_payload = self.config.get("data_in_payload", False)
|
||||
custom_headers = None
|
||||
|
||||
if webhook_config:
|
||||
webhook_url = webhook_config.get("webhook_url")
|
||||
data_in_payload = webhook_config.get("webhook_data_in_payload", data_in_payload)
|
||||
custom_headers = webhook_config.get("webhook_headers")
|
||||
|
||||
if not webhook_url:
|
||||
webhook_url = self.config.get("default_url")
|
||||
|
||||
if not webhook_url:
|
||||
logger.debug("No webhook URL configured, skipping notification")
|
||||
return
|
||||
|
||||
# Check if webhooks are enabled
|
||||
if not self.config.get("enabled", True):
|
||||
logger.debug("Webhooks are disabled, skipping notification")
|
||||
return
|
||||
|
||||
# Build payload
|
||||
payload = {
|
||||
"task_id": task_id,
|
||||
"task_type": task_type,
|
||||
"status": status,
|
||||
"timestamp": datetime.now(timezone.utc).isoformat(),
|
||||
"urls": urls
|
||||
}
|
||||
|
||||
if error:
|
||||
payload["error"] = error
|
||||
|
||||
if data_in_payload and result:
|
||||
payload["data"] = result
|
||||
|
||||
# Send webhook (fire and forget - don't block on completion)
|
||||
await self.send_webhook(webhook_url, payload, custom_headers)
|
||||
@@ -6,16 +6,15 @@ x-base-config: &base-config
|
||||
- "11235:11235" # Gunicorn port
|
||||
env_file:
|
||||
- .llm.env # API keys (create from .llm.env.example)
|
||||
# Uncomment to set default environment variables (will overwrite .llm.env)
|
||||
# environment:
|
||||
# - OPENAI_API_KEY=${OPENAI_API_KEY:-}
|
||||
# - DEEPSEEK_API_KEY=${DEEPSEEK_API_KEY:-}
|
||||
# - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
|
||||
# - GROQ_API_KEY=${GROQ_API_KEY:-}
|
||||
# - TOGETHER_API_KEY=${TOGETHER_API_KEY:-}
|
||||
# - MISTRAL_API_KEY=${MISTRAL_API_KEY:-}
|
||||
# - GEMINI_API_KEY=${GEMINI_API_KEY:-}
|
||||
# - LLM_PROVIDER=${LLM_PROVIDER:-} # Optional: Override default provider (e.g., "anthropic/claude-3-opus")
|
||||
environment:
|
||||
- OPENAI_API_KEY=${OPENAI_API_KEY:-}
|
||||
- DEEPSEEK_API_KEY=${DEEPSEEK_API_KEY:-}
|
||||
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
|
||||
- GROQ_API_KEY=${GROQ_API_KEY:-}
|
||||
- TOGETHER_API_KEY=${TOGETHER_API_KEY:-}
|
||||
- MISTRAL_API_KEY=${MISTRAL_API_KEY:-}
|
||||
- GEMINI_API_TOKEN=${GEMINI_API_TOKEN:-}
|
||||
- LLM_PROVIDER=${LLM_PROVIDER:-} # Optional: Override default provider (e.g., "anthropic/claude-3-opus")
|
||||
volumes:
|
||||
- /dev/shm:/dev/shm # Chromium performance
|
||||
deploy:
|
||||
|
||||
431
docs/PROXY_ROTATION_STRATEGY_DOCS.md
Normal file
@@ -0,0 +1,431 @@
|
||||
# Proxy Rotation Strategy Documentation
|
||||
|
||||
## Overview
|
||||
|
||||
The Crawl4AI FastAPI server now includes comprehensive proxy rotation functionality that allows you to distribute requests across multiple proxy servers using different rotation strategies. This feature helps prevent IP blocking, distributes load across proxy infrastructure, and provides redundancy for high-availability crawling operations.
|
||||
|
||||
## Available Proxy Rotation Strategies
|
||||
|
||||
| Strategy | Description | Use Case | Performance |
|
||||
|----------|-------------|----------|-------------|
|
||||
| `round_robin` | Cycles through proxies sequentially | Even distribution, predictable pattern | ⭐⭐⭐⭐⭐ |
|
||||
| `random` | Randomly selects from available proxies | Unpredictable traffic pattern | ⭐⭐⭐⭐ |
|
||||
| `least_used` | Uses proxy with lowest usage count | Optimal load balancing | ⭐⭐⭐ |
|
||||
| `failure_aware` | Avoids failed proxies with auto-recovery | High availability, fault tolerance | ⭐⭐⭐⭐ |
|
||||
|
||||
## API Endpoints
|
||||
|
||||
### POST /crawl
|
||||
|
||||
Standard crawling endpoint with proxy rotation support.
|
||||
|
||||
**Request Body:**
|
||||
```json
|
||||
{
|
||||
"urls": ["https://example.com"],
|
||||
"proxy_rotation_strategy": "round_robin",
|
||||
"proxies": [
|
||||
{"server": "http://proxy1.com:8080", "username": "user1", "password": "pass1"},
|
||||
{"server": "http://proxy2.com:8080", "username": "user2", "password": "pass2"}
|
||||
],
|
||||
"browser_config": {},
|
||||
"crawler_config": {}
|
||||
}
|
||||
```
|
||||
|
||||
### POST /crawl/stream
|
||||
|
||||
Streaming crawling endpoint with proxy rotation support.
|
||||
|
||||
**Request Body:**
|
||||
```json
|
||||
{
|
||||
"urls": ["https://example.com"],
|
||||
"proxy_rotation_strategy": "failure_aware",
|
||||
"proxy_failure_threshold": 3,
|
||||
"proxy_recovery_time": 300,
|
||||
"proxies": [
|
||||
{"server": "http://proxy1.com:8080", "username": "user1", "password": "pass1"},
|
||||
{"server": "http://proxy2.com:8080", "username": "user2", "password": "pass2"}
|
||||
],
|
||||
"browser_config": {},
|
||||
"crawler_config": {
|
||||
"stream": true
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Parameters
|
||||
|
||||
### proxy_rotation_strategy (optional)
|
||||
- **Type:** `string`
|
||||
- **Default:** `null` (no proxy rotation)
|
||||
- **Options:** `"round_robin"`, `"random"`, `"least_used"`, `"failure_aware"`
|
||||
- **Description:** Selects the proxy rotation strategy for distributing requests
|
||||
|
||||
### proxies (optional)
|
||||
- **Type:** `array of objects`
|
||||
- **Default:** `null`
|
||||
- **Description:** List of proxy configurations to rotate between
|
||||
- **Required when:** `proxy_rotation_strategy` is specified
|
||||
|
||||
### proxy_failure_threshold (optional)
|
||||
- **Type:** `integer`
|
||||
- **Default:** `3`
|
||||
- **Range:** `1-10`
|
||||
- **Description:** Number of failures before marking a proxy as unhealthy (failure_aware only)
|
||||
|
||||
### proxy_recovery_time (optional)
|
||||
- **Type:** `integer`
|
||||
- **Default:** `300` (5 minutes)
|
||||
- **Range:** `60-3600` seconds
|
||||
- **Description:** Time to wait before attempting to use a failed proxy again (failure_aware only)
|
||||
|
||||
## Proxy Configuration Format
|
||||
|
||||
### Full Configuration
|
||||
```json
|
||||
{
|
||||
"server": "http://proxy.example.com:8080",
|
||||
"username": "proxy_user",
|
||||
"password": "proxy_pass",
|
||||
"ip": "192.168.1.100"
|
||||
}
|
||||
```
|
||||
|
||||
### Minimal Configuration
|
||||
```json
|
||||
{
|
||||
"server": "http://192.168.1.100:8080"
|
||||
}
|
||||
```
|
||||
|
||||
### SOCKS Proxy Support
|
||||
```json
|
||||
{
|
||||
"server": "socks5://127.0.0.1:1080",
|
||||
"username": "socks_user",
|
||||
"password": "socks_pass"
|
||||
}
|
||||
```
|
||||
|
||||
## Usage Examples
|
||||
|
||||
### 1. Round Robin Strategy
|
||||
|
||||
```bash
|
||||
curl -X POST "http://localhost:11235/crawl" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"urls": ["https://httpbin.org/ip"],
|
||||
"proxy_rotation_strategy": "round_robin",
|
||||
"proxies": [
|
||||
{"server": "http://proxy1.com:8080", "username": "user1", "password": "pass1"},
|
||||
{"server": "http://proxy2.com:8080", "username": "user2", "password": "pass2"},
|
||||
{"server": "http://proxy3.com:8080", "username": "user3", "password": "pass3"}
|
||||
]
|
||||
}'
|
||||
```
|
||||
|
||||
### 2. Random Strategy with Minimal Config
|
||||
|
||||
```bash
|
||||
curl -X POST "http://localhost:11235/crawl" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"urls": ["https://httpbin.org/headers"],
|
||||
"proxy_rotation_strategy": "random",
|
||||
"proxies": [
|
||||
{"server": "http://192.168.1.100:8080"},
|
||||
{"server": "http://192.168.1.101:8080"},
|
||||
{"server": "http://192.168.1.102:8080"}
|
||||
]
|
||||
}'
|
||||
```
|
||||
|
||||
### 3. Least Used Strategy with Load Balancing
|
||||
|
||||
```bash
|
||||
curl -X POST "http://localhost:11235/crawl" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"urls": ["https://example.com", "https://httpbin.org/html", "https://httpbin.org/json"],
|
||||
"proxy_rotation_strategy": "least_used",
|
||||
"proxies": [
|
||||
{"server": "http://proxy1.com:8080", "username": "user1", "password": "pass1"},
|
||||
{"server": "http://proxy2.com:8080", "username": "user2", "password": "pass2"}
|
||||
],
|
||||
"crawler_config": {
|
||||
"cache_mode": "bypass"
|
||||
}
|
||||
}'
|
||||
```
|
||||
|
||||
### 4. Failure-Aware Strategy with High Availability
|
||||
|
||||
```bash
|
||||
curl -X POST "http://localhost:11235/crawl" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"urls": ["https://example.com"],
|
||||
"proxy_rotation_strategy": "failure_aware",
|
||||
"proxy_failure_threshold": 2,
|
||||
"proxy_recovery_time": 180,
|
||||
"proxies": [
|
||||
{"server": "http://proxy1.com:8080", "username": "user1", "password": "pass1"},
|
||||
{"server": "http://proxy2.com:8080", "username": "user2", "password": "pass2"},
|
||||
{"server": "http://proxy3.com:8080", "username": "user3", "password": "pass3"}
|
||||
],
|
||||
"headless": true
|
||||
}'
|
||||
```
|
||||
|
||||
### 5. Streaming with Proxy Rotation
|
||||
|
||||
```bash
|
||||
curl -X POST "http://localhost:11235/crawl/stream" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"urls": ["https://example.com", "https://httpbin.org/html"],
|
||||
"proxy_rotation_strategy": "round_robin",
|
||||
"proxies": [
|
||||
{"server": "http://proxy1.com:8080", "username": "user1", "password": "pass1"},
|
||||
{"server": "http://proxy2.com:8080", "username": "user2", "password": "pass2"}
|
||||
],
|
||||
"crawler_config": {
|
||||
"stream": true,
|
||||
"cache_mode": "bypass"
|
||||
}
|
||||
}'
|
||||
```
|
||||
|
||||
## Combining with Anti-Bot Strategies
|
||||
|
||||
You can combine proxy rotation with anti-bot strategies for maximum effectiveness:
|
||||
|
||||
```bash
|
||||
curl -X POST "http://localhost:11235/crawl" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"urls": ["https://protected-site.com"],
|
||||
"anti_bot_strategy": "stealth",
|
||||
"proxy_rotation_strategy": "failure_aware",
|
||||
"proxy_failure_threshold": 2,
|
||||
"proxies": [
|
||||
{"server": "http://proxy1.com:8080", "username": "user1", "password": "pass1"},
|
||||
{"server": "http://proxy2.com:8080", "username": "user2", "password": "pass2"}
|
||||
],
|
||||
"headless": true,
|
||||
"browser_config": {
|
||||
"enable_stealth": true
|
||||
}
|
||||
}'
|
||||
```
|
||||
|
||||
## Strategy Details
|
||||
|
||||
### Round Robin Strategy
|
||||
- **Algorithm:** Sequential cycling through proxy list
|
||||
- **Pros:** Predictable, even distribution, simple
|
||||
- **Cons:** Predictable pattern may be detectable
|
||||
- **Best for:** General use, development, testing
|
||||
|
||||
### Random Strategy
|
||||
- **Algorithm:** Random selection from available proxies
|
||||
- **Pros:** Unpredictable pattern, good for evasion
|
||||
- **Cons:** Uneven distribution possible
|
||||
- **Best for:** Anti-detection, varying traffic patterns
|
||||
|
||||
### Least Used Strategy
|
||||
- **Algorithm:** Selects proxy with minimum usage count
|
||||
- **Pros:** Optimal load balancing, prevents overloading
|
||||
- **Cons:** Slightly more complex, tracking overhead
|
||||
- **Best for:** High-volume crawling, load balancing
|
||||
|
||||
### Failure-Aware Strategy
|
||||
- **Algorithm:** Tracks proxy health, auto-recovery
|
||||
- **Pros:** High availability, fault tolerance, automatic recovery
|
||||
- **Cons:** Most complex, memory overhead for tracking
|
||||
- **Best for:** Production environments, critical crawling
|
||||
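To make the selection behavior described above concrete, here is a minimal, illustrative sketch of round-robin and least-used selection over a proxy list. It is only a sketch of the logic, not the server's internal implementation, and the `SimpleProxyRotator` class with its method names is hypothetical.

```python
import itertools
from typing import Dict, List


class SimpleProxyRotator:
    """Illustrative only: shows the round_robin and least_used selection logic."""

    def __init__(self, proxies: List[Dict]):
        self.proxies = proxies
        self._cycle = itertools.cycle(proxies)            # round_robin state
        self._usage = {p["server"]: 0 for p in proxies}   # least_used counters

    def round_robin(self) -> Dict:
        # Sequential cycling: proxy1 -> proxy2 -> proxy3 -> proxy1 -> ...
        return next(self._cycle)

    def least_used(self) -> Dict:
        # Pick the proxy with the smallest usage count, then record the use
        proxy = min(self.proxies, key=lambda p: self._usage[p["server"]])
        self._usage[proxy["server"]] += 1
        return proxy


rotator = SimpleProxyRotator([
    {"server": "http://proxy1.com:8080"},
    {"server": "http://proxy2.com:8080"},
])
print(rotator.round_robin()["server"])  # http://proxy1.com:8080
print(rotator.round_robin()["server"])  # http://proxy2.com:8080
```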
|
||||
## Error Handling
|
||||
|
||||
### Common Errors
|
||||
|
||||
#### Invalid Proxy Configuration
|
||||
```json
|
||||
{
|
||||
"error": "Invalid proxy configuration: Proxy configuration missing 'server' field: {'username': 'user1'}"
|
||||
}
|
||||
```
|
||||
|
||||
#### Unsupported Strategy
|
||||
```json
|
||||
{
|
||||
"error": "Unsupported proxy rotation strategy: invalid_strategy. Available: round_robin, random, least_used, failure_aware"
|
||||
}
|
||||
```
|
||||
|
||||
#### Missing Proxies
|
||||
When `proxy_rotation_strategy` is specified but `proxies` is empty:
|
||||
```json
|
||||
{
|
||||
"error": "proxy_rotation_strategy specified but no proxies provided"
|
||||
}
|
||||
```
|
||||
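A small client-side check can catch all three error cases before a request is ever sent. The `validate_proxy_request` helper below is hypothetical (it is not part of the server); it simply mirrors the error messages documented above.

```python
VALID_STRATEGIES = {"round_robin", "random", "least_used", "failure_aware"}


def validate_proxy_request(strategy, proxies):
    """Raise ValueError locally instead of waiting for the server to reject the job."""
    if strategy is None:
        return
    if strategy not in VALID_STRATEGIES:
        raise ValueError(
            f"Unsupported proxy rotation strategy: {strategy}. "
            f"Available: {', '.join(sorted(VALID_STRATEGIES))}"
        )
    if not proxies:
        raise ValueError("proxy_rotation_strategy specified but no proxies provided")
    for proxy in proxies:
        if "server" not in proxy:
            raise ValueError(f"Invalid proxy configuration: missing 'server' field: {proxy}")
```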
|
||||
## Environment Variable Support
|
||||
|
||||
You can also configure proxies using environment variables:
|
||||
|
||||
```bash
|
||||
# Set proxy list (comma-separated)
|
||||
export PROXIES="proxy1.com:8080:user1:pass1,proxy2.com:8080:user2:pass2"
|
||||
|
||||
# Set default strategy
|
||||
export PROXY_ROTATION_STRATEGY="round_robin"
|
||||
```
|
||||
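If you load proxies from the `PROXIES` variable shown above, a helper along these lines can turn the comma-separated `host:port:user:pass` entries into the proxy objects used elsewhere in this document. The parsing rules (colon-separated fields, credentials optional, `http://` assumed) are an assumption based on the example format; adapt them to your deployment.

```python
import os
from typing import Dict, List


def parse_proxies_env(raw: str) -> List[Dict[str, str]]:
    """Parse 'host:port[:user:pass]' entries, comma-separated, into proxy configs."""
    proxies = []
    for entry in raw.split(","):
        parts = entry.strip().split(":")
        if len(parts) < 2:
            continue  # skip empty or malformed entries
        proxy = {"server": f"http://{parts[0]}:{parts[1]}"}
        if len(parts) >= 4:
            proxy["username"], proxy["password"] = parts[2], parts[3]
        proxies.append(proxy)
    return proxies


proxies = parse_proxies_env(os.environ.get("PROXIES", ""))
# e.g. [{'server': 'http://proxy1.com:8080', 'username': 'user1', 'password': 'pass1'}, ...]
```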
|
||||
## Performance Considerations
|
||||
|
||||
1. **Strategy Overhead:**
|
||||
- Round Robin: Minimal overhead
|
||||
- Random: Low overhead
|
||||
- Least Used: Medium overhead (usage tracking)
|
||||
- Failure Aware: High overhead (health tracking)
|
||||
|
||||
2. **Memory Usage:**
|
||||
- Round Robin: ~O(n) where n = number of proxies
|
||||
- Random: ~O(n)
|
||||
- Least Used: ~O(n) + usage counters
|
||||
- Failure Aware: ~O(n) + health tracking data
|
||||
|
||||
3. **Concurrent Safety:**
|
||||
- All strategies are async-safe with proper locking
|
||||
- No race conditions in proxy selection
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Production Deployment:**
|
||||
- Use `failure_aware` strategy for high availability
|
||||
- Set appropriate failure thresholds (2-3)
|
||||
- Use recovery times between 3-10 minutes
|
||||
|
||||
2. **Development/Testing:**
|
||||
- Use `round_robin` for predictable behavior
|
||||
- Start with small proxy pools (2-3 proxies)
|
||||
|
||||
3. **Anti-Detection:**
|
||||
- Combine with `stealth` or `undetected` anti-bot strategies
|
||||
- Use `random` strategy for unpredictable patterns
|
||||
- Vary proxy geographic locations
|
||||
|
||||
4. **Load Balancing:**
|
||||
- Use `least_used` for even distribution
|
||||
- Monitor proxy performance and adjust pools accordingly
|
||||
|
||||
5. **Error Monitoring:**
|
||||
- Monitor failure rates with `failure_aware` strategy
|
||||
- Set up alerts for proxy pool depletion
|
||||
- Implement fallback mechanisms
|
||||
|
||||
## Integration Examples
|
||||
|
||||
### Python Requests
|
||||
```python
|
||||
import requests
|
||||
|
||||
payload = {
|
||||
"urls": ["https://example.com"],
|
||||
"proxy_rotation_strategy": "round_robin",
|
||||
"proxies": [
|
||||
{"server": "http://proxy1.com:8080", "username": "user1", "password": "pass1"},
|
||||
{"server": "http://proxy2.com:8080", "username": "user2", "password": "pass2"}
|
||||
]
|
||||
}
|
||||
|
||||
response = requests.post("http://localhost:11235/crawl", json=payload)
|
||||
print(response.json())
|
||||
```
|
||||
|
||||
### JavaScript/Node.js
|
||||
```javascript
|
||||
const axios = require('axios');
|
||||
|
||||
const payload = {
|
||||
urls: ["https://example.com"],
|
||||
proxy_rotation_strategy: "failure_aware",
|
||||
proxy_failure_threshold: 2,
|
||||
proxies: [
|
||||
{server: "http://proxy1.com:8080", username: "user1", password: "pass1"},
|
||||
{server: "http://proxy2.com:8080", username: "user2", password: "pass2"}
|
||||
]
|
||||
};
|
||||
|
||||
axios.post('http://localhost:11235/crawl', payload)
|
||||
.then(response => console.log(response.data))
|
||||
.catch(error => console.error(error));
|
||||
```
|
||||
|
||||
### cURL with Multiple URLs
|
||||
```bash
|
||||
curl -X POST "http://localhost:11235/crawl" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"urls": [
|
||||
"https://example.com",
|
||||
"https://httpbin.org/html",
|
||||
"https://httpbin.org/json",
|
||||
"https://httpbin.org/xml"
|
||||
],
|
||||
"proxy_rotation_strategy": "least_used",
|
||||
"proxies": [
|
||||
{"server": "http://proxy1.com:8080", "username": "user1", "password": "pass1"},
|
||||
{"server": "http://proxy2.com:8080", "username": "user2", "password": "pass2"},
|
||||
{"server": "http://proxy3.com:8080", "username": "user3", "password": "pass3"}
|
||||
],
|
||||
"crawler_config": {
|
||||
"cache_mode": "bypass",
|
||||
"wait_for_images": false
|
||||
}
|
||||
}'
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
1. **All proxies failing:**
|
||||
- Check proxy connectivity (a quick standalone check is sketched after this list)
|
||||
- Verify authentication credentials
|
||||
- Ensure proxy servers support the target protocols
|
||||
|
||||
2. **Uneven distribution:**
|
||||
- Use `least_used` strategy for better balancing
|
||||
- Monitor proxy usage patterns
|
||||
|
||||
3. **High memory usage:**
|
||||
- Reduce proxy pool size
|
||||
- Consider using `round_robin` instead of `failure_aware`
|
||||
|
||||
4. **Slow performance:**
|
||||
- Check proxy response times
|
||||
- Use geographically closer proxies
|
||||
- Reduce failure thresholds
|
||||
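For the "All proxies failing" case, a quick standalone check such as the sketch below can confirm that a proxy is reachable and that its credentials are accepted before you adjust the rotation strategy. The `check_proxy` helper is hypothetical and uses the `requests` library directly; it is not part of Crawl4AI.

```python
import requests


def check_proxy(server: str, username: str = None, password: str = None) -> bool:
    """Return True if the proxy can fetch a known endpoint within 10 seconds."""
    creds = f"{username}:{password}@" if username and password else ""
    proxy_url = server.replace("://", f"://{creds}", 1) if creds else server
    proxies = {"http": proxy_url, "https": proxy_url}
    try:
        resp = requests.get("https://httpbin.org/ip", proxies=proxies, timeout=10)
        return resp.status_code == 200
    except requests.RequestException:
        return False


print(check_proxy("http://proxy1.com:8080", "user1", "pass1"))
```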
|
||||
### Debug Information
|
||||
|
||||
Enable verbose logging to see proxy selection details:
|
||||
|
||||
```json
|
||||
{
|
||||
"urls": ["https://example.com"],
|
||||
"proxy_rotation_strategy": "failure_aware",
|
||||
"proxies": [...],
|
||||
"crawler_config": {
|
||||
"verbose": true
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
This will log which proxy is selected for each request and any failure/recovery events.
|
||||
@@ -10,6 +10,7 @@ Today I'm releasing Crawl4AI v0.7.4—the Intelligent Table Extraction & Perform
|
||||
|
||||
- **🚀 LLMTableExtraction**: Revolutionary table extraction with intelligent chunking for massive tables
|
||||
- **⚡ Enhanced Concurrency**: True concurrency improvements for fast-completing tasks in batch operations
|
||||
- **🧹 Memory Management Refactor**: Streamlined memory utilities and better resource management
|
||||
- **🔧 Browser Manager Fixes**: Resolved race conditions in concurrent page creation
|
||||
- **⌨️ Cross-Platform Browser Profiler**: Improved keyboard handling and quit mechanisms
|
||||
- **🔗 Advanced URL Processing**: Better handling of raw URLs and base tag link resolution
|
||||
@@ -157,6 +158,40 @@ async with AsyncWebCrawler() as crawler:
|
||||
- **Monitoring Systems**: Faster health checks and status page monitoring
|
||||
- **Data Aggregation**: Improved performance for real-time data collection
|
||||
|
||||
## 🧹 Memory Management Refactor: Cleaner Architecture
|
||||
|
||||
**The Problem:** Memory utilities were scattered and difficult to maintain, with potential import conflicts and unclear organization.
|
||||
|
||||
**My Solution:** I consolidated all memory-related utilities into the main `utils.py` module, creating a cleaner, more maintainable architecture.
|
||||
|
||||
### Improved Memory Handling
|
||||
|
||||
```python
|
||||
# All memory utilities now consolidated
|
||||
from crawl4ai.utils import get_true_memory_usage_percent, MemoryMonitor
|
||||
|
||||
# Enhanced memory monitoring
|
||||
monitor = MemoryMonitor()
|
||||
monitor.start_monitoring()
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
# Memory-efficient batch processing
|
||||
results = await crawler.arun_many(large_url_list)
|
||||
|
||||
# Get accurate memory metrics
|
||||
memory_usage = get_true_memory_usage_percent()
|
||||
memory_report = monitor.get_report()
|
||||
|
||||
print(f"Memory efficiency: {memory_report['efficiency']:.1f}%")
|
||||
print(f"Peak usage: {memory_report['peak_mb']:.1f} MB")
|
||||
```
|
||||
|
||||
**Expected Real-World Impact:**
|
||||
- **Production Stability**: More reliable memory tracking and management
|
||||
- **Code Maintainability**: Cleaner architecture for easier debugging
|
||||
- **Import Clarity**: Resolved potential conflicts and import issues
|
||||
- **Developer Experience**: Simpler API for memory monitoring
|
||||
|
||||
## 🔧 Critical Stability Fixes
|
||||
|
||||
### Browser Manager Race Condition Resolution
|
||||
|
||||
@@ -1,318 +0,0 @@
|
||||
# 🚀 Crawl4AI v0.7.5: The Docker Hooks & Security Update
|
||||
|
||||
*September 29, 2025 • 8 min read*
|
||||
|
||||
---
|
||||
|
||||
Today I'm releasing Crawl4AI v0.7.5—focused on extensibility and security. This update introduces the Docker Hooks System for pipeline customization, enhanced LLM integration, and important security improvements.
|
||||
|
||||
## 🎯 What's New at a Glance
|
||||
|
||||
- **Docker Hooks System**: Custom Python functions at key pipeline points with function-based API
|
||||
- **Function-Based Hooks**: New `hooks_to_string()` utility with Docker client auto-conversion
|
||||
- **Enhanced LLM Integration**: Custom providers with temperature control
|
||||
- **HTTPS Preservation**: Secure internal link handling
|
||||
- **Bug Fixes**: Resolved multiple community-reported issues
|
||||
- **Improved Docker Error Handling**: Better debugging and reliability
|
||||
|
||||
## 🔧 Docker Hooks System: Pipeline Customization
|
||||
|
||||
Every scraping project needs custom logic—authentication, performance optimization, content processing. Traditional solutions require forking or complex workarounds. Docker Hooks let you inject custom Python functions at 8 key points in the crawling pipeline.
|
||||
|
||||
### Real Example: Authentication & Performance
|
||||
|
||||
```python
|
||||
import requests
|
||||
|
||||
# Real working hooks for httpbin.org
|
||||
hooks_config = {
|
||||
"on_page_context_created": """
|
||||
async def hook(page, context, **kwargs):
|
||||
print("Hook: Setting up page context")
|
||||
# Block images to speed up crawling
|
||||
await context.route("**/*.{png,jpg,jpeg,gif,webp}", lambda route: route.abort())
|
||||
print("Hook: Images blocked")
|
||||
return page
|
||||
""",
|
||||
|
||||
"before_retrieve_html": """
|
||||
async def hook(page, context, **kwargs):
|
||||
print("Hook: Before retrieving HTML")
|
||||
# Scroll to bottom to load lazy content
|
||||
await page.evaluate("window.scrollTo(0, document.body.scrollHeight)")
|
||||
await page.wait_for_timeout(1000)
|
||||
print("Hook: Scrolled to bottom")
|
||||
return page
|
||||
""",
|
||||
|
||||
"before_goto": """
|
||||
async def hook(page, context, url, **kwargs):
|
||||
print(f"Hook: About to navigate to {url}")
|
||||
# Add custom headers
|
||||
await page.set_extra_http_headers({
|
||||
'X-Test-Header': 'crawl4ai-hooks-test'
|
||||
})
|
||||
return page
|
||||
"""
|
||||
}
|
||||
|
||||
# Test with Docker API
|
||||
payload = {
|
||||
"urls": ["https://httpbin.org/html"],
|
||||
"hooks": {
|
||||
"code": hooks_config,
|
||||
"timeout": 30
|
||||
}
|
||||
}
|
||||
|
||||
response = requests.post("http://localhost:11235/crawl", json=payload)
|
||||
result = response.json()
|
||||
|
||||
if result.get('success'):
|
||||
print("✅ Hooks executed successfully!")
|
||||
print(f"Content length: {len(result.get('markdown', ''))} characters")
|
||||
```
|
||||
|
||||
**Available Hook Points:**
|
||||
- `on_browser_created`: Browser setup
|
||||
- `on_page_context_created`: Page context configuration
|
||||
- `before_goto`: Pre-navigation setup
|
||||
- `after_goto`: Post-navigation processing
|
||||
- `on_user_agent_updated`: User agent changes
|
||||
- `on_execution_started`: Crawl initialization
|
||||
- `before_retrieve_html`: Pre-extraction processing
|
||||
- `before_return_html`: Final HTML processing
|
||||
|
||||
### Function-Based Hooks API
|
||||
|
||||
Writing hooks as strings works, but lacks IDE support and type checking. v0.7.5 introduces a function-based approach with automatic conversion!
|
||||
|
||||
**Option 1: Using the `hooks_to_string()` Utility**
|
||||
|
||||
```python
|
||||
from crawl4ai import hooks_to_string
|
||||
import requests
|
||||
|
||||
# Define hooks as regular Python functions (with full IDE support!)
|
||||
async def on_page_context_created(page, context, **kwargs):
|
||||
"""Block images to speed up crawling"""
|
||||
await context.route("**/*.{png,jpg,jpeg,gif,webp}", lambda route: route.abort())
|
||||
await page.set_viewport_size({"width": 1920, "height": 1080})
|
||||
return page
|
||||
|
||||
async def before_goto(page, context, url, **kwargs):
|
||||
"""Add custom headers"""
|
||||
await page.set_extra_http_headers({
|
||||
'X-Crawl4AI': 'v0.7.5',
|
||||
'X-Custom-Header': 'my-value'
|
||||
})
|
||||
return page
|
||||
|
||||
# Convert functions to strings
|
||||
hooks_code = hooks_to_string({
|
||||
"on_page_context_created": on_page_context_created,
|
||||
"before_goto": before_goto
|
||||
})
|
||||
|
||||
# Use with REST API
|
||||
payload = {
|
||||
"urls": ["https://httpbin.org/html"],
|
||||
"hooks": {"code": hooks_code, "timeout": 30}
|
||||
}
|
||||
response = requests.post("http://localhost:11235/crawl", json=payload)
|
||||
```
|
||||
|
||||
**Option 2: Docker Client with Automatic Conversion (Recommended!)**
|
||||
|
||||
```python
|
||||
from crawl4ai.docker_client import Crawl4aiDockerClient
|
||||
|
||||
# Define hooks as functions (same as above)
|
||||
async def on_page_context_created(page, context, **kwargs):
|
||||
await context.route("**/*.{png,jpg,jpeg,gif,webp}", lambda route: route.abort())
|
||||
return page
|
||||
|
||||
async def before_retrieve_html(page, context, **kwargs):
|
||||
# Scroll to load lazy content
|
||||
await page.evaluate("window.scrollTo(0, document.body.scrollHeight)")
|
||||
await page.wait_for_timeout(1000)
|
||||
return page
|
||||
|
||||
# Use Docker client - conversion happens automatically!
|
||||
client = Crawl4aiDockerClient(base_url="http://localhost:11235")
|
||||
|
||||
results = await client.crawl(
|
||||
urls=["https://httpbin.org/html"],
|
||||
hooks={
|
||||
"on_page_context_created": on_page_context_created,
|
||||
"before_retrieve_html": before_retrieve_html
|
||||
},
|
||||
hooks_timeout=30
|
||||
)
|
||||
|
||||
if results and results.success:
|
||||
print(f"✅ Hooks executed! HTML length: {len(results.html)}")
|
||||
```
|
||||
|
||||
**Benefits of Function-Based Hooks:**
|
||||
- ✅ Full IDE support (autocomplete, syntax highlighting)
|
||||
- ✅ Type checking and linting
|
||||
- ✅ Easier to test and debug
|
||||
- ✅ Reusable across projects
|
||||
- ✅ Automatic conversion in Docker client
|
||||
- ✅ No breaking changes - string hooks still work!
|
||||
|
||||
## 🤖 Enhanced LLM Integration
|
||||
|
||||
Enhanced LLM integration with custom providers, temperature control, and base URL configuration.
|
||||
|
||||
### Multi-Provider Support
|
||||
|
||||
```python
|
||||
import requests

from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
|
||||
from crawl4ai.extraction_strategy import LLMExtractionStrategy
|
||||
|
||||
# Test with different providers
|
||||
async def test_llm_providers():
|
||||
# Gemini provider with custom temperature
|
||||
llm_strategy = LLMExtractionStrategy(
|
||||
provider="gemini/gemini-2.5-flash-lite",
|
||||
api_token="your-api-token",
|
||||
temperature=0.7, # New in v0.7.5
|
||||
instruction="Summarize this page in one sentence"
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result = await crawler.arun(
|
||||
"https://example.com",
|
||||
config=CrawlerRunConfig(extraction_strategy=llm_strategy)
|
||||
)
|
||||
|
||||
if result.success:
|
||||
print("✅ LLM extraction completed")
|
||||
print(result.extracted_content)
|
||||
|
||||
# Docker API with enhanced LLM config
|
||||
llm_payload = {
|
||||
"url": "https://example.com",
|
||||
"f": "llm",
|
||||
"q": "Summarize this page in one sentence.",
|
||||
"provider": "gemini/gemini-2.5-flash-lite",
|
||||
"temperature": 0.7
|
||||
}
|
||||
|
||||
response = requests.post("http://localhost:11235/md", json=llm_payload)
|
||||
```
|
||||
|
||||
**New Features:**
|
||||
- Custom `temperature` parameter for creativity control
|
||||
- `base_url` for custom API endpoints
|
||||
- Multi-provider environment variable support
|
||||
- Docker API integration
|
||||
|
||||
## 🔒 HTTPS Preservation
|
||||
|
||||
**The Problem:** Modern web apps require HTTPS everywhere. When crawlers downgrade internal links from HTTPS to HTTP, authentication breaks and security warnings appear.
|
||||
|
||||
**Solution:** HTTPS preservation maintains secure protocols throughout crawling.
|
||||
|
||||
```python
|
||||
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, FilterChain, URLPatternFilter, BFSDeepCrawlStrategy
|
||||
|
||||
async def test_https_preservation():
|
||||
# Enable HTTPS preservation
|
||||
url_filter = URLPatternFilter(
|
||||
patterns=[r"^(https://)?quotes\.toscrape\.com(/.*)?$"]
|
||||
)
|
||||
|
||||
config = CrawlerRunConfig(
|
||||
exclude_external_links=True,
stream=True,  # stream results so the `async for` loop below receives them
|
||||
preserve_https_for_internal_links=True, # New in v0.7.5
|
||||
deep_crawl_strategy=BFSDeepCrawlStrategy(
|
||||
max_depth=2,
|
||||
max_pages=5,
|
||||
filter_chain=FilterChain([url_filter])
|
||||
)
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
async for result in await crawler.arun(
|
||||
url="https://quotes.toscrape.com",
|
||||
config=config
|
||||
):
|
||||
# All internal links maintain HTTPS
|
||||
internal_links = [link['href'] for link in result.links['internal']]
|
||||
https_links = [link for link in internal_links if link.startswith('https://')]
|
||||
|
||||
print(f"HTTPS links preserved: {len(https_links)}/{len(internal_links)}")
|
||||
for link in https_links[:3]:
|
||||
print(f" → {link}")
|
||||
```
|
||||
|
||||
## 🛠️ Bug Fixes and Improvements
|
||||
|
||||
### Major Fixes
|
||||
- **URL Processing**: Fixed '+' sign preservation in query parameters (#1332)
|
||||
- **Proxy Configuration**: Enhanced proxy string parsing (old `proxy` parameter deprecated)
|
||||
- **Docker Error Handling**: Comprehensive error messages with status codes
|
||||
- **Memory Management**: Fixed leaks in long-running sessions
|
||||
- **JWT Authentication**: Fixed Docker JWT validation issues (#1442)
|
||||
- **Playwright Stealth**: Fixed stealth features for Playwright integration (#1481)
|
||||
- **API Configuration**: Fixed config handling to prevent overriding user-provided settings (#1505)
|
||||
- **Docker Filter Serialization**: Resolved JSON encoding errors in deep crawl strategy (#1419)
|
||||
- **LLM Provider Support**: Fixed custom LLM provider integration for adaptive crawler (#1291)
|
||||
- **Performance Issues**: Resolved backoff strategy failures and timeout handling (#989)
|
||||
|
||||
### Community-Reported Issues Fixed
|
||||
This release addresses multiple issues reported by the community through GitHub issues and Discord discussions:
|
||||
- Fixed browser configuration reference errors
|
||||
- Resolved dependency conflicts with cssselect
|
||||
- Improved error messaging for failed authentications
|
||||
- Enhanced compatibility with various proxy configurations
|
||||
- Fixed edge cases in URL normalization
|
||||
|
||||
### Configuration Updates
|
||||
```python
|
||||
# Old proxy config (deprecated)
|
||||
# browser_config = BrowserConfig(proxy="http://proxy:8080")
|
||||
|
||||
# New enhanced proxy config
|
||||
browser_config = BrowserConfig(
|
||||
proxy_config={
|
||||
"server": "http://proxy:8080",
|
||||
"username": "optional-user",
|
||||
"password": "optional-pass"
|
||||
}
|
||||
)
|
||||
```
|
||||
|
||||
## 🔄 Breaking Changes
|
||||
|
||||
1. **Python 3.10+ Required**: Upgrade from Python 3.9
|
||||
2. **Proxy Parameter Deprecated**: Use new `proxy_config` structure
|
||||
3. **New Dependency**: Added `cssselect` for better CSS handling
|
||||
|
||||
## 🚀 Get Started
|
||||
|
||||
```bash
|
||||
# Install latest version
|
||||
pip install crawl4ai==0.7.5
|
||||
|
||||
# Docker deployment
|
||||
docker pull unclecode/crawl4ai:latest
|
||||
docker run -p 11235:11235 unclecode/crawl4ai:latest
|
||||
```
|
||||
|
||||
**Try the Demo:**
|
||||
```bash
|
||||
# Run working examples
|
||||
python docs/releases_review/demo_v0.7.5.py
|
||||
```
|
||||
|
||||
**Resources:**
|
||||
- 📖 Documentation: [docs.crawl4ai.com](https://docs.crawl4ai.com)
|
||||
- 🐙 GitHub: [github.com/unclecode/crawl4ai](https://github.com/unclecode/crawl4ai)
|
||||
- 💬 Discord: [discord.gg/crawl4ai](https://discord.gg/jP8KfhDhyN)
|
||||
- 🐦 Twitter: [@unclecode](https://x.com/unclecode)
|
||||
|
||||
Happy crawling! 🕷️
|
||||
@@ -1,314 +0,0 @@
|
||||
# Crawl4AI v0.7.6 Release Notes
|
||||
|
||||
*Release Date: October 22, 2025*
|
||||
|
||||
I'm excited to announce Crawl4AI v0.7.6, featuring a complete webhook infrastructure for the Docker job queue API! This release eliminates polling and brings real-time notifications to both crawling and LLM extraction workflows.
|
||||
|
||||
## 🎯 What's New
|
||||
|
||||
### Webhook Support for Docker Job Queue API
|
||||
|
||||
The headline feature of v0.7.6 is comprehensive webhook support for asynchronous job processing. No more constant polling to check if your jobs are done - get instant notifications when they complete!
|
||||
|
||||
**Key Capabilities:**
|
||||
|
||||
- ✅ **Universal Webhook Support**: Both `/crawl/job` and `/llm/job` endpoints now support webhooks
|
||||
- ✅ **Flexible Delivery Modes**: Choose notification-only or include full data in the webhook payload
|
||||
- ✅ **Reliable Delivery**: Exponential backoff retry mechanism (5 attempts: 1s → 2s → 4s → 8s → 16s; a short sketch of the schedule follows this list)
|
||||
- ✅ **Custom Authentication**: Add custom headers for webhook authentication
|
||||
- ✅ **Global Configuration**: Set default webhook URL in `config.yml` for all jobs
|
||||
- ✅ **Task Type Identification**: Distinguish between `crawl` and `llm_extraction` tasks
|
||||
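The retry schedule above follows a simple doubling rule capped at a maximum delay. The sketch below shows how those waits are derived; the defaults (1s initial delay, 32s cap, 5 attempts) match the delivery service's retry settings (`initial_delay_ms`, `max_delay_ms`, `max_attempts`), which can be adjusted in `config.yml`.

```python
def backoff_delay(attempt: int, initial_delay: float = 1.0, max_delay: float = 32.0) -> float:
    """Seconds to wait after a failed delivery attempt (0-indexed), doubling up to the cap."""
    return min(initial_delay * (2 ** attempt), max_delay)


# Waits between the five delivery attempts: [1.0, 2.0, 4.0, 8.0, 16.0]
print([backoff_delay(attempt) for attempt in range(5)])
```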
|
||||
### How It Works
|
||||
|
||||
Instead of constantly checking job status:
|
||||
|
||||
**OLD WAY (Polling):**
|
||||
```python
|
||||
import time

import requests

# Submit job
|
||||
response = requests.post("http://localhost:11235/crawl/job", json=payload)
|
||||
task_id = response.json()['task_id']
|
||||
|
||||
# Poll until complete
|
||||
while True:
|
||||
status = requests.get(f"http://localhost:11235/crawl/job/{task_id}")
|
||||
if status.json()['status'] == 'completed':
|
||||
break
|
||||
time.sleep(5) # Wait and try again
|
||||
```
|
||||
|
||||
**NEW WAY (Webhooks):**
|
||||
```python
|
||||
import requests

# Submit job with webhook
|
||||
payload = {
|
||||
"urls": ["https://example.com"],
|
||||
"webhook_config": {
|
||||
"webhook_url": "https://myapp.com/webhook",
|
||||
"webhook_data_in_payload": True
|
||||
}
|
||||
}
|
||||
response = requests.post("http://localhost:11235/crawl/job", json=payload)
|
||||
|
||||
# Done! Webhook will notify you when complete
|
||||
# Your webhook handler receives the results automatically
|
||||
```
|
||||
|
||||
### Crawl Job Webhooks
|
||||
|
||||
```bash
|
||||
curl -X POST http://localhost:11235/crawl/job \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"urls": ["https://example.com"],
|
||||
"browser_config": {"headless": true},
|
||||
"crawler_config": {"cache_mode": "bypass"},
|
||||
"webhook_config": {
|
||||
"webhook_url": "https://myapp.com/webhooks/crawl-complete",
|
||||
"webhook_data_in_payload": false,
|
||||
"webhook_headers": {
|
||||
"X-Webhook-Secret": "your-secret-token"
|
||||
}
|
||||
}
|
||||
}'
|
||||
```
|
||||
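
On the receiving side, the `X-Webhook-Secret` header from the example above can be verified before the payload is trusted. A minimal Flask sketch (the route path and secret value are illustrative):

```python
from flask import Flask, request, jsonify, abort

app = Flask(__name__)
WEBHOOK_SECRET = "your-secret-token"  # must match the value sent in webhook_headers

@app.route('/webhooks/crawl-complete', methods=['POST'])
def crawl_complete():
    # Reject deliveries that don't carry the shared secret
    if request.headers.get('X-Webhook-Secret') != WEBHOOK_SECRET:
        abort(401)

    payload = request.json
    print(f"Crawl job {payload['task_id']} finished with status {payload['status']}")
    return jsonify({"status": "received"}), 200
```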
|
||||
### LLM Extraction Job Webhooks (NEW!)
|
||||
|
||||
```bash
|
||||
curl -X POST http://localhost:11235/llm/job \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"url": "https://example.com/article",
|
||||
"q": "Extract the article title, author, and publication date",
|
||||
"schema": "{\"type\":\"object\",\"properties\":{\"title\":{\"type\":\"string\"}}}",
|
||||
"provider": "openai/gpt-4o-mini",
|
||||
"webhook_config": {
|
||||
"webhook_url": "https://myapp.com/webhooks/llm-complete",
|
||||
"webhook_data_in_payload": true
|
||||
}
|
||||
}'
|
||||
```
|
||||
|
||||
### Webhook Payload Structure
|
||||
|
||||
**Success (with data):**
|
||||
```json
|
||||
{
|
||||
"task_id": "llm_1698765432",
|
||||
"task_type": "llm_extraction",
|
||||
"status": "completed",
|
||||
"timestamp": "2025-10-22T10:30:00.000000+00:00",
|
||||
"urls": ["https://example.com/article"],
|
||||
"data": {
|
||||
"extracted_content": {
|
||||
"title": "Understanding Web Scraping",
|
||||
"author": "John Doe",
|
||||
"date": "2025-10-22"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Failure:**
|
||||
```json
|
||||
{
|
||||
"task_id": "crawl_abc123",
|
||||
"task_type": "crawl",
|
||||
"status": "failed",
|
||||
"timestamp": "2025-10-22T10:30:00.000000+00:00",
|
||||
"urls": ["https://example.com"],
|
||||
"error": "Connection timeout after 30s"
|
||||
}
|
||||
```
|
||||
|
||||
### Simple Webhook Handler Example
|
||||
|
||||
```python
|
||||
import requests
from flask import Flask, request, jsonify
|
||||
|
||||
app = Flask(__name__)
|
||||
|
||||
@app.route('/webhook', methods=['POST'])
|
||||
def handle_webhook():
|
||||
payload = request.json
|
||||
|
||||
task_id = payload['task_id']
|
||||
task_type = payload['task_type']
|
||||
status = payload['status']
|
||||
|
||||
if status == 'completed':
|
||||
if 'data' in payload:
|
||||
# Process data directly
|
||||
data = payload['data']
|
||||
else:
|
||||
# Fetch from API
|
||||
endpoint = 'crawl' if task_type == 'crawl' else 'llm'
|
||||
response = requests.get(f'http://localhost:11235/{endpoint}/job/{task_id}')
|
||||
data = response.json()
|
||||
|
||||
# Your business logic here
|
||||
print(f"Job {task_id} completed!")
|
||||
|
||||
elif status == 'failed':
|
||||
error = payload.get('error', 'Unknown error')
|
||||
print(f"Job {task_id} failed: {error}")
|
||||
|
||||
return jsonify({"status": "received"}), 200
|
||||
|
||||
app.run(port=8080)
|
||||
```
|
||||
|
||||
## 📊 Performance Improvements
|
||||
|
||||
- **Reduced Server Load**: Eliminates constant polling requests
|
||||
- **Lower Latency**: Instant notification vs. polling interval delay
|
||||
- **Better Resource Usage**: Frees up client connections while jobs run in background
|
||||
- **Scalable Architecture**: Handles high-volume crawling workflows efficiently
|
||||
|
||||
## 🐛 Bug Fixes
|
||||
|
||||
- Fixed webhook configuration serialization for Pydantic HttpUrl fields
|
||||
- Improved error handling in webhook delivery service
|
||||
- Enhanced Redis task storage for webhook config persistence
|
||||
|
||||
## 🌍 Expected Real-World Impact
|
||||
|
||||
### For Web Scraping Workflows
|
||||
- **Reduced Costs**: Fewer API calls = lower bandwidth and server costs
|
||||
- **Better UX**: Instant notifications improve user experience
|
||||
- **Scalability**: Handle 100s of concurrent jobs without polling overhead
|
||||
|
||||
### For LLM Extraction Pipelines
|
||||
- **Async Processing**: Submit LLM extraction jobs and move on
|
||||
- **Batch Processing**: Queue multiple extractions, get notified as they complete
|
||||
- **Integration**: Easy integration with workflow automation tools (Zapier, n8n, etc.)
|
||||
|
||||
### For Microservices
|
||||
- **Event-Driven**: Perfect for event-driven microservice architectures
|
||||
- **Decoupling**: Decouple job submission from result processing
|
||||
- **Reliability**: Automatic retries ensure webhooks are delivered
|
||||
|
||||
## 🔄 Breaking Changes
|
||||
|
||||
**None!** This release is fully backward compatible.
|
||||
|
||||
- Webhook configuration is optional
|
||||
- Existing code continues to work without modification
|
||||
- Polling is still supported for jobs without webhook config
|
||||
|
||||
## 📚 Documentation
|
||||
|
||||
### New Documentation
|
||||
- **[WEBHOOK_EXAMPLES.md](../deploy/docker/WEBHOOK_EXAMPLES.md)** - Comprehensive webhook usage guide
|
||||
- **[docker_webhook_example.py](../docs/examples/docker_webhook_example.py)** - Working code examples
|
||||
|
||||
### Updated Documentation
|
||||
- **[Docker README](../deploy/docker/README.md)** - Added webhook sections
|
||||
- API documentation with webhook examples
|
||||
|
||||
## 🛠️ Migration Guide
|
||||
|
||||
No migration needed! Webhooks are opt-in:
|
||||
|
||||
1. **To use webhooks**: Add `webhook_config` to your job payload
|
||||
2. **To keep polling**: Continue using your existing code
|
||||
|
||||
### Quick Start
|
||||
|
||||
```python
|
||||
# Just add webhook_config to your existing payload
|
||||
payload = {
|
||||
# Your existing configuration
|
||||
"urls": ["https://example.com"],
|
||||
"browser_config": {...},
|
||||
"crawler_config": {...},
|
||||
|
||||
# NEW: Add webhook configuration
|
||||
"webhook_config": {
|
||||
"webhook_url": "https://myapp.com/webhook",
|
||||
"webhook_data_in_payload": True
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## 🔧 Configuration
|
||||
|
||||
### Global Webhook Configuration (config.yml)
|
||||
|
||||
```yaml
|
||||
webhooks:
|
||||
enabled: true
|
||||
default_url: "https://myapp.com/webhooks/default" # Optional
|
||||
data_in_payload: false
|
||||
retry:
|
||||
max_attempts: 5
|
||||
initial_delay_ms: 1000
|
||||
max_delay_ms: 32000
|
||||
timeout_ms: 30000
|
||||
headers:
|
||||
User-Agent: "Crawl4AI-Webhook/1.0"
|
||||
```
|
||||
|
||||
## 🚀 Upgrade Instructions
|
||||
|
||||
### Docker
|
||||
|
||||
```bash
|
||||
# Pull the latest image
|
||||
docker pull unclecode/crawl4ai:0.7.6
|
||||
|
||||
# Or use latest tag
|
||||
docker pull unclecode/crawl4ai:latest
|
||||
|
||||
# Run with webhook support
|
||||
docker run -d \
|
||||
-p 11235:11235 \
|
||||
--env-file .llm.env \
|
||||
--name crawl4ai \
|
||||
unclecode/crawl4ai:0.7.6
|
||||
```
|
||||
|
||||
### Python Package
|
||||
|
||||
```bash
|
||||
pip install --upgrade crawl4ai
|
||||
```
|
||||
|
||||
## 💡 Pro Tips
|
||||
|
||||
1. **Use notification-only mode** for large results - fetch data separately to avoid large webhook payloads
|
||||
2. **Set custom headers** for webhook authentication and request tracking
|
||||
3. **Configure global default webhook** for consistent handling across all jobs
|
||||
4. **Implement idempotent webhook handlers** - same webhook may be delivered multiple times on retry (see the sketch after this list)
|
||||
5. **Use structured schemas** with LLM extraction for predictable webhook data
|
||||
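
A minimal sketch of tip 4 - deduplicating deliveries by `task_id` (the in-memory set is illustrative; use persistent storage in production):

```python
from flask import Flask, request, jsonify

app = Flask(__name__)
processed_task_ids = set()  # swap for a database or cache in production

@app.route('/webhook', methods=['POST'])
def handle_webhook():
    payload = request.json
    task_id = payload['task_id']

    # Retries can deliver the same webhook more than once; act on each task only once
    if task_id in processed_task_ids:
        return jsonify({"status": "already processed"}), 200

    processed_task_ids.add(task_id)
    # ... your business logic here ...
    return jsonify({"status": "received"}), 200
```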
|
||||
## 🎬 Demo
|
||||
|
||||
Try the release demo:
|
||||
|
||||
```bash
|
||||
python docs/releases_review/demo_v0.7.6.py
|
||||
```
|
||||
|
||||
This comprehensive demo showcases:
|
||||
- Crawl job webhooks (notification-only and with data)
|
||||
- LLM extraction webhooks (with JSON schema support)
|
||||
- Custom headers for authentication
|
||||
- Webhook retry mechanism
|
||||
- Real-time webhook receiver
|
||||
|
||||
## 🙏 Acknowledgments
|
||||
|
||||
Thank you to the community for the feedback that shaped this feature! Special thanks to everyone who requested webhook support for asynchronous job processing.
|
||||
|
||||
## 📞 Support
|
||||
|
||||
- **Documentation**: https://docs.crawl4ai.com
|
||||
- **GitHub Issues**: https://github.com/unclecode/crawl4ai/issues
|
||||
- **Discord**: https://discord.gg/crawl4ai
|
||||
|
||||
---
|
||||
|
||||
**Happy crawling with webhooks!** 🕷️🪝
|
||||
|
||||
*- unclecode*
|
||||
@@ -1,626 +0,0 @@
|
||||
# 🚀 Crawl4AI v0.7.7: The Self-Hosting & Monitoring Update
|
||||
|
||||
*November 14, 2025 • 10 min read*
|
||||
|
||||
---
|
||||
|
||||
Today I'm releasing Crawl4AI v0.7.7—the Self-Hosting & Monitoring Update. This release transforms Crawl4AI Docker from a simple containerized crawler into a complete self-hosting platform with enterprise-grade real-time monitoring, full operational transparency, and production-ready observability.
|
||||
|
||||
## 🎯 What's New at a Glance
|
||||
|
||||
- **📊 Real-time Monitoring Dashboard**: Interactive web UI with live system metrics and browser pool status
|
||||
- **🔌 Comprehensive Monitor API**: Complete REST API for programmatic access to all monitoring data
|
||||
- **⚡ WebSocket Streaming**: Real-time updates every 2 seconds for custom dashboards
|
||||
- **🎮 Control Actions**: Manual browser management (kill, restart, cleanup)
|
||||
- **🔥 Smart Browser Pool**: 3-tier architecture (permanent/hot/cold) with automatic promotion
|
||||
- **🧹 Janitor Cleanup System**: Automatic resource management with event logging
|
||||
- **📈 Production Metrics**: 6 critical metrics for operational excellence
|
||||
- **🏭 Integration Ready**: Prometheus, alerting, and log aggregation examples
|
||||
- **🐛 Critical Bug Fixes**: Async LLM extraction, DFS crawling, viewport config, and more
|
||||
|
||||
## 📊 Real-time Monitoring Dashboard: Complete Visibility
|
||||
|
||||
**The Problem:** Running Crawl4AI in Docker was like flying blind. Users had no visibility into what was happening inside the container—memory usage, active requests, browser pools, or errors. Troubleshooting required checking logs, and there was no way to monitor performance or manually intervene when issues occurred.
|
||||
|
||||
**My Solution:** I built a complete real-time monitoring system with an interactive dashboard, comprehensive REST API, WebSocket streaming, and manual control actions. Now you have full transparency and control over your crawling infrastructure.
|
||||
|
||||
### The Self-Hosting Value Proposition
|
||||
|
||||
Before v0.7.7, Docker was just a containerized crawler. After v0.7.7, it's a complete self-hosting platform that gives you:
|
||||
|
||||
- **🔒 Data Privacy**: Your data never leaves your infrastructure
|
||||
- **💰 Cost Control**: No per-request pricing or rate limits
|
||||
- **🎯 Full Customization**: Complete control over configurations and strategies
|
||||
- **📊 Complete Transparency**: Real-time visibility into every aspect
|
||||
- **⚡ Performance**: Direct access without network overhead
|
||||
- **🛡️ Enterprise Security**: Keep workflows behind your firewall
|
||||
|
||||
### Interactive Monitoring Dashboard
|
||||
|
||||
Access the dashboard at `http://localhost:11235/dashboard` to see:
|
||||
|
||||
- **System Health Overview**: CPU, memory, network, and uptime in real-time
|
||||
- **Live Request Tracking**: Active and completed requests with full details
|
||||
- **Browser Pool Management**: Interactive table with permanent/hot/cold browsers
|
||||
- **Janitor Events Log**: Automatic cleanup activities
|
||||
- **Error Monitoring**: Full context error logs
|
||||
|
||||
The dashboard updates every 2 seconds via WebSocket, giving you live visibility into your crawling operations.
|
||||
|
||||
## 🔌 Monitor API: Programmatic Access
|
||||
|
||||
**The Problem:** Monitoring dashboards are great for humans, but automation and integration require programmatic access.
|
||||
|
||||
**My Solution:** A comprehensive REST API that exposes all monitoring data for integration with your existing infrastructure.
|
||||
|
||||
### System Health Endpoint
|
||||
|
||||
```python
|
||||
import httpx
|
||||
import asyncio
|
||||
|
||||
async def monitor_system_health():
|
||||
async with httpx.AsyncClient() as client:
|
||||
response = await client.get("http://localhost:11235/monitor/health")
|
||||
health = response.json()
|
||||
|
||||
print(f"Container Metrics:")
|
||||
print(f" CPU: {health['container']['cpu_percent']:.1f}%")
|
||||
print(f" Memory: {health['container']['memory_percent']:.1f}%")
|
||||
print(f" Uptime: {health['container']['uptime_seconds']}s")
|
||||
|
||||
print(f"\nBrowser Pool:")
|
||||
print(f" Permanent: {health['pool']['permanent']['active']} active")
|
||||
print(f" Hot Pool: {health['pool']['hot']['count']} browsers")
|
||||
print(f" Cold Pool: {health['pool']['cold']['count']} browsers")
|
||||
|
||||
print(f"\nStatistics:")
|
||||
print(f" Total Requests: {health['stats']['total_requests']}")
|
||||
print(f" Success Rate: {health['stats']['success_rate_percent']:.1f}%")
|
||||
print(f" Avg Latency: {health['stats']['avg_latency_ms']:.0f}ms")
|
||||
|
||||
asyncio.run(monitor_system_health())
|
||||
```
|
||||
|
||||
### Request Tracking
|
||||
|
||||
```python
|
||||
async def track_requests():
|
||||
async with httpx.AsyncClient() as client:
|
||||
response = await client.get("http://localhost:11235/monitor/requests")
|
||||
requests_data = response.json()
|
||||
|
||||
print(f"Active Requests: {len(requests_data['active'])}")
|
||||
print(f"Completed Requests: {len(requests_data['completed'])}")
|
||||
|
||||
# See details of recent requests
|
||||
for req in requests_data['completed'][:5]:
|
||||
status_icon = "✅" if req['success'] else "❌"
|
||||
print(f"{status_icon} {req['endpoint']} - {req['latency_ms']:.0f}ms")
|
||||
```
|
||||
|
||||
### Browser Pool Management
|
||||
|
||||
```python
|
||||
async def monitor_browser_pool():
|
||||
async with httpx.AsyncClient() as client:
|
||||
response = await client.get("http://localhost:11235/monitor/browsers")
|
||||
browsers = response.json()
|
||||
|
||||
print(f"Pool Summary:")
|
||||
print(f" Total Browsers: {browsers['summary']['total_count']}")
|
||||
print(f" Total Memory: {browsers['summary']['total_memory_mb']} MB")
|
||||
print(f" Reuse Rate: {browsers['summary']['reuse_rate_percent']:.1f}%")
|
||||
|
||||
# List all browsers
|
||||
for browser in browsers['permanent']:
|
||||
print(f"🔥 Permanent: {browser['browser_id'][:8]}... | "
|
||||
f"Requests: {browser['request_count']} | "
|
||||
f"Memory: {browser['memory_mb']:.0f} MB")
|
||||
```
|
||||
|
||||
### Endpoint Performance Statistics
|
||||
|
||||
```python
|
||||
async def get_endpoint_stats():
|
||||
async with httpx.AsyncClient() as client:
|
||||
response = await client.get("http://localhost:11235/monitor/endpoints/stats")
|
||||
stats = response.json()
|
||||
|
||||
print("Endpoint Analytics:")
|
||||
for endpoint, data in stats.items():
|
||||
print(f" {endpoint}:")
|
||||
print(f" Requests: {data['count']}")
|
||||
print(f" Avg Latency: {data['avg_latency_ms']:.0f}ms")
|
||||
print(f" Success Rate: {data['success_rate_percent']:.1f}%")
|
||||
```
|
||||
|
||||
### Complete API Reference
|
||||
|
||||
The Monitor API includes these endpoints:
|
||||
|
||||
- `GET /monitor/health` - System health with pool statistics
|
||||
- `GET /monitor/requests` - Active and completed request tracking
|
||||
- `GET /monitor/browsers` - Browser pool details and efficiency
|
||||
- `GET /monitor/endpoints/stats` - Per-endpoint performance analytics
|
||||
- `GET /monitor/timeline?minutes=5` - Time-series data for charts (see the sketch after this list)
|
||||
- `GET /monitor/logs/janitor?limit=10` - Cleanup activity logs
|
||||
- `GET /monitor/logs/errors?limit=10` - Error logs with context
|
||||
- `POST /monitor/actions/cleanup` - Force immediate cleanup
|
||||
- `POST /monitor/actions/kill_browser` - Kill specific browser
|
||||
- `POST /monitor/actions/restart_browser` - Restart browser
|
||||
- `POST /monitor/stats/reset` - Reset accumulated statistics
|
||||
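
The timeline endpoint is handy for quick charts. A minimal sketch (the exact field names in the response are not documented here, so the example just prints the raw samples):

```python
import asyncio

import httpx

async def fetch_timeline(minutes: int = 5):
    async with httpx.AsyncClient() as client:
        response = await client.get(
            "http://localhost:11235/monitor/timeline",
            params={"minutes": minutes},
        )
        timeline = response.json()

    # Print raw samples; adapt field access once you inspect the payload
    samples = timeline if isinstance(timeline, list) else timeline.get("samples", [])
    for sample in samples:
        print(sample)

asyncio.run(fetch_timeline())
```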
|
||||
## ⚡ WebSocket Streaming: Real-time Updates
|
||||
|
||||
**The Problem:** Polling the API every few seconds wastes resources and adds latency. Real-time dashboards need instant updates.
|
||||
|
||||
**My Solution:** WebSocket streaming with 2-second update intervals for building custom real-time dashboards.
|
||||
|
||||
### WebSocket Integration Example
|
||||
|
||||
```python
|
||||
import websockets
|
||||
import json
|
||||
import asyncio
|
||||
|
||||
async def monitor_realtime():
|
||||
uri = "ws://localhost:11235/monitor/ws"
|
||||
|
||||
async with websockets.connect(uri) as websocket:
|
||||
print("Connected to real-time monitoring stream")
|
||||
|
||||
while True:
|
||||
# Receive update every 2 seconds
|
||||
data = await websocket.recv()
|
||||
update = json.loads(data)
|
||||
|
||||
# Access all monitoring data
|
||||
print(f"\n--- Update at {update['timestamp']} ---")
|
||||
print(f"Memory: {update['health']['container']['memory_percent']:.1f}%")
|
||||
print(f"Active Requests: {len(update['requests']['active'])}")
|
||||
print(f"Total Browsers: {update['browsers']['summary']['total_count']}")
|
||||
|
||||
if update['errors']:
|
||||
print(f"⚠️ Recent Errors: {len(update['errors'])}")
|
||||
|
||||
asyncio.run(monitor_realtime())
|
||||
```
|
||||
|
||||
**Expected Real-World Impact:**
|
||||
- **Custom Dashboards**: Build tailored monitoring UIs for your team
|
||||
- **Real-time Alerting**: Trigger alerts instantly when metrics exceed thresholds
|
||||
- **Integration**: Feed live data into monitoring tools like Grafana
|
||||
- **Automation**: React to events in real-time without polling
|
||||
|
||||
## 🔥 Smart Browser Pool: 3-Tier Architecture
|
||||
|
||||
**The Problem:** Creating a new browser for every request is slow and memory-intensive. Traditional browser pools are static and inefficient.
|
||||
|
||||
**My Solution:** A smart 3-tier browser pool that automatically adapts to usage patterns.
|
||||
|
||||
### How It Works
|
||||
|
||||
```python
|
||||
import asyncio

import httpx
|
||||
|
||||
async def demonstrate_browser_pool():
|
||||
async with httpx.AsyncClient() as client:
|
||||
# Request 1-3: Default config → Uses permanent browser
|
||||
print("Phase 1: Using permanent browser")
|
||||
for i in range(3):
|
||||
await client.post(
|
||||
"http://localhost:11235/crawl",
|
||||
json={"urls": [f"https://httpbin.org/html?req={i}"]}
|
||||
)
|
||||
print(f" Request {i+1}: Reused permanent browser")
|
||||
|
||||
# Request 4-6: Custom viewport → Cold pool (first use)
|
||||
print("\nPhase 2: Custom config creates cold pool browser")
|
||||
viewport_config = {"viewport": {"width": 1280, "height": 720}}
|
||||
for i in range(4):
|
||||
await client.post(
|
||||
"http://localhost:11235/crawl",
|
||||
json={
|
||||
"urls": [f"https://httpbin.org/json?v={i}"],
|
||||
"browser_config": viewport_config
|
||||
}
|
||||
)
|
||||
if i < 2:
|
||||
print(f" Request {i+1}: Cold pool browser")
|
||||
else:
|
||||
print(f" Request {i+1}: Promoted to hot pool! (after 3 uses)")
|
||||
|
||||
# Check pool status
|
||||
response = await client.get("http://localhost:11235/monitor/browsers")
|
||||
browsers = response.json()
|
||||
|
||||
print(f"\nPool Status:")
|
||||
print(f" Permanent: {len(browsers['permanent'])} (always active)")
|
||||
print(f" Hot: {len(browsers['hot'])} (frequently used configs)")
|
||||
print(f" Cold: {len(browsers['cold'])} (on-demand)")
|
||||
print(f" Reuse Rate: {browsers['summary']['reuse_rate_percent']:.1f}%")
|
||||
|
||||
asyncio.run(demonstrate_browser_pool())
|
||||
```
|
||||
|
||||
**Pool Tiers:**
|
||||
|
||||
- **🔥 Permanent Browser**: Always-on, default configuration, instant response
|
||||
- **♨️ Hot Pool**: Browsers promoted after 3+ uses, kept warm for quick access
|
||||
- **❄️ Cold Pool**: On-demand browsers for variant configs, cleaned up when idle
|
||||
|
||||
**Expected Real-World Impact:**
|
||||
- **Memory Efficiency**: 10x reduction in memory usage vs creating browsers per request
|
||||
- **Performance**: Instant access to frequently-used configurations
|
||||
- **Automatic Optimization**: Pool adapts to your usage patterns
|
||||
- **Resource Management**: Janitor automatically cleans up idle browsers
|
||||
|
||||
## 🧹 Janitor System: Automatic Cleanup
|
||||
|
||||
**The Problem:** Long-running crawlers accumulate idle browsers and consume memory over time.
|
||||
|
||||
**My Solution:** An automatic janitor system that monitors and cleans up idle resources.
|
||||
|
||||
```python
|
||||
async def monitor_janitor_activity():
|
||||
async with httpx.AsyncClient() as client:
|
||||
response = await client.get("http://localhost:11235/monitor/logs/janitor?limit=5")
|
||||
logs = response.json()
|
||||
|
||||
print("Recent Cleanup Activities:")
|
||||
for log in logs:
|
||||
print(f" {log['timestamp']}: {log['message']}")
|
||||
|
||||
# Example output:
|
||||
# 2025-11-14 10:30:00: Cleaned up 2 cold pool browsers (idle > 5min)
|
||||
# 2025-11-14 10:25:00: Browser reuse rate: 85.3%
|
||||
# 2025-11-14 10:20:00: Hot pool browser promoted (10 requests)
|
||||
```
|
||||
|
||||
## 🎮 Control Actions: Manual Management
|
||||
|
||||
**The Problem:** Sometimes you need to manually intervene—kill a stuck browser, force cleanup, or restart resources.
|
||||
|
||||
**My Solution:** Manual control actions via the API for operational troubleshooting.
|
||||
|
||||
### Force Cleanup
|
||||
|
||||
```python
|
||||
async def force_cleanup():
|
||||
async with httpx.AsyncClient() as client:
|
||||
response = await client.post("http://localhost:11235/monitor/actions/cleanup")
|
||||
result = response.json()
|
||||
|
||||
print(f"Cleanup completed:")
|
||||
print(f" Browsers cleaned: {result.get('cleaned_count', 0)}")
|
||||
print(f" Memory freed: {result.get('memory_freed_mb', 0):.1f} MB")
|
||||
```
|
||||
|
||||
### Kill Specific Browser
|
||||
|
||||
```python
|
||||
async def kill_stuck_browser(browser_id: str):
|
||||
async with httpx.AsyncClient() as client:
|
||||
response = await client.post(
|
||||
"http://localhost:11235/monitor/actions/kill_browser",
|
||||
json={"browser_id": browser_id}
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
print(f"✅ Browser {browser_id} killed successfully")
|
||||
```
|
||||
|
||||
### Reset Statistics
|
||||
|
||||
```python
|
||||
async def reset_stats():
|
||||
async with httpx.AsyncClient() as client:
|
||||
response = await client.post("http://localhost:11235/monitor/stats/reset")
|
||||
print("📊 Statistics reset for fresh monitoring")
|
||||
```
|
||||
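
### Restart Browser

For completeness, restarting a specific browser follows the same pattern as `kill_browser`. A minimal sketch, assuming the request body mirrors the kill action:

```python
async def restart_browser(browser_id: str):
    async with httpx.AsyncClient() as client:
        response = await client.post(
            "http://localhost:11235/monitor/actions/restart_browser",
            json={"browser_id": browser_id}  # assumed to mirror the kill_browser payload
        )

        if response.status_code == 200:
            print(f"🔄 Browser {browser_id} restarted")
```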
|
||||
## 📈 Production Integration Patterns
|
||||
|
||||
### Prometheus Integration
|
||||
|
||||
```python
|
||||
# Export metrics for Prometheus scraping
|
||||
async def export_prometheus_metrics():
|
||||
async with httpx.AsyncClient() as client:
|
||||
health = await client.get("http://localhost:11235/monitor/health")
|
||||
data = health.json()
|
||||
|
||||
# Export in Prometheus format
|
||||
metrics = f"""
|
||||
# HELP crawl4ai_memory_usage_percent Memory usage percentage
|
||||
# TYPE crawl4ai_memory_usage_percent gauge
|
||||
crawl4ai_memory_usage_percent {data['container']['memory_percent']}
|
||||
|
||||
# HELP crawl4ai_request_success_rate Request success rate
|
||||
# TYPE crawl4ai_request_success_rate gauge
|
||||
crawl4ai_request_success_rate {data['stats']['success_rate_percent']}
|
||||
|
||||
# HELP crawl4ai_browser_pool_count Total browsers in pool
|
||||
# TYPE crawl4ai_browser_pool_count gauge
|
||||
crawl4ai_browser_pool_count {data['pool']['permanent']['active'] + data['pool']['hot']['count'] + data['pool']['cold']['count']}
|
||||
"""
|
||||
return metrics
|
||||
```
|
||||
|
||||
### Alerting Example
|
||||
|
||||
```python
|
||||
async def check_alerts():
|
||||
async with httpx.AsyncClient() as client:
|
||||
health = await client.get("http://localhost:11235/monitor/health")
|
||||
data = health.json()
|
||||
|
||||
# Memory alert
|
||||
if data['container']['memory_percent'] > 80:
|
||||
print("🚨 ALERT: Memory usage above 80%")
|
||||
# Trigger cleanup
|
||||
await client.post("http://localhost:11235/monitor/actions/cleanup")
|
||||
|
||||
# Success rate alert
|
||||
if data['stats']['success_rate_percent'] < 90:
|
||||
print("🚨 ALERT: Success rate below 90%")
|
||||
# Check error logs
|
||||
errors = await client.get("http://localhost:11235/monitor/logs/errors")
|
||||
print(f"Recent errors: {len(errors.json())}")
|
||||
|
||||
# Latency alert
|
||||
if data['stats']['avg_latency_ms'] > 5000:
|
||||
print("🚨 ALERT: Average latency above 5s")
|
||||
```
|
||||
|
||||
### Key Metrics to Track
|
||||
|
||||
```python
|
||||
CRITICAL_METRICS = {
|
||||
"memory_usage": {
|
||||
"current": "container.memory_percent",
|
||||
"target": "<80%",
|
||||
"alert_threshold": ">80%",
|
||||
"action": "Force cleanup or scale"
|
||||
},
|
||||
"success_rate": {
|
||||
"current": "stats.success_rate_percent",
|
||||
"target": ">95%",
|
||||
"alert_threshold": "<90%",
|
||||
"action": "Check error logs"
|
||||
},
|
||||
"avg_latency": {
|
||||
"current": "stats.avg_latency_ms",
|
||||
"target": "<2000ms",
|
||||
"alert_threshold": ">5000ms",
|
||||
"action": "Investigate slow requests"
|
||||
},
|
||||
"browser_reuse_rate": {
|
||||
"current": "browsers.summary.reuse_rate_percent",
|
||||
"target": ">80%",
|
||||
"alert_threshold": "<60%",
|
||||
"action": "Check pool configuration"
|
||||
},
|
||||
"total_browsers": {
|
||||
"current": "browsers.summary.total_count",
|
||||
"target": "<15",
|
||||
"alert_threshold": ">20",
|
||||
"action": "Check for browser leaks"
|
||||
},
|
||||
"error_frequency": {
|
||||
"current": "len(errors)",
|
||||
"target": "<5/hour",
|
||||
"alert_threshold": ">10/hour",
|
||||
"action": "Review error patterns"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## 🐛 Critical Bug Fixes
|
||||
|
||||
This release includes significant bug fixes that improve stability and performance:
|
||||
|
||||
### Async LLM Extraction (#1590)
|
||||
|
||||
**The Problem:** LLM extraction was blocking async execution, causing URLs to be processed sequentially instead of in parallel (issue #1055).
|
||||
|
||||
**The Fix:** Resolved the blocking issue to enable true parallel processing for LLM extraction.
|
||||
|
||||
```python
|
||||
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, LLMExtractionStrategy

# Before v0.7.7: Sequential processing
|
||||
# After v0.7.7: True parallel processing
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
urls = ["url1", "url2", "url3", "url4"]
|
||||
|
||||
# Now processes truly in parallel with LLM extraction
|
||||
results = await crawler.arun_many(
|
||||
urls,
|
||||
config=CrawlerRunConfig(
|
||||
extraction_strategy=LLMExtractionStrategy(...)
|
||||
)
|
||||
)
|
||||
# 4x faster for parallel LLM extraction!
|
||||
```
|
||||
|
||||
**Expected Impact:** Major performance improvement for batch LLM extraction workflows.
|
||||
|
||||
### DFS Deep Crawling (#1607)
|
||||
|
||||
**The Problem:** DFS (Depth-First Search) deep crawl strategy had implementation issues.
|
||||
|
||||
**The Fix:** Enhanced DFSDeepCrawlStrategy with proper seen URL tracking and improved documentation.
|
||||
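
A short usage sketch of the fixed strategy (the parameters mirror the DFS demo script shipped with this release):

```python
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
from crawl4ai.deep_crawling.dfs_strategy import DFSDeepCrawlStrategy

config = CrawlerRunConfig(
    deep_crawl_strategy=DFSDeepCrawlStrategy(
        max_depth=2,
        max_pages=20,
        include_external=False,  # stay on the seed domain
    ),
    stream=True,
)

async with AsyncWebCrawler() as crawler:
    async for result in await crawler.arun(url="https://docs.python.org/3/", config=config):
        print(f"depth={result.metadata.get('depth')} url={result.url}")
```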
|
||||
### Browser & Crawler Config Documentation (#1609)
|
||||
|
||||
**The Problem:** Documentation didn't match the actual `async_configs.py` implementation.
|
||||
|
||||
**The Fix:** Updated all configuration documentation to accurately reflect the current implementation.
|
||||
|
||||
### Sitemap Seeder (#1598)
|
||||
|
||||
**The Problem:** Sitemap parsing and URL normalization issues in AsyncUrlSeeder (issue #1559).
|
||||
|
||||
**The Fix:** Added comprehensive tests and fixes for sitemap namespace parsing and URL normalization.
|
||||
|
||||
### Remove Overlay Elements (#1529)
|
||||
|
||||
**The Problem:** The `remove_overlay_elements` functionality wasn't working (issue #1396).
|
||||
|
||||
**The Fix:** Fixed by properly calling the injected JavaScript function.
|
||||
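
Usage is unchanged - the existing flag simply works now. A minimal sketch:

```python
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig

config = CrawlerRunConfig(remove_overlay_elements=True)  # popups and modals are stripped before extraction

async with AsyncWebCrawler() as crawler:
    result = await crawler.arun(url="https://example.com", config=config)
    print(result.markdown)
```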
|
||||
### Viewport Configuration (#1495)
|
||||
|
||||
**The Problem:** Viewport configuration wasn't working in managed browsers (issue #1490).
|
||||
|
||||
**The Fix:** Added proper viewport size configuration support for browser launch.
|
||||
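
A minimal sketch, assuming the `viewport_width`/`viewport_height` fields on `BrowserConfig` together with a managed browser:

```python
from crawl4ai import AsyncWebCrawler, BrowserConfig

browser_config = BrowserConfig(
    use_managed_browser=True,   # the scenario from issue #1490
    viewport_width=1280,
    viewport_height=720,
)

async with AsyncWebCrawler(config=browser_config) as crawler:
    result = await crawler.arun(url="https://example.com")
```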
|
||||
### Managed Browser CDP Timing (#1528)
|
||||
|
||||
**The Problem:** CDP (Chrome DevTools Protocol) endpoint verification had timing issues causing connection failures (issue #1445).
|
||||
|
||||
**The Fix:** Added exponential backoff for CDP endpoint verification to handle timing variations.
|
||||
|
||||
### Security Updates
|
||||
|
||||
- **pyOpenSSL**: Updated from >=24.3.0 to >=25.3.0 to address security vulnerability
|
||||
- Added verification tests for the security update
|
||||
|
||||
### Docker Fixes
|
||||
|
||||
- **Port Standardization**: Fixed inconsistent port usage (11234 vs 11235) - now standardized to 11235
|
||||
- **LLM Environment**: Fixed LLM API key handling for multi-provider support (PR #1537)
|
||||
- **Error Handling**: Improved Docker API error messages with comprehensive status codes
|
||||
- **Serialization**: Fixed `fit_html` property serialization in `/crawl` and `/crawl/stream` endpoints
|
||||
|
||||
### Other Important Fixes
|
||||
|
||||
- **arun_many Returns**: Fixed function to always return a list, even on exception (PR #1530)
|
||||
- **Webhook Serialization**: Properly serialize Pydantic HttpUrl in webhook config
|
||||
- **LLMConfig Documentation**: Fixed casing and variable name consistency (issue #1551)
|
||||
- **Python Version**: Dropped Python 3.9 support, now requires Python >=3.10
|
||||
|
||||
## 📊 Expected Real-World Impact
|
||||
|
||||
### For DevOps & Infrastructure Teams
|
||||
- **Full Visibility**: Know exactly what's happening inside your crawling infrastructure
|
||||
- **Proactive Monitoring**: Catch issues before they become problems
|
||||
- **Resource Optimization**: Identify memory leaks and performance bottlenecks
|
||||
- **Operational Control**: Manual intervention when automated systems need help
|
||||
|
||||
### For Production Deployments
|
||||
- **Enterprise Observability**: Prometheus, Grafana, and alerting integration
|
||||
- **Debugging**: Real-time logs and error tracking
|
||||
- **Capacity Planning**: Historical metrics for scaling decisions
|
||||
- **SLA Monitoring**: Track success rates and latency against targets
|
||||
|
||||
### For Development Teams
|
||||
- **Local Monitoring**: Understand crawler behavior during development
|
||||
- **Performance Testing**: Measure impact of configuration changes
|
||||
- **Troubleshooting**: Quickly identify and fix issues
|
||||
- **Learning**: See exactly how the browser pool works
|
||||
|
||||
## 🔄 Breaking Changes
|
||||
|
||||
**None!** This release is fully backward compatible.
|
||||
|
||||
- All existing Docker configurations continue to work
|
||||
- No API changes to existing endpoints
|
||||
- Monitoring is additive functionality
|
||||
- No migration required
|
||||
|
||||
## 🚀 Upgrade Instructions
|
||||
|
||||
### Docker
|
||||
|
||||
```bash
|
||||
# Pull the latest version
|
||||
docker pull unclecode/crawl4ai:0.7.7
|
||||
|
||||
# Or use the latest tag
|
||||
docker pull unclecode/crawl4ai:latest
|
||||
|
||||
# Run with monitoring enabled (default)
|
||||
docker run -d \
|
||||
-p 11235:11235 \
|
||||
--shm-size=1g \
|
||||
--name crawl4ai \
|
||||
unclecode/crawl4ai:0.7.7
|
||||
|
||||
# Access the monitoring dashboard
|
||||
open http://localhost:11235/dashboard
|
||||
```
|
||||
|
||||
### Python Package
|
||||
|
||||
```bash
|
||||
# Upgrade to latest version
|
||||
pip install --upgrade crawl4ai
|
||||
|
||||
# Or install specific version
|
||||
pip install crawl4ai==0.7.7
|
||||
```
|
||||
|
||||
## 🎬 Try the Demo
|
||||
|
||||
Run the comprehensive demo that showcases all monitoring features:
|
||||
|
||||
```bash
|
||||
python docs/releases_review/demo_v0.7.7.py
|
||||
```
|
||||
|
||||
**The demo includes:**
|
||||
1. System health overview with live metrics
|
||||
2. Request tracking with active/completed monitoring
|
||||
3. Browser pool management (permanent/hot/cold)
|
||||
4. Complete Monitor API endpoint examples
|
||||
5. WebSocket streaming demonstration
|
||||
6. Control actions (cleanup, kill, restart)
|
||||
7. Production metrics and alerting patterns
|
||||
8. Self-hosting value proposition
|
||||
|
||||
## 📚 Documentation
|
||||
|
||||
### New Documentation
|
||||
- **[Self-Hosting Guide](https://docs.crawl4ai.com/core/self-hosting/)** - Complete self-hosting documentation with monitoring
|
||||
- **Demo Script**: `docs/releases_review/demo_v0.7.7.py` - Working examples
|
||||
|
||||
### Updated Documentation
|
||||
- **Docker Deployment** → **Self-Hosting** (renamed for better positioning)
|
||||
- Added comprehensive monitoring sections
|
||||
- Production integration patterns
|
||||
- WebSocket streaming examples
|
||||
|
||||
## 💡 Pro Tips
|
||||
|
||||
1. **Start with the dashboard** - Visit `/dashboard` to get familiar with the monitoring system
|
||||
2. **Track the 6 key metrics** - Memory, success rate, latency, reuse rate, browser count, errors
|
||||
3. **Set up alerting early** - Use the Monitor API to build alerts before issues occur
|
||||
4. **Monitor browser pool efficiency** - Aim for >80% reuse rate for optimal performance
|
||||
5. **Use WebSocket for custom dashboards** - Build tailored monitoring UIs for your team
|
||||
6. **Leverage Prometheus integration** - Export metrics for long-term storage and analysis
|
||||
7. **Check janitor logs** - Understand automatic cleanup patterns
|
||||
8. **Use control actions judiciously** - Manual interventions are for exceptional cases
|
||||
|
||||
## 🙏 Acknowledgments
|
||||
|
||||
Thank you to our community for the feedback, bug reports, and feature requests that shaped this release. Special thanks to everyone who contributed to the issues that were fixed in this version.
|
||||
|
||||
The monitoring system was built based on real user needs for production deployments, and your input made it comprehensive and practical.
|
||||
|
||||
## 📞 Support & Resources
|
||||
|
||||
- **📖 Documentation**: [docs.crawl4ai.com](https://docs.crawl4ai.com)
|
||||
- **🐙 GitHub**: [github.com/unclecode/crawl4ai](https://github.com/unclecode/crawl4ai)
|
||||
- **💬 Discord**: [discord.gg/crawl4ai](https://discord.gg/jP8KfhDhyN)
|
||||
- **🐦 Twitter**: [@unclecode](https://x.com/unclecode)
|
||||
- **📊 Dashboard**: `http://localhost:11235/dashboard` (when running)
|
||||
|
||||
---
|
||||
|
||||
**Crawl4AI v0.7.7 delivers complete self-hosting with enterprise-grade monitoring. You now have full visibility and control over your web crawling infrastructure. The monitoring dashboard, comprehensive API, and WebSocket streaming give you everything needed for production deployments. Try the self-hosting platform—it's a game changer for operational excellence!**
|
||||
|
||||
**Happy crawling with full visibility!** 🕷️📊
|
||||
|
||||
*- unclecode*
|
||||
@@ -18,7 +18,7 @@ A comprehensive web-based tutorial for learning and experimenting with C4A-Scrip
|
||||
|
||||
2. **Install Dependencies**
|
||||
```bash
|
||||
pip install -r requirements.txt
|
||||
pip install flask
|
||||
```
|
||||
|
||||
3. **Launch the Server**
|
||||
@@ -28,7 +28,7 @@ A comprehensive web-based tutorial for learning and experimenting with C4A-Scrip
|
||||
|
||||
4. **Open in Browser**
|
||||
```
|
||||
http://localhost:8000
|
||||
http://localhost:8080
|
||||
```
|
||||
|
||||
**🌐 Try Online**: [Live Demo](https://docs.crawl4ai.com/c4a-script/demo)
|
||||
@@ -325,7 +325,7 @@ Powers the recording functionality:
|
||||
### Configuration
|
||||
```python
|
||||
# server.py configuration
|
||||
PORT = 8000
|
||||
PORT = 8080
|
||||
DEBUG = True
|
||||
THREADED = True
|
||||
```
|
||||
@@ -343,9 +343,9 @@ THREADED = True
|
||||
**Port Already in Use**
|
||||
```bash
|
||||
# Kill existing process
|
||||
lsof -ti:8000 | xargs kill -9
|
||||
lsof -ti:8080 | xargs kill -9
|
||||
# Or use different port
|
||||
python server.py --port 8001
|
||||
python server.py --port 8081
|
||||
```
|
||||
|
||||
**Blockly Not Loading**
|
||||
|
||||
@@ -216,7 +216,7 @@ def get_examples():
|
||||
'name': 'Handle Cookie Banner',
|
||||
'description': 'Accept cookies and close newsletter popup',
|
||||
'script': '''# Handle cookie banner and newsletter
|
||||
GO http://127.0.0.1:8000/playground/
|
||||
GO http://127.0.0.1:8080/playground/
|
||||
WAIT `body` 2
|
||||
IF (EXISTS `.cookie-banner`) THEN CLICK `.accept`
|
||||
IF (EXISTS `.newsletter-popup`) THEN CLICK `.close`'''
|
||||
|
||||
@@ -1,62 +0,0 @@
|
||||
import asyncio
|
||||
import capsolver
|
||||
from crawl4ai import *
|
||||
|
||||
|
||||
# TODO: set your config
|
||||
# Docs: https://docs.capsolver.com/guide/captcha/awsWaf/
|
||||
api_key = "CAP-xxxxxxxxxxxxxxxxxxxxx" # your api key of capsolver
|
||||
site_url = "https://nft.porsche.com/onboarding@6" # page url of your target site
|
||||
cookie_domain = ".nft.porsche.com" # the domain name to which you want to apply the cookie
|
||||
captcha_type = "AntiAwsWafTaskProxyLess" # type of your target captcha
|
||||
capsolver.api_key = api_key
|
||||
|
||||
|
||||
async def main():
|
||||
browser_config = BrowserConfig(
|
||||
verbose=True,
|
||||
headless=False,
|
||||
use_persistent_context=True,
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
await crawler.arun(
|
||||
url=site_url,
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
session_id="session_captcha_test"
|
||||
)
|
||||
|
||||
# get aws waf cookie using capsolver sdk
|
||||
solution = capsolver.solve({
|
||||
"type": captcha_type,
|
||||
"websiteURL": site_url,
|
||||
})
|
||||
cookie = solution["cookie"]
|
||||
print("aws waf cookie:", cookie)
|
||||
|
||||
js_code = """
|
||||
document.cookie = \'aws-waf-token=""" + cookie + """;domain=""" + cookie_domain + """;path=/\';
|
||||
location.reload();
|
||||
"""
|
||||
|
||||
wait_condition = """() => {
|
||||
return document.title === \'Join Porsche’s journey into Web3\';
|
||||
}"""
|
||||
|
||||
run_config = CrawlerRunConfig(
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
session_id="session_captcha_test",
|
||||
js_code=js_code,
|
||||
js_only=True,
|
||||
wait_for=f"js:{wait_condition}"
|
||||
)
|
||||
|
||||
result_next = await crawler.arun(
|
||||
url=site_url,
|
||||
config=run_config,
|
||||
)
|
||||
print(result_next.markdown)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
@@ -1,60 +0,0 @@
|
||||
import asyncio
|
||||
import capsolver
|
||||
from crawl4ai import *
|
||||
|
||||
|
||||
# TODO: set your config
|
||||
# Docs: https://docs.capsolver.com/guide/captcha/cloudflare_challenge/
|
||||
api_key = "CAP-xxxxxxxxxxxxxxxxxxxxx" # your api key of capsolver
|
||||
site_url = "https://gitlab.com/users/sign_in" # page url of your target site
|
||||
captcha_type = "AntiCloudflareTask" # type of your target captcha
|
||||
# your http proxy to solve cloudflare challenge
|
||||
proxy_server = "proxy.example.com:8080"
|
||||
proxy_username = "myuser"
|
||||
proxy_password = "mypass"
|
||||
capsolver.api_key = api_key
|
||||
|
||||
|
||||
async def main():
|
||||
# get challenge cookie using capsolver sdk
|
||||
solution = capsolver.solve({
|
||||
"type": captcha_type,
|
||||
"websiteURL": site_url,
|
||||
"proxy": f"{proxy_server}:{proxy_username}:{proxy_password}",
|
||||
})
|
||||
cookies = solution["cookies"]
|
||||
user_agent = solution["userAgent"]
|
||||
print("challenge cookies:", cookies)
|
||||
|
||||
cookies_list = []
|
||||
for name, value in cookies.items():
|
||||
cookies_list.append({
|
||||
"name": name,
|
||||
"value": value,
|
||||
"url": site_url,
|
||||
})
|
||||
|
||||
browser_config = BrowserConfig(
|
||||
verbose=True,
|
||||
headless=False,
|
||||
use_persistent_context=True,
|
||||
user_agent=user_agent,
|
||||
cookies=cookies_list,
|
||||
proxy_config={
|
||||
"server": f"http://{proxy_server}",
|
||||
"username": proxy_username,
|
||||
"password": proxy_password,
|
||||
},
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
result = await crawler.arun(
|
||||
url=site_url,
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
session_id="session_captcha_test"
|
||||
)
|
||||
print(result.markdown)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
@@ -1,64 +0,0 @@
|
||||
import asyncio
|
||||
import capsolver
|
||||
from crawl4ai import *
|
||||
|
||||
|
||||
# TODO: set your config
|
||||
# Docs: https://docs.capsolver.com/guide/captcha/cloudflare_turnstile/
|
||||
api_key = "CAP-xxxxxxxxxxxxxxxxxxxxx" # your api key of capsolver
|
||||
site_key = "0x4AAAAAAAGlwMzq_9z6S9Mh" # site key of your target site
|
||||
site_url = "https://clifford.io/demo/cloudflare-turnstile" # page url of your target site
|
||||
captcha_type = "AntiTurnstileTaskProxyLess" # type of your target captcha
|
||||
capsolver.api_key = api_key
|
||||
|
||||
|
||||
async def main():
|
||||
browser_config = BrowserConfig(
|
||||
verbose=True,
|
||||
headless=False,
|
||||
use_persistent_context=True,
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
await crawler.arun(
|
||||
url=site_url,
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
session_id="session_captcha_test"
|
||||
)
|
||||
|
||||
# get turnstile token using capsolver sdk
|
||||
solution = capsolver.solve({
|
||||
"type": captcha_type,
|
||||
"websiteURL": site_url,
|
||||
"websiteKey": site_key,
|
||||
})
|
||||
token = solution["token"]
|
||||
print("turnstile token:", token)
|
||||
|
||||
js_code = """
|
||||
document.querySelector(\'input[name="cf-turnstile-response"]\').value = \'"""+token+"""\';
|
||||
document.querySelector(\'button[type="submit"]\').click();
|
||||
"""
|
||||
|
||||
wait_condition = """() => {
|
||||
const items = document.querySelectorAll(\'h1\');
|
||||
return items.length === 0;
|
||||
}"""
|
||||
|
||||
run_config = CrawlerRunConfig(
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
session_id="session_captcha_test",
|
||||
js_code=js_code,
|
||||
js_only=True,
|
||||
wait_for=f"js:{wait_condition}"
|
||||
)
|
||||
|
||||
result_next = await crawler.arun(
|
||||
url=site_url,
|
||||
config=run_config,
|
||||
)
|
||||
print(result_next.markdown)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
@@ -1,67 +0,0 @@
|
||||
import asyncio
|
||||
import capsolver
|
||||
from crawl4ai import *
|
||||
|
||||
|
||||
# TODO: set your config
|
||||
# Docs: https://docs.capsolver.com/guide/captcha/ReCaptchaV2/
|
||||
api_key = "CAP-xxxxxxxxxxxxxxxxxxxxx" # your api key of capsolver
|
||||
site_key = "6LfW6wATAAAAAHLqO2pb8bDBahxlMxNdo9g947u9" # site key of your target site
|
||||
site_url = "https://recaptcha-demo.appspot.com/recaptcha-v2-checkbox.php" # page url of your target site
|
||||
captcha_type = "ReCaptchaV2TaskProxyLess" # type of your target captcha
|
||||
capsolver.api_key = api_key
|
||||
|
||||
|
||||
async def main():
|
||||
browser_config = BrowserConfig(
|
||||
verbose=True,
|
||||
headless=False,
|
||||
use_persistent_context=True,
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
await crawler.arun(
|
||||
url=site_url,
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
session_id="session_captcha_test"
|
||||
)
|
||||
|
||||
# get recaptcha token using capsolver sdk
|
||||
solution = capsolver.solve({
|
||||
"type": captcha_type,
|
||||
"websiteURL": site_url,
|
||||
"websiteKey": site_key,
|
||||
})
|
||||
token = solution["gRecaptchaResponse"]
|
||||
print("recaptcha token:", token)
|
||||
|
||||
js_code = """
|
||||
const textarea = document.getElementById(\'g-recaptcha-response\');
|
||||
if (textarea) {
|
||||
textarea.value = \"""" + token + """\";
|
||||
document.querySelector(\'button.form-field[type="submit"]\').click();
|
||||
}
|
||||
"""
|
||||
|
||||
wait_condition = """() => {
|
||||
const items = document.querySelectorAll(\'h2\');
|
||||
return items.length > 1;
|
||||
}"""
|
||||
|
||||
run_config = CrawlerRunConfig(
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
session_id="session_captcha_test",
|
||||
js_code=js_code,
|
||||
js_only=True,
|
||||
wait_for=f"js:{wait_condition}"
|
||||
)
|
||||
|
||||
result_next = await crawler.arun(
|
||||
url=site_url,
|
||||
config=run_config,
|
||||
)
|
||||
print(result_next.markdown)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
@@ -1,75 +0,0 @@
|
||||
import asyncio
|
||||
import capsolver
|
||||
from crawl4ai import *
|
||||
|
||||
|
||||
# TODO: set your config
|
||||
# Docs: https://docs.capsolver.com/guide/captcha/ReCaptchaV3/
|
||||
api_key = "CAP-xxxxxxxxxxxxxxxxxxxxx" # your api key of capsolver
|
||||
site_key = "6LdKlZEpAAAAAAOQjzC2v_d36tWxCl6dWsozdSy9" # site key of your target site
|
||||
site_url = "https://recaptcha-demo.appspot.com/recaptcha-v3-request-scores.php" # page url of your target site
|
||||
page_action = "examples/v3scores" # page action of your target site
|
||||
captcha_type = "ReCaptchaV3TaskProxyLess" # type of your target captcha
|
||||
capsolver.api_key = api_key
|
||||
|
||||
|
||||
async def main():
|
||||
browser_config = BrowserConfig(
|
||||
verbose=True,
|
||||
headless=False,
|
||||
use_persistent_context=True,
|
||||
)
|
||||
|
||||
# get recaptcha token using capsolver sdk
|
||||
solution = capsolver.solve({
|
||||
"type": captcha_type,
|
||||
"websiteURL": site_url,
|
||||
"websiteKey": site_key,
|
||||
"pageAction": page_action,
|
||||
})
|
||||
token = solution["gRecaptchaResponse"]
|
||||
print("recaptcha token:", token)
|
||||
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
await crawler.arun(
|
||||
url=site_url,
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
session_id="session_captcha_test"
|
||||
)
|
||||
|
||||
js_code = """
|
||||
const originalFetch = window.fetch;
|
||||
|
||||
window.fetch = function(...args) {
|
||||
if (typeof args[0] === 'string' && args[0].includes('/recaptcha-v3-verify.php')) {
|
||||
const url = new URL(args[0], window.location.origin);
|
||||
url.searchParams.set('token', '""" + token + """');
|
||||
args[0] = url.toString();
|
||||
document.querySelector('.token').innerHTML = "fetch('/recaptcha-v3-verify.php?action=examples/v3scores&token=""" + token + """')";
|
||||
console.log('Fetch URL hooked:', args[0]);
|
||||
}
|
||||
return originalFetch.apply(this, args);
|
||||
};
|
||||
"""
|
||||
|
||||
wait_condition = """() => {
|
||||
return document.querySelector('.step3:not(.hidden)');
|
||||
}"""
|
||||
|
||||
run_config = CrawlerRunConfig(
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
session_id="session_captcha_test",
|
||||
js_code=js_code,
|
||||
js_only=True,
|
||||
wait_for=f"js:{wait_condition}"
|
||||
)
|
||||
|
||||
result_next = await crawler.arun(
|
||||
url=site_url,
|
||||
config=run_config,
|
||||
)
|
||||
print(result_next.markdown)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
@@ -1,36 +0,0 @@
|
||||
import time
|
||||
import asyncio
|
||||
from crawl4ai import *
|
||||
|
||||
|
||||
# TODO: the user data directory that includes the capsolver extension
|
||||
user_data_dir = "/browser-profile/Default1"
|
||||
|
||||
"""
|
||||
The capsolver extension supports more features, such as:
|
||||
- Telling the extension when to start solving captcha.
|
||||
- Calling functions to check whether the captcha has been solved, etc.
|
||||
Reference blog: https://docs.capsolver.com/guide/automation-tool-integration/
|
||||
"""
|
||||
|
||||
browser_config = BrowserConfig(
|
||||
verbose=True,
|
||||
headless=False,
|
||||
user_data_dir=user_data_dir,
|
||||
use_persistent_context=True,
|
||||
)
|
||||
|
||||
async def main():
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
result_initial = await crawler.arun(
|
||||
url="https://nft.porsche.com/onboarding@6",
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
session_id="session_captcha_test"
|
||||
)
|
||||
|
||||
# do something later
|
||||
time.sleep(300)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
@@ -1,36 +0,0 @@
|
||||
import time
|
||||
import asyncio
|
||||
from crawl4ai import *
|
||||
|
||||
|
||||
# TODO: the user data directory that includes the capsolver extension
|
||||
user_data_dir = "/browser-profile/Default1"
|
||||
|
||||
"""
|
||||
The capsolver extension supports more features, such as:
|
||||
- Telling the extension when to start solving captcha.
|
||||
- Calling functions to check whether the captcha has been solved, etc.
|
||||
Reference blog: https://docs.capsolver.com/guide/automation-tool-integration/
|
||||
"""
|
||||
|
||||
browser_config = BrowserConfig(
|
||||
verbose=True,
|
||||
headless=False,
|
||||
user_data_dir=user_data_dir,
|
||||
use_persistent_context=True,
|
||||
)
|
||||
|
||||
async def main():
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
result_initial = await crawler.arun(
|
||||
url="https://gitlab.com/users/sign_in",
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
session_id="session_captcha_test"
|
||||
)
|
||||
|
||||
# do something later
|
||||
time.sleep(300)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
@@ -1,36 +0,0 @@
|
||||
import time
|
||||
import asyncio
|
||||
from crawl4ai import *
|
||||
|
||||
|
||||
# TODO: the user data directory that includes the capsolver extension
|
||||
user_data_dir = "/browser-profile/Default1"
|
||||
|
||||
"""
|
||||
The capsolver extension supports more features, such as:
|
||||
- Telling the extension when to start solving captcha.
|
||||
- Calling functions to check whether the captcha has been solved, etc.
|
||||
Reference blog: https://docs.capsolver.com/guide/automation-tool-integration/
|
||||
"""
|
||||
|
||||
browser_config = BrowserConfig(
|
||||
verbose=True,
|
||||
headless=False,
|
||||
user_data_dir=user_data_dir,
|
||||
use_persistent_context=True,
|
||||
)
|
||||
|
||||
async def main():
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
result_initial = await crawler.arun(
|
||||
url="https://clifford.io/demo/cloudflare-turnstile",
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
session_id="session_captcha_test"
|
||||
)
|
||||
|
||||
# do something later
|
||||
time.sleep(300)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
@@ -1,36 +0,0 @@
|
||||
import time
|
||||
import asyncio
|
||||
from crawl4ai import *
|
||||
|
||||
|
||||
# TODO: the user data directory that includes the capsolver extension
|
||||
user_data_dir = "/browser-profile/Default1"
|
||||
|
||||
"""
|
||||
The capsolver extension supports more features, such as:
|
||||
- Telling the extension when to start solving captcha.
|
||||
- Calling functions to check whether the captcha has been solved, etc.
|
||||
Reference blog: https://docs.capsolver.com/guide/automation-tool-integration/
|
||||
"""
|
||||
|
||||
browser_config = BrowserConfig(
|
||||
verbose=True,
|
||||
headless=False,
|
||||
user_data_dir=user_data_dir,
|
||||
use_persistent_context=True,
|
||||
)
|
||||
|
||||
async def main():
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
result_initial = await crawler.arun(
|
||||
url="https://recaptcha-demo.appspot.com/recaptcha-v2-checkbox.php",
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
session_id="session_captcha_test"
|
||||
)
|
||||
|
||||
# do something later
|
||||
time.sleep(300)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
@@ -1,36 +0,0 @@
|
||||
import time
|
||||
import asyncio
|
||||
from crawl4ai import *
|
||||
|
||||
|
||||
# TODO: the user data directory that includes the capsolver extension
|
||||
user_data_dir = "/browser-profile/Default1"
|
||||
|
||||
"""
|
||||
The capsolver extension supports more features, such as:
|
||||
- Telling the extension when to start solving captcha.
|
||||
- Calling functions to check whether the captcha has been solved, etc.
|
||||
Reference blog: https://docs.capsolver.com/guide/automation-tool-integration/
|
||||
"""
|
||||
|
||||
browser_config = BrowserConfig(
|
||||
verbose=True,
|
||||
headless=False,
|
||||
user_data_dir=user_data_dir,
|
||||
use_persistent_context=True,
|
||||
)
|
||||
|
||||
async def main():
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
result_initial = await crawler.arun(
|
||||
url="https://recaptcha-demo.appspot.com/recaptcha-v3-request-scores.php",
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
session_id="session_captcha_test"
|
||||
)
|
||||
|
||||
# do something later
|
||||
time.sleep(300)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
@@ -1,39 +0,0 @@
|
||||
"""
|
||||
Simple demonstration of the DFS deep crawler visiting multiple pages.
|
||||
|
||||
Run with: python docs/examples/dfs_crawl_demo.py
|
||||
"""
|
||||
import asyncio
|
||||
|
||||
from crawl4ai.async_configs import BrowserConfig, CrawlerRunConfig
|
||||
from crawl4ai.async_webcrawler import AsyncWebCrawler
|
||||
from crawl4ai.cache_context import CacheMode
|
||||
from crawl4ai.deep_crawling.dfs_strategy import DFSDeepCrawlStrategy
|
||||
from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator
|
||||
|
||||
|
||||
async def main() -> None:
|
||||
dfs_strategy = DFSDeepCrawlStrategy(
|
||||
max_depth=3,
|
||||
max_pages=50,
|
||||
include_external=False,
|
||||
)
|
||||
|
||||
config = CrawlerRunConfig(
|
||||
deep_crawl_strategy=dfs_strategy,
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
markdown_generator=DefaultMarkdownGenerator(),
|
||||
stream=True,
|
||||
)
|
||||
|
||||
seed_url = "https://docs.python.org/3/" # Plenty of internal links
|
||||
|
||||
async with AsyncWebCrawler(config=BrowserConfig(headless=True)) as crawler:
|
||||
async for result in await crawler.arun(url=seed_url, config=config):
|
||||
depth = result.metadata.get("depth")
|
||||
status = "SUCCESS" if result.success else "FAILED"
|
||||
print(f"[{status}] depth={depth} url={result.url}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
@@ -1,522 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Comprehensive hooks examples using Docker Client with function objects.
|
||||
|
||||
This approach is recommended because:
|
||||
- Write hooks as regular Python functions
|
||||
- Full IDE support (autocomplete, type checking)
|
||||
- Automatic conversion to API format
|
||||
- Reusable and testable code
|
||||
- Clean, readable syntax
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
from crawl4ai import Crawl4aiDockerClient
|
||||
|
||||
# API_BASE_URL = "http://localhost:11235"
|
||||
API_BASE_URL = "http://localhost:11234"
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Hook Function Definitions
|
||||
# ============================================================================
|
||||
|
||||
# --- All Hooks Demo ---
|
||||
async def browser_created_hook(browser, **kwargs):
|
||||
"""Called after browser is created"""
|
||||
print("[HOOK] Browser created and ready")
|
||||
return browser
|
||||
|
||||
|
||||
async def page_context_hook(page, context, **kwargs):
|
||||
"""Setup page environment"""
|
||||
print("[HOOK] Setting up page environment")
|
||||
|
||||
# Set viewport
|
||||
await page.set_viewport_size({"width": 1920, "height": 1080})
|
||||
|
||||
# Add cookies
|
||||
await context.add_cookies([{
|
||||
"name": "test_session",
|
||||
"value": "abc123xyz",
|
||||
"domain": ".httpbin.org",
|
||||
"path": "/"
|
||||
}])
|
||||
|
||||
# Block resources
|
||||
await context.route("**/*.{png,jpg,jpeg,gif}", lambda route: route.abort())
|
||||
await context.route("**/analytics/*", lambda route: route.abort())
|
||||
|
||||
print("[HOOK] Environment configured")
|
||||
return page
|
||||
|
||||
|
||||
async def user_agent_hook(page, context, user_agent, **kwargs):
|
||||
"""Called when user agent is updated"""
|
||||
print(f"[HOOK] User agent: {user_agent[:50]}...")
|
||||
return page
|
||||
|
||||
|
||||
async def before_goto_hook(page, context, url, **kwargs):
|
||||
"""Called before navigating to URL"""
|
||||
print(f"[HOOK] Navigating to: {url}")
|
||||
|
||||
await page.set_extra_http_headers({
|
||||
"X-Custom-Header": "crawl4ai-test",
|
||||
"Accept-Language": "en-US"
|
||||
})
|
||||
|
||||
return page
|
||||
|
||||
|
||||
async def after_goto_hook(page, context, url, response, **kwargs):
|
||||
"""Called after page loads"""
|
||||
print(f"[HOOK] Page loaded: {url}")
|
||||
|
||||
await page.wait_for_timeout(1000)
|
||||
|
||||
try:
|
||||
await page.wait_for_selector("body", timeout=2000)
|
||||
print("[HOOK] Body element ready")
|
||||
except Exception:
|
||||
print("[HOOK] Timeout, continuing")
|
||||
|
||||
return page
|
||||
|
||||
|
||||
async def execution_started_hook(page, context, **kwargs):
|
||||
"""Called when custom JS execution starts"""
|
||||
print("[HOOK] JS execution started")
|
||||
await page.evaluate("console.log('[HOOK] Custom JS');")
|
||||
return page
|
||||
|
||||
|
||||
async def before_retrieve_hook(page, context, **kwargs):
|
||||
"""Called before retrieving HTML"""
|
||||
print("[HOOK] Preparing HTML retrieval")
|
||||
|
||||
# Scroll for lazy content
|
||||
await page.evaluate("window.scrollTo(0, document.body.scrollHeight);")
|
||||
await page.wait_for_timeout(500)
|
||||
await page.evaluate("window.scrollTo(0, 0);")
|
||||
|
||||
print("[HOOK] Scrolling complete")
|
||||
return page
|
||||
|
||||
|
||||
async def before_return_hook(page, context, html, **kwargs):
|
||||
"""Called before returning HTML"""
|
||||
print(f"[HOOK] HTML ready: {len(html)} chars")
|
||||
|
||||
metrics = await page.evaluate('''() => ({
|
||||
images: document.images.length,
|
||||
links: document.links.length,
|
||||
scripts: document.scripts.length
|
||||
})''')
|
||||
|
||||
print(f"[HOOK] Metrics - Images: {metrics['images']}, Links: {metrics['links']}")
|
||||
return page
|
||||
|
||||
|
||||
# --- Authentication Hooks ---
|
||||
async def auth_context_hook(page, context, **kwargs):
|
||||
"""Setup authentication context"""
|
||||
print("[HOOK] Setting up authentication")
|
||||
|
||||
# Add auth cookies
|
||||
await context.add_cookies([{
|
||||
"name": "auth_token",
|
||||
"value": "fake_jwt_token",
|
||||
"domain": ".httpbin.org",
|
||||
"path": "/",
|
||||
"httpOnly": True
|
||||
}])
|
||||
|
||||
# Set localStorage
|
||||
await page.evaluate('''
|
||||
localStorage.setItem('user_id', '12345');
|
||||
localStorage.setItem('auth_time', new Date().toISOString());
|
||||
''')
|
||||
|
||||
print("[HOOK] Auth context ready")
|
||||
return page
|
||||
|
||||
|
||||
async def auth_headers_hook(page, context, url, **kwargs):
|
||||
"""Add authentication headers"""
|
||||
print(f"[HOOK] Adding auth headers for {url}")
|
||||
|
||||
import base64
|
||||
credentials = base64.b64encode(b"user:passwd").decode('ascii')
|
||||
|
||||
await page.set_extra_http_headers({
|
||||
'Authorization': f'Basic {credentials}',
|
||||
'X-API-Key': 'test-key-123'
|
||||
})
|
||||
|
||||
return page
|
||||
|
||||
|
||||
# --- Performance Optimization Hooks ---
|
||||
async def performance_hook(page, context, **kwargs):
|
||||
"""Optimize page for performance"""
|
||||
print("[HOOK] Optimizing for performance")
|
||||
|
||||
# Block resource-heavy content
|
||||
await context.route("**/*.{png,jpg,jpeg,gif,webp,svg}", lambda r: r.abort())
|
||||
await context.route("**/*.{woff,woff2,ttf}", lambda r: r.abort())
|
||||
await context.route("**/*.{mp4,webm,ogg}", lambda r: r.abort())
|
||||
await context.route("**/googletagmanager.com/*", lambda r: r.abort())
|
||||
await context.route("**/google-analytics.com/*", lambda r: r.abort())
|
||||
await context.route("**/facebook.com/*", lambda r: r.abort())
|
||||
|
||||
# Disable animations
|
||||
await page.add_style_tag(content='''
|
||||
*, *::before, *::after {
|
||||
animation-duration: 0s !important;
|
||||
transition-duration: 0s !important;
|
||||
}
|
||||
''')
|
||||
|
||||
print("[HOOK] Optimizations applied")
|
||||
return page
|
||||
|
||||
|
||||
async def cleanup_hook(page, context, **kwargs):
|
||||
"""Clean page before extraction"""
|
||||
print("[HOOK] Cleaning page")
|
||||
|
||||
await page.evaluate('''() => {
|
||||
const selectors = [
|
||||
'.ad', '.ads', '.advertisement',
|
||||
'.popup', '.modal', '.overlay',
|
||||
'.cookie-banner', '.newsletter'
|
||||
];
|
||||
|
||||
selectors.forEach(sel => {
|
||||
document.querySelectorAll(sel).forEach(el => el.remove());
|
||||
});
|
||||
|
||||
document.querySelectorAll('script, style').forEach(el => el.remove());
|
||||
}''')
|
||||
|
||||
print("[HOOK] Page cleaned")
|
||||
return page
|
||||
|
||||
|
||||
# --- Content Extraction Hooks ---
|
||||
async def wait_dynamic_content_hook(page, context, url, response, **kwargs):
|
||||
"""Wait for dynamic content to load"""
|
||||
print(f"[HOOK] Waiting for dynamic content on {url}")
|
||||
|
||||
await page.wait_for_timeout(2000)
|
||||
|
||||
# Click "Load More" if exists
|
||||
try:
|
||||
load_more = await page.query_selector('[class*="load-more"], button:has-text("Load More")')
|
||||
if load_more:
|
||||
await load_more.click()
|
||||
await page.wait_for_timeout(1000)
|
||||
print("[HOOK] Clicked 'Load More'")
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return page
|
||||
|
||||
|
||||
async def extract_metadata_hook(page, context, **kwargs):
|
||||
"""Extract page metadata"""
|
||||
print("[HOOK] Extracting metadata")
|
||||
|
||||
metadata = await page.evaluate('''() => {
|
||||
const getMeta = (name) => {
|
||||
const el = document.querySelector(`meta[name="${name}"], meta[property="${name}"]`);
|
||||
return el ? el.getAttribute('content') : null;
|
||||
};
|
||||
|
||||
return {
|
||||
title: document.title,
|
||||
description: getMeta('description'),
|
||||
author: getMeta('author'),
|
||||
keywords: getMeta('keywords'),
|
||||
};
|
||||
}''')
|
||||
|
||||
print(f"[HOOK] Metadata: {metadata}")
|
||||
|
||||
# Infinite scroll
|
||||
for i in range(3):
|
||||
await page.evaluate("window.scrollTo(0, document.body.scrollHeight);")
|
||||
await page.wait_for_timeout(1000)
|
||||
print(f"[HOOK] Scroll {i+1}/3")
|
||||
|
||||
return page
|
||||
|
||||
|
||||
# --- Multi-URL Hooks ---
|
||||
async def url_specific_hook(page, context, url, **kwargs):
|
||||
"""Apply URL-specific logic"""
|
||||
print(f"[HOOK] Processing URL: {url}")
|
||||
|
||||
# URL-specific headers
|
||||
if 'html' in url:
|
||||
await page.set_extra_http_headers({"X-Type": "HTML"})
|
||||
elif 'json' in url:
|
||||
await page.set_extra_http_headers({"X-Type": "JSON"})
|
||||
|
||||
return page
|
||||
|
||||
|
||||
async def track_progress_hook(page, context, url, response, **kwargs):
|
||||
"""Track crawl progress"""
|
||||
status = response.status if response else 'unknown'
|
||||
print(f"[HOOK] Loaded {url} - Status: {status}")
|
||||
return page
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Test Functions
|
||||
# ============================================================================
|
||||
|
||||
async def test_all_hooks_comprehensive():
|
||||
"""Test all 8 hook types"""
|
||||
print("=" * 70)
|
||||
print("Test 1: All Hooks Comprehensive Demo (Docker Client)")
|
||||
print("=" * 70)
|
||||
|
||||
async with Crawl4aiDockerClient(base_url=API_BASE_URL, verbose=False) as client:
|
||||
print("\nCrawling with all 8 hooks...")
|
||||
|
||||
# Define hooks with function objects
|
||||
hooks = {
|
||||
"on_browser_created": browser_created_hook,
|
||||
"on_page_context_created": page_context_hook,
|
||||
"on_user_agent_updated": user_agent_hook,
|
||||
"before_goto": before_goto_hook,
|
||||
"after_goto": after_goto_hook,
|
||||
"on_execution_started": execution_started_hook,
|
||||
"before_retrieve_html": before_retrieve_hook,
|
||||
"before_return_html": before_return_hook
|
||||
}
|
||||
|
||||
result = await client.crawl(
|
||||
["https://httpbin.org/html"],
|
||||
hooks=hooks,
|
||||
hooks_timeout=30
|
||||
)
|
||||
|
||||
print("\n✅ Success!")
|
||||
print(f" URL: {result.url}")
|
||||
print(f" Success: {result.success}")
|
||||
print(f" HTML: {len(result.html)} chars")
|
||||
|
||||
|
||||
async def test_authentication_workflow():
|
||||
"""Test authentication with hooks"""
|
||||
print("\n" + "=" * 70)
|
||||
print("Test 2: Authentication Workflow (Docker Client)")
|
||||
print("=" * 70)
|
||||
|
||||
async with Crawl4aiDockerClient(base_url=API_BASE_URL, verbose=False) as client:
|
||||
print("\nTesting authentication...")
|
||||
|
||||
hooks = {
|
||||
"on_page_context_created": auth_context_hook,
|
||||
"before_goto": auth_headers_hook
|
||||
}
|
||||
|
||||
result = await client.crawl(
|
||||
["https://httpbin.org/basic-auth/user/passwd"],
|
||||
hooks=hooks,
|
||||
hooks_timeout=15
|
||||
)
|
||||
|
||||
print("\n✅ Authentication completed")
|
||||
|
||||
if result.success:
|
||||
if '"authenticated"' in result.html and 'true' in result.html:
|
||||
print(" ✅ Basic auth successful!")
|
||||
else:
|
||||
print(" ⚠️ Auth status unclear")
|
||||
else:
|
||||
print(f" ❌ Failed: {result.error_message}")
|
||||
|
||||
|
||||
async def test_performance_optimization():
|
||||
"""Test performance optimization"""
|
||||
print("\n" + "=" * 70)
|
||||
print("Test 3: Performance Optimization (Docker Client)")
|
||||
print("=" * 70)
|
||||
|
||||
async with Crawl4aiDockerClient(base_url=API_BASE_URL, verbose=False) as client:
|
||||
print("\nTesting performance hooks...")
|
||||
|
||||
hooks = {
|
||||
"on_page_context_created": performance_hook,
|
||||
"before_retrieve_html": cleanup_hook
|
||||
}
|
||||
|
||||
result = await client.crawl(
|
||||
["https://httpbin.org/html"],
|
||||
hooks=hooks,
|
||||
hooks_timeout=10
|
||||
)
|
||||
|
||||
print("\n✅ Optimization completed")
|
||||
print(f" HTML size: {len(result.html):,} chars")
|
||||
print(" Resources blocked, ads removed")
|
||||
|
||||
|
||||
async def test_content_extraction():
|
||||
"""Test content extraction"""
|
||||
print("\n" + "=" * 70)
|
||||
print("Test 4: Content Extraction (Docker Client)")
|
||||
print("=" * 70)
|
||||
|
||||
async with Crawl4aiDockerClient(base_url=API_BASE_URL, verbose=False) as client:
|
||||
print("\nTesting extraction hooks...")
|
||||
|
||||
hooks = {
|
||||
"after_goto": wait_dynamic_content_hook,
|
||||
"before_retrieve_html": extract_metadata_hook
|
||||
}
|
||||
|
||||
result = await client.crawl(
|
||||
["https://www.kidocode.com/"],
|
||||
hooks=hooks,
|
||||
hooks_timeout=20
|
||||
)
|
||||
|
||||
print("\n✅ Extraction completed")
|
||||
print(f" URL: {result.url}")
|
||||
print(f" Success: {result.success}")
|
||||
print(f" Metadata: {result.metadata}")
|
||||
|
||||
|
||||
async def test_multi_url_crawl():
|
||||
"""Test hooks with multiple URLs"""
|
||||
print("\n" + "=" * 70)
|
||||
print("Test 5: Multi-URL Crawl (Docker Client)")
|
||||
print("=" * 70)
|
||||
|
||||
async with Crawl4aiDockerClient(base_url=API_BASE_URL, verbose=False) as client:
|
||||
print("\nCrawling multiple URLs...")
|
||||
|
||||
hooks = {
|
||||
"before_goto": url_specific_hook,
|
||||
"after_goto": track_progress_hook
|
||||
}
|
||||
|
||||
results = await client.crawl(
|
||||
[
|
||||
"https://httpbin.org/html",
|
||||
"https://httpbin.org/json",
|
||||
"https://httpbin.org/xml"
|
||||
],
|
||||
hooks=hooks,
|
||||
hooks_timeout=15
|
||||
)
|
||||
|
||||
print("\n✅ Multi-URL crawl completed")
|
||||
print(f"\n Crawled {len(results)} URLs:")
|
||||
for i, result in enumerate(results, 1):
|
||||
status = "✅" if result.success else "❌"
|
||||
print(f" {status} {i}. {result.url}")
|
||||
|
||||
|
||||
async def test_reusable_hook_library():
|
||||
"""Test using reusable hook library"""
|
||||
print("\n" + "=" * 70)
|
||||
print("Test 6: Reusable Hook Library (Docker Client)")
|
||||
print("=" * 70)
|
||||
|
||||
# Create a library of reusable hooks
|
||||
class HookLibrary:
|
||||
@staticmethod
|
||||
async def block_images(page, context, **kwargs):
|
||||
"""Block all images"""
|
||||
await context.route("**/*.{png,jpg,jpeg,gif}", lambda r: r.abort())
|
||||
print("[LIBRARY] Images blocked")
|
||||
return page
|
||||
|
||||
@staticmethod
|
||||
async def block_analytics(page, context, **kwargs):
|
||||
"""Block analytics"""
|
||||
await context.route("**/analytics/*", lambda r: r.abort())
|
||||
await context.route("**/google-analytics.com/*", lambda r: r.abort())
|
||||
print("[LIBRARY] Analytics blocked")
|
||||
return page
|
||||
|
||||
@staticmethod
|
||||
async def scroll_infinite(page, context, **kwargs):
|
||||
"""Handle infinite scroll"""
|
||||
for i in range(5):
|
||||
prev = await page.evaluate("document.body.scrollHeight")
|
||||
await page.evaluate("window.scrollTo(0, document.body.scrollHeight);")
|
||||
await page.wait_for_timeout(1000)
|
||||
curr = await page.evaluate("document.body.scrollHeight")
|
||||
if curr == prev:
|
||||
break
|
||||
print("[LIBRARY] Infinite scroll complete")
|
||||
return page
|
||||
|
||||
async with Crawl4aiDockerClient(base_url=API_BASE_URL, verbose=False) as client:
|
||||
print("\nUsing hook library...")
|
||||
|
||||
hooks = {
|
||||
"on_page_context_created": HookLibrary.block_images,
|
||||
"before_retrieve_html": HookLibrary.scroll_infinite
|
||||
}
|
||||
|
||||
result = await client.crawl(
|
||||
["https://www.kidocode.com/"],
|
||||
hooks=hooks,
|
||||
hooks_timeout=20
|
||||
)
|
||||
|
||||
print("\n✅ Library hooks completed")
|
||||
print(f" Success: {result.success}")
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Main
|
||||
# ============================================================================
|
||||
|
||||
async def main():
|
||||
"""Run all Docker client hook examples"""
|
||||
print("🔧 Crawl4AI Docker Client - Hooks Examples (Function-Based)")
|
||||
print("Using Python function objects with automatic conversion")
|
||||
print("=" * 70)
|
||||
|
||||
tests = [
|
||||
("All Hooks Demo", test_all_hooks_comprehensive),
|
||||
("Authentication", test_authentication_workflow),
|
||||
("Performance", test_performance_optimization),
|
||||
("Extraction", test_content_extraction),
|
||||
("Multi-URL", test_multi_url_crawl),
|
||||
("Hook Library", test_reusable_hook_library)
|
||||
]
|
||||
|
||||
for i, (name, test_func) in enumerate(tests, 1):
|
||||
try:
|
||||
await test_func()
|
||||
print(f"\n✅ Test {i}/{len(tests)}: {name} completed\n")
|
||||
except Exception as e:
|
||||
print(f"\n❌ Test {i}/{len(tests)}: {name} failed: {e}\n")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
|
||||
print("=" * 70)
|
||||
print("🎉 All Docker client hook examples completed!")
|
||||
print("\n💡 Key Benefits of Function-Based Hooks:")
|
||||
print(" • Write as regular Python functions")
|
||||
print(" • Full IDE support (autocomplete, types)")
|
||||
print(" • Automatic conversion to API format")
|
||||
print(" • Reusable across projects")
|
||||
print(" • Clean, readable code")
|
||||
print(" • Easy to test and debug")
|
||||
print("=" * 70)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
File diff suppressed because it is too large
@@ -1,461 +0,0 @@
|
||||
"""
|
||||
Docker Webhook Example for Crawl4AI
|
||||
|
||||
This example demonstrates how to use webhooks with the Crawl4AI job queue API.
|
||||
Instead of polling for results, webhooks notify your application when jobs complete.
|
||||
|
||||
Supports both:
|
||||
- /crawl/job - Raw crawling with markdown extraction
|
||||
- /llm/job - LLM-powered content extraction
|
||||
|
||||
Prerequisites:
|
||||
1. Crawl4AI Docker container running on localhost:11235
|
||||
2. Flask installed: pip install flask requests
|
||||
3. LLM API key configured in .llm.env (for LLM extraction examples)
|
||||
|
||||
Usage:
|
||||
1. Run this script: python docker_webhook_example.py
|
||||
2. The webhook server will start on http://localhost:8080
|
||||
3. Jobs will be submitted and webhooks will be received automatically
|
||||
"""
|
||||
|
||||
import requests
|
||||
import json
|
||||
import time
|
||||
from flask import Flask, request, jsonify
|
||||
from threading import Thread
|
||||
|
||||
# Configuration
|
||||
CRAWL4AI_BASE_URL = "http://localhost:11235"
|
||||
WEBHOOK_BASE_URL = "http://localhost:8080" # Your webhook receiver URL
|
||||
|
||||
# Initialize Flask app for webhook receiver
|
||||
app = Flask(__name__)
|
||||
|
||||
# Store received webhook data for demonstration
|
||||
received_webhooks = []
|
||||
|
||||
|
||||
@app.route('/webhooks/crawl-complete', methods=['POST'])
|
||||
def handle_crawl_webhook():
|
||||
"""
|
||||
Webhook handler that receives notifications when crawl jobs complete.
|
||||
|
||||
Payload structure:
|
||||
{
|
||||
"task_id": "crawl_abc123",
|
||||
"task_type": "crawl",
|
||||
"status": "completed" or "failed",
|
||||
"timestamp": "2025-10-21T10:30:00.000000+00:00",
|
||||
"urls": ["https://example.com"],
|
||||
"error": "error message" (only if failed),
|
||||
"data": {...} (only if webhook_data_in_payload=True)
|
||||
}
|
||||
"""
|
||||
payload = request.json
|
||||
print(f"\n{'='*60}")
|
||||
print(f"📬 Webhook received for task: {payload['task_id']}")
|
||||
print(f" Status: {payload['status']}")
|
||||
print(f" Timestamp: {payload['timestamp']}")
|
||||
print(f" URLs: {payload['urls']}")
|
||||
|
||||
if payload['status'] == 'completed':
|
||||
# If data is in payload, process it directly
|
||||
if 'data' in payload:
|
||||
print(f" ✅ Data included in webhook")
|
||||
data = payload['data']
|
||||
# Process the crawl results here
|
||||
for result in data.get('results', []):
|
||||
print(f" - Crawled: {result.get('url')}")
|
||||
print(f" - Markdown length: {len(result.get('markdown', ''))}")
|
||||
else:
|
||||
# Fetch results from API if not included
|
||||
print(f" 📥 Fetching results from API...")
|
||||
task_id = payload['task_id']
|
||||
result_response = requests.get(f"{CRAWL4AI_BASE_URL}/crawl/job/{task_id}")
|
||||
if result_response.ok:
|
||||
data = result_response.json()
|
||||
print(f" ✅ Results fetched successfully")
|
||||
# Process the crawl results here
|
||||
for result in data['result'].get('results', []):
|
||||
print(f" - Crawled: {result.get('url')}")
|
||||
print(f" - Markdown length: {len(result.get('markdown', ''))}")
|
||||
|
||||
elif payload['status'] == 'failed':
|
||||
print(f" ❌ Job failed: {payload.get('error', 'Unknown error')}")
|
||||
|
||||
print(f"{'='*60}\n")
|
||||
|
||||
# Store webhook for demonstration
|
||||
received_webhooks.append(payload)
|
||||
|
||||
# Return 200 OK to acknowledge receipt
|
||||
return jsonify({"status": "received"}), 200
|
||||
|
||||
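# A minimal receiver-side sketch of checking the optional webhook secret. It
# assumes the job was submitted with webhook_config["webhook_headers"] set to
# {"X-Webhook-Secret": "your-secret-token"}; the route path, header name and
# value here are illustrative, not a fixed Crawl4AI contract.
@app.route('/webhooks/secure-crawl-complete', methods=['POST'])
def handle_secure_webhook():
    # Reject callers that do not present the shared secret configured at submit time.
    if request.headers.get("X-Webhook-Secret") != "your-secret-token":
        return jsonify({"error": "invalid secret"}), 401

    payload = request.json
    received_webhooks.append(payload)
    return jsonify({"status": "received"}), 200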
|
||||
@app.route('/webhooks/llm-complete', methods=['POST'])
|
||||
def handle_llm_webhook():
|
||||
"""
|
||||
Webhook handler that receives notifications when LLM extraction jobs complete.
|
||||
|
||||
Payload structure:
|
||||
{
|
||||
"task_id": "llm_1698765432_12345",
|
||||
"task_type": "llm_extraction",
|
||||
"status": "completed" or "failed",
|
||||
"timestamp": "2025-10-21T10:30:00.000000+00:00",
|
||||
"urls": ["https://example.com/article"],
|
||||
"error": "error message" (only if failed),
|
||||
"data": {"extracted_content": {...}} (only if webhook_data_in_payload=True)
|
||||
}
|
||||
"""
|
||||
payload = request.json
|
||||
print(f"\n{'='*60}")
|
||||
print(f"🤖 LLM Webhook received for task: {payload['task_id']}")
|
||||
print(f" Task Type: {payload['task_type']}")
|
||||
print(f" Status: {payload['status']}")
|
||||
print(f" Timestamp: {payload['timestamp']}")
|
||||
print(f" URL: {payload['urls'][0]}")
|
||||
|
||||
if payload['status'] == 'completed':
|
||||
# If data is in payload, process it directly
|
||||
if 'data' in payload:
|
||||
print(f" ✅ Data included in webhook")
|
||||
data = payload['data']
|
||||
# Webhook wraps extracted content in 'extracted_content' field
|
||||
extracted = data.get('extracted_content', {})
|
||||
print(f" - Extracted content:")
|
||||
print(f" {json.dumps(extracted, indent=8)}")
|
||||
else:
|
||||
# Fetch results from API if not included
|
||||
print(f" 📥 Fetching results from API...")
|
||||
task_id = payload['task_id']
|
||||
result_response = requests.get(f"{CRAWL4AI_BASE_URL}/llm/job/{task_id}")
|
||||
if result_response.ok:
|
||||
data = result_response.json()
|
||||
print(f" ✅ Results fetched successfully")
|
||||
# API returns unwrapped content in 'result' field
|
||||
extracted = data['result']
|
||||
print(f" - Extracted content:")
|
||||
print(f" {json.dumps(extracted, indent=8)}")
|
||||
|
||||
elif payload['status'] == 'failed':
|
||||
print(f" ❌ Job failed: {payload.get('error', 'Unknown error')}")
|
||||
|
||||
print(f"{'='*60}\n")
|
||||
|
||||
# Store webhook for demonstration
|
||||
received_webhooks.append(payload)
|
||||
|
||||
# Return 200 OK to acknowledge receipt
|
||||
return jsonify({"status": "received"}), 200
|
||||
|
||||
|
||||
def start_webhook_server():
|
||||
"""Start the Flask webhook server in a separate thread"""
|
||||
app.run(host='0.0.0.0', port=8080, debug=False, use_reloader=False)
|
||||
|
||||
|
||||
def submit_crawl_job_with_webhook(urls, webhook_url, include_data=False):
|
||||
"""
|
||||
Submit a crawl job with webhook notification.
|
||||
|
||||
Args:
|
||||
urls: List of URLs to crawl
|
||||
webhook_url: URL to receive webhook notifications
|
||||
include_data: Whether to include full results in webhook payload
|
||||
|
||||
Returns:
|
||||
task_id: The job's task identifier
|
||||
"""
|
||||
payload = {
|
||||
"urls": urls,
|
||||
"browser_config": {"headless": True},
|
||||
"crawler_config": {"cache_mode": "bypass"},
|
||||
"webhook_config": {
|
||||
"webhook_url": webhook_url,
|
||||
"webhook_data_in_payload": include_data,
|
||||
# Optional: Add custom headers for authentication
|
||||
# "webhook_headers": {
|
||||
# "X-Webhook-Secret": "your-secret-token"
|
||||
# }
|
||||
}
|
||||
}
|
||||
|
||||
print(f"\n🚀 Submitting crawl job...")
|
||||
print(f" URLs: {urls}")
|
||||
print(f" Webhook: {webhook_url}")
|
||||
print(f" Include data: {include_data}")
|
||||
|
||||
response = requests.post(
|
||||
f"{CRAWL4AI_BASE_URL}/crawl/job",
|
||||
json=payload,
|
||||
headers={"Content-Type": "application/json"}
|
||||
)
|
||||
|
||||
if response.ok:
|
||||
data = response.json()
|
||||
task_id = data['task_id']
|
||||
print(f" ✅ Job submitted successfully")
|
||||
print(f" Task ID: {task_id}")
|
||||
return task_id
|
||||
else:
|
||||
print(f" ❌ Failed to submit job: {response.text}")
|
||||
return None
|
||||
|
||||
|
||||
def submit_llm_job_with_webhook(url, query, webhook_url, include_data=False, schema=None, provider=None):
|
||||
"""
|
||||
Submit an LLM extraction job with webhook notification.
|
||||
|
||||
Args:
|
||||
url: URL to extract content from
|
||||
query: Instruction for the LLM (e.g., "Extract article title and author")
|
||||
webhook_url: URL to receive webhook notifications
|
||||
include_data: Whether to include full results in webhook payload
|
||||
schema: Optional JSON schema for structured extraction
|
||||
provider: Optional LLM provider (e.g., "openai/gpt-4o-mini")
|
||||
|
||||
Returns:
|
||||
task_id: The job's task identifier
|
||||
"""
|
||||
payload = {
|
||||
"url": url,
|
||||
"q": query,
|
||||
"cache": False,
|
||||
"webhook_config": {
|
||||
"webhook_url": webhook_url,
|
||||
"webhook_data_in_payload": include_data,
|
||||
# Optional: Add custom headers for authentication
|
||||
# "webhook_headers": {
|
||||
# "X-Webhook-Secret": "your-secret-token"
|
||||
# }
|
||||
}
|
||||
}
|
||||
|
||||
if schema:
|
||||
payload["schema"] = schema
|
||||
|
||||
if provider:
|
||||
payload["provider"] = provider
|
||||
|
||||
print(f"\n🤖 Submitting LLM extraction job...")
|
||||
print(f" URL: {url}")
|
||||
print(f" Query: {query}")
|
||||
print(f" Webhook: {webhook_url}")
|
||||
print(f" Include data: {include_data}")
|
||||
if provider:
|
||||
print(f" Provider: {provider}")
|
||||
|
||||
response = requests.post(
|
||||
f"{CRAWL4AI_BASE_URL}/llm/job",
|
||||
json=payload,
|
||||
headers={"Content-Type": "application/json"}
|
||||
)
|
||||
|
||||
if response.ok:
|
||||
data = response.json()
|
||||
task_id = data['task_id']
|
||||
print(f" ✅ Job submitted successfully")
|
||||
print(f" Task ID: {task_id}")
|
||||
return task_id
|
||||
else:
|
||||
print(f" ❌ Failed to submit job: {response.text}")
|
||||
return None
|
||||
|
||||
|
||||
def submit_job_without_webhook(urls):
|
||||
"""
|
||||
Submit a job without webhook (traditional polling approach).
|
||||
|
||||
Args:
|
||||
urls: List of URLs to crawl
|
||||
|
||||
Returns:
|
||||
task_id: The job's task identifier
|
||||
"""
|
||||
payload = {
|
||||
"urls": urls,
|
||||
"browser_config": {"headless": True},
|
||||
"crawler_config": {"cache_mode": "bypass"}
|
||||
}
|
||||
|
||||
print(f"\n🚀 Submitting crawl job (without webhook)...")
|
||||
print(f" URLs: {urls}")
|
||||
|
||||
response = requests.post(
|
||||
f"{CRAWL4AI_BASE_URL}/crawl/job",
|
||||
json=payload
|
||||
)
|
||||
|
||||
if response.ok:
|
||||
data = response.json()
|
||||
task_id = data['task_id']
|
||||
print(f" ✅ Job submitted successfully")
|
||||
print(f" Task ID: {task_id}")
|
||||
return task_id
|
||||
else:
|
||||
print(f" ❌ Failed to submit job: {response.text}")
|
||||
return None
|
||||
|
||||
|
||||
def poll_job_status(task_id, timeout=60):
|
||||
"""
|
||||
Poll for job status (used when webhook is not configured).
|
||||
|
||||
Args:
|
||||
task_id: The job's task identifier
|
||||
timeout: Maximum time to wait in seconds
|
||||
"""
|
||||
print(f"\n⏳ Polling for job status...")
|
||||
start_time = time.time()
|
||||
|
||||
while time.time() - start_time < timeout:
|
||||
response = requests.get(f"{CRAWL4AI_BASE_URL}/crawl/job/{task_id}")
|
||||
|
||||
if response.ok:
|
||||
data = response.json()
|
||||
status = data.get('status', 'unknown')
|
||||
|
||||
if status == 'completed':
|
||||
print(f" ✅ Job completed!")
|
||||
return data
|
||||
elif status == 'failed':
|
||||
print(f" ❌ Job failed: {data.get('error', 'Unknown error')}")
|
||||
return data
|
||||
else:
|
||||
print(f" ⏳ Status: {status}, waiting...")
|
||||
time.sleep(2)
|
||||
else:
|
||||
print(f" ❌ Failed to get status: {response.text}")
|
||||
return None
|
||||
|
||||
print(f" ⏰ Timeout reached")
|
||||
return None
|
||||
|
||||
|
||||
def main():
|
||||
"""Run the webhook demonstration"""
|
||||
|
||||
# Check if Crawl4AI is running
|
||||
try:
|
||||
health = requests.get(f"{CRAWL4AI_BASE_URL}/health", timeout=5)
|
||||
print(f"✅ Crawl4AI is running: {health.json()}")
|
||||
except Exception:
|
||||
print(f"❌ Cannot connect to Crawl4AI at {CRAWL4AI_BASE_URL}")
|
||||
print(" Please make sure Docker container is running:")
|
||||
print(" docker run -d -p 11235:11235 --name crawl4ai unclecode/crawl4ai:latest")
|
||||
return
|
||||
|
||||
# Start webhook server in background thread
|
||||
print(f"\n🌐 Starting webhook server at {WEBHOOK_BASE_URL}...")
|
||||
webhook_thread = Thread(target=start_webhook_server, daemon=True)
|
||||
webhook_thread.start()
|
||||
time.sleep(2) # Give server time to start
|
||||
|
||||
# Example 1: Job with webhook (notification only, fetch data separately)
|
||||
print(f"\n{'='*60}")
|
||||
print("Example 1: Webhook Notification Only")
|
||||
print(f"{'='*60}")
|
||||
task_id_1 = submit_crawl_job_with_webhook(
|
||||
urls=["https://example.com"],
|
||||
webhook_url=f"{WEBHOOK_BASE_URL}/webhooks/crawl-complete",
|
||||
include_data=False
|
||||
)
|
||||
|
||||
# Example 2: Job with webhook (data included in payload)
|
||||
time.sleep(5) # Wait a bit between requests
|
||||
print(f"\n{'='*60}")
|
||||
print("Example 2: Webhook with Full Data")
|
||||
print(f"{'='*60}")
|
||||
task_id_2 = submit_crawl_job_with_webhook(
|
||||
urls=["https://www.python.org"],
|
||||
webhook_url=f"{WEBHOOK_BASE_URL}/webhooks/crawl-complete",
|
||||
include_data=True
|
||||
)
|
||||
|
||||
# Example 3: LLM extraction with webhook (notification only)
|
||||
time.sleep(5) # Wait a bit between requests
|
||||
print(f"\n{'='*60}")
|
||||
print("Example 3: LLM Extraction with Webhook (Notification Only)")
|
||||
print(f"{'='*60}")
|
||||
task_id_3 = submit_llm_job_with_webhook(
|
||||
url="https://www.example.com",
|
||||
query="Extract the main heading and description from this page.",
|
||||
webhook_url=f"{WEBHOOK_BASE_URL}/webhooks/llm-complete",
|
||||
include_data=False,
|
||||
provider="openai/gpt-4o-mini"
|
||||
)
|
||||
|
||||
# Example 4: LLM extraction with webhook (data included + schema)
|
||||
time.sleep(5) # Wait a bit between requests
|
||||
print(f"\n{'='*60}")
|
||||
print("Example 4: LLM Extraction with Schema and Full Data")
|
||||
print(f"{'='*60}")
|
||||
|
||||
# Define a schema for structured extraction
|
||||
schema = json.dumps({
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"title": {"type": "string", "description": "Page title"},
|
||||
"description": {"type": "string", "description": "Page description"}
|
||||
},
|
||||
"required": ["title"]
|
||||
})
|
||||
|
||||
task_id_4 = submit_llm_job_with_webhook(
|
||||
url="https://www.python.org",
|
||||
query="Extract the title and description of this website",
|
||||
webhook_url=f"{WEBHOOK_BASE_URL}/webhooks/llm-complete",
|
||||
include_data=True,
|
||||
schema=schema,
|
||||
provider="openai/gpt-4o-mini"
|
||||
)
|
||||
|
||||
# Example 5: Traditional polling (no webhook)
|
||||
time.sleep(5) # Wait a bit between requests
|
||||
print(f"\n{'='*60}")
|
||||
print("Example 5: Traditional Polling (No Webhook)")
|
||||
print(f"{'='*60}")
|
||||
task_id_5 = submit_job_without_webhook(
|
||||
urls=["https://github.com"]
|
||||
)
|
||||
if task_id_5:
|
||||
result = poll_job_status(task_id_5)
|
||||
if result and result.get('status') == 'completed':
|
||||
print(f" ✅ Results retrieved via polling")
|
||||
|
||||
# Wait for webhooks to arrive
|
||||
print(f"\n⏳ Waiting for webhooks to be received...")
|
||||
time.sleep(30) # Give jobs time to complete and webhooks to arrive (longer for LLM)
|
||||
|
||||
# Summary
|
||||
print(f"\n{'='*60}")
|
||||
print("Summary")
|
||||
print(f"{'='*60}")
|
||||
print(f"Total webhooks received: {len(received_webhooks)}")
|
||||
|
||||
crawl_webhooks = [w for w in received_webhooks if w['task_type'] == 'crawl']
|
||||
llm_webhooks = [w for w in received_webhooks if w['task_type'] == 'llm_extraction']
|
||||
|
||||
print(f"\n📊 Breakdown:")
|
||||
print(f" - Crawl webhooks: {len(crawl_webhooks)}")
|
||||
print(f" - LLM extraction webhooks: {len(llm_webhooks)}")
|
||||
|
||||
print(f"\n📋 Details:")
|
||||
for i, webhook in enumerate(received_webhooks, 1):
|
||||
task_type = webhook['task_type']
|
||||
icon = "🕷️" if task_type == "crawl" else "🤖"
|
||||
print(f"{i}. {icon} Task {webhook['task_id']}: {webhook['status']} ({task_type})")
|
||||
|
||||
print(f"\n✅ Demo completed!")
|
||||
print(f"\n💡 Pro tips:")
|
||||
print(f" - In production, your webhook URL should be publicly accessible")
|
||||
print(f" (e.g., https://myapp.com/webhooks) or use ngrok for testing")
|
||||
print(f" - Both /crawl/job and /llm/job support the same webhook configuration")
|
||||
print(f" - Use webhook_data_in_payload=true to get results directly in the webhook")
|
||||
print(f" - LLM jobs may take longer, adjust timeouts accordingly")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
315
docs/examples/link_analysis_example.py
Normal file
@@ -0,0 +1,315 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Link Analysis Example
|
||||
====================
|
||||
|
||||
This example demonstrates how to use the new /links/analyze endpoint
|
||||
to extract, analyze, and score links from web pages.
|
||||
|
||||
Requirements:
|
||||
- Crawl4AI server running on localhost:11234
|
||||
- requests library: pip install requests
|
||||
"""
|
||||
|
||||
import requests
|
||||
import json
|
||||
import time
|
||||
from typing import Dict, Any, List
|
||||
|
||||
|
||||
class LinkAnalyzer:
|
||||
"""Simple client for the link analysis endpoint"""
|
||||
|
||||
def __init__(self, base_url: str = "http://localhost:11234", token: str = None):
|
||||
self.base_url = base_url
|
||||
self.token = token or self._get_test_token()
|
||||
|
||||
def _get_test_token(self) -> str:
|
||||
"""Get a test token (for development only)"""
|
||||
try:
|
||||
response = requests.post(
|
||||
f"{self.base_url}/token",
|
||||
json={"email": "test@example.com"},
|
||||
timeout=10
|
||||
)
|
||||
if response.status_code == 200:
|
||||
return response.json()["access_token"]
|
||||
except Exception:
|
||||
pass
|
||||
return "test-token" # Fallback for local testing
|
||||
|
||||
def analyze_links(self, url: str, config: Dict[str, Any] = None) -> Dict[str, Any]:
|
||||
"""Analyze links on a webpage"""
|
||||
headers = {"Content-Type": "application/json"}
|
||||
|
||||
if self.token and self.token != "test-token":
|
||||
headers["Authorization"] = f"Bearer {self.token}"
|
||||
|
||||
data = {"url": url}
|
||||
if config:
|
||||
data["config"] = config
|
||||
|
||||
response = requests.post(
|
||||
f"{self.base_url}/links/analyze",
|
||||
headers=headers,
|
||||
json=data,
|
||||
timeout=30
|
||||
)
|
||||
|
||||
response.raise_for_status()
|
||||
return response.json()
|
||||
|
||||
def print_summary(self, result: Dict[str, Any]):
|
||||
"""Print a summary of link analysis results"""
|
||||
print("\n" + "="*60)
|
||||
print("📊 LINK ANALYSIS SUMMARY")
|
||||
print("="*60)
|
||||
|
||||
total_links = sum(len(links) for links in result.values())
|
||||
print(f"Total links found: {total_links}")
|
||||
|
||||
for category, links in result.items():
|
||||
if links:
|
||||
print(f"\n📂 {category.upper()}: {len(links)} links")
|
||||
|
||||
# Show top 3 links by score
|
||||
top_links = sorted(links, key=lambda x: x.get('total_score', 0), reverse=True)[:3]
|
||||
for i, link in enumerate(top_links, 1):
|
||||
score = link.get('total_score', 0)
|
||||
text = link.get('text', 'No text')[:50]
|
||||
url = link.get('href', 'No URL')[:60]
|
||||
print(f" {i}. [{score:.2f}] {text} → {url}")
|
||||
|
||||
|
||||
def example_1_basic_analysis():
|
||||
"""Example 1: Basic link analysis"""
|
||||
print("\n🔍 Example 1: Basic Link Analysis")
|
||||
print("-" * 40)
|
||||
|
||||
analyzer = LinkAnalyzer()
|
||||
|
||||
# Analyze a simple test page
|
||||
url = "https://httpbin.org/links/10"
|
||||
print(f"Analyzing: {url}")
|
||||
|
||||
try:
|
||||
result = analyzer.analyze_links(url)
|
||||
analyzer.print_summary(result)
|
||||
return result
|
||||
except Exception as e:
|
||||
print(f"❌ Error: {e}")
|
||||
return None
|
||||
|
||||
|
||||
def example_2_custom_config():
|
||||
"""Example 2: Analysis with custom configuration"""
|
||||
print("\n🔍 Example 2: Custom Configuration")
|
||||
print("-" * 40)
|
||||
|
||||
analyzer = LinkAnalyzer()
|
||||
|
||||
# Custom configuration
|
||||
config = {
|
||||
"include_internal": True,
|
||||
"include_external": True,
|
||||
"max_links": 50,
|
||||
"timeout": 10,
|
||||
"verbose": True
|
||||
}
|
||||
|
||||
url = "https://httpbin.org/links/10"
|
||||
print(f"Analyzing with custom config: {url}")
|
||||
print(f"Config: {json.dumps(config, indent=2)}")
|
||||
|
||||
try:
|
||||
result = analyzer.analyze_links(url, config)
|
||||
analyzer.print_summary(result)
|
||||
return result
|
||||
except Exception as e:
|
||||
print(f"❌ Error: {e}")
|
||||
return None
|
||||
|
||||
|
||||
def example_3_real_world_site():
|
||||
"""Example 3: Analyzing a real website"""
|
||||
print("\n🔍 Example 3: Real Website Analysis")
|
||||
print("-" * 40)
|
||||
|
||||
analyzer = LinkAnalyzer()
|
||||
|
||||
# Analyze Python official website
|
||||
url = "https://www.python.org"
|
||||
print(f"Analyzing real website: {url}")
|
||||
print("This may take a moment...")
|
||||
|
||||
try:
|
||||
result = analyzer.analyze_links(url)
|
||||
analyzer.print_summary(result)
|
||||
|
||||
# Additional analysis
|
||||
print("\n📈 DETAILED ANALYSIS")
|
||||
print("-" * 20)
|
||||
|
||||
# Find external links with highest scores
|
||||
external_links = result.get('external', [])
|
||||
if external_links:
|
||||
top_external = sorted(external_links, key=lambda x: x.get('total_score', 0), reverse=True)[:5]
|
||||
print("\n🌐 Top External Links:")
|
||||
for link in top_external:
|
||||
print(f" • {link.get('text', 'N/A')} (score: {link.get('total_score', 0):.2f})")
|
||||
print(f" {link.get('href', 'N/A')}")
|
||||
|
||||
# Find internal links
|
||||
internal_links = result.get('internal', [])
|
||||
if internal_links:
|
||||
top_internal = sorted(internal_links, key=lambda x: x.get('total_score', 0), reverse=True)[:5]
|
||||
print("\n🏠 Top Internal Links:")
|
||||
for link in top_internal:
|
||||
print(f" • {link.get('text', 'N/A')} (score: {link.get('total_score', 0):.2f})")
|
||||
print(f" {link.get('href', 'N/A')}")
|
||||
|
||||
return result
|
||||
except Exception as e:
|
||||
print(f"❌ Error: {e}")
|
||||
print("⚠️ This example may fail due to network issues")
|
||||
return None
|
||||
|
||||
|
||||
def example_4_comparative_analysis():
|
||||
"""Example 4: Comparing link structures across sites"""
|
||||
print("\n🔍 Example 4: Comparative Analysis")
|
||||
print("-" * 40)
|
||||
|
||||
analyzer = LinkAnalyzer()
|
||||
|
||||
sites = [
|
||||
("https://httpbin.org/links/10", "Test Page 1"),
|
||||
("https://httpbin.org/links/5", "Test Page 2")
|
||||
]
|
||||
|
||||
results = {}
|
||||
|
||||
for url, name in sites:
|
||||
print(f"\nAnalyzing: {name}")
|
||||
try:
|
||||
result = analyzer.analyze_links(url)
|
||||
results[name] = result
|
||||
|
||||
total_links = sum(len(links) for links in result.values())
|
||||
categories = len([cat for cat, links in result.items() if links])
|
||||
print(f" Links: {total_links}, Categories: {categories}")
|
||||
|
||||
except Exception as e:
|
||||
print(f" ❌ Error: {e}")
|
||||
|
||||
# Compare results
|
||||
if len(results) > 1:
|
||||
print("\n📊 COMPARISON")
|
||||
print("-" * 15)
|
||||
|
||||
for name, result in results.items():
|
||||
total = sum(len(links) for links in result.values())
|
||||
print(f"{name}: {total} total links")
|
||||
|
||||
# Calculate average scores
|
||||
all_scores = []
|
||||
for links in result.values():
|
||||
for link in links:
|
||||
all_scores.append(link.get('total_score', 0))
|
||||
|
||||
if all_scores:
|
||||
avg_score = sum(all_scores) / len(all_scores)
|
||||
print(f" Average link score: {avg_score:.3f}")
|
||||
|
||||
|
||||
def example_5_advanced_filtering():
|
||||
"""Example 5: Advanced filtering and analysis"""
|
||||
print("\n🔍 Example 5: Advanced Filtering")
|
||||
print("-" * 40)
|
||||
|
||||
analyzer = LinkAnalyzer()
|
||||
|
||||
url = "https://httpbin.org/links/10"
|
||||
|
||||
try:
|
||||
result = analyzer.analyze_links(url)
|
||||
|
||||
# Filter links by score
|
||||
min_score = 0.5
|
||||
high_quality_links = {}
|
||||
|
||||
for category, links in result.items():
|
||||
if links:
|
||||
filtered = [link for link in links if link.get('total_score', 0) >= min_score]
|
||||
if filtered:
|
||||
high_quality_links[category] = filtered
|
||||
|
||||
print(f"\n🎯 High-quality links (score >= {min_score}):")
|
||||
total_high_quality = sum(len(links) for links in high_quality_links.values())
|
||||
print(f"Total: {total_high_quality} links")
|
||||
|
||||
for category, links in high_quality_links.items():
|
||||
print(f"\n{category.upper()}:")
|
||||
for link in links:
|
||||
score = link.get('total_score', 0)
|
||||
text = link.get('text', 'No text')
|
||||
print(f" • [{score:.2f}] {text}")
|
||||
|
||||
# Extract unique domains from external links
|
||||
external_links = result.get('external', [])
|
||||
if external_links:
|
||||
domains = set()
|
||||
for link in external_links:
|
||||
url = link.get('href', '')
|
||||
if '://' in url:
|
||||
domain = url.split('://')[1].split('/')[0]
|
||||
domains.add(domain)
|
||||
|
||||
print(f"\n🌐 Unique external domains: {len(domains)}")
|
||||
for domain in sorted(domains):
|
||||
print(f" • {domain}")
|
||||
|
||||
except Exception as e:
|
||||
print(f"❌ Error: {e}")
|
||||
|
||||
|
||||
def main():
|
||||
"""Run all examples"""
|
||||
print("🚀 Link Analysis Examples")
|
||||
print("=" * 50)
|
||||
print("Make sure the Crawl4AI server is running on localhost:11234")
|
||||
print()
|
||||
|
||||
examples = [
|
||||
example_1_basic_analysis,
|
||||
example_2_custom_config,
|
||||
example_3_real_world_site,
|
||||
example_4_comparative_analysis,
|
||||
example_5_advanced_filtering
|
||||
]
|
||||
|
||||
for i, example_func in enumerate(examples, 1):
|
||||
print(f"\n{'='*60}")
|
||||
print(f"Running Example {i}")
|
||||
print('='*60)
|
||||
|
||||
try:
|
||||
example_func()
|
||||
except KeyboardInterrupt:
|
||||
print("\n⏹️ Example interrupted by user")
|
||||
break
|
||||
except Exception as e:
|
||||
print(f"\n❌ Example {i} failed: {e}")
|
||||
|
||||
if i < len(examples):
|
||||
print("\n⏳ Press Enter to continue to next example...")
|
||||
try:
|
||||
input()
|
||||
except KeyboardInterrupt:
|
||||
break
|
||||
|
||||
print("\n🎉 Examples completed!")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -1,48 +0,0 @@
|
||||
"""
|
||||
NSTProxy Integration Examples for crawl4ai
|
||||
------------------------------------------
|
||||
|
||||
NSTProxy is a premium residential proxy provider.
|
||||
👉 Purchase Proxies: https://nstproxy.com
|
||||
💰 Use coupon code "crawl4ai" for 10% off your plan.
|
||||
|
||||
"""
|
||||
import asyncio
import requests
|
||||
from crawl4ai import AsyncWebCrawler, BrowserConfig
|
||||
|
||||
|
||||
async def main():
|
||||
"""
|
||||
Example: Dynamically fetch a proxy from NSTProxy API before crawling.
|
||||
"""
|
||||
NST_TOKEN = "YOUR_NST_PROXY_TOKEN" # Get from https://app.nstproxy.com/profile
|
||||
CHANNEL_ID = "YOUR_NST_PROXY_CHANNEL_ID" # Your NSTProxy Channel ID
|
||||
country = "ANY" # e.g. "ANY", "US", "DE"
|
||||
|
||||
# Fetch proxy from NSTProxy API
|
||||
api_url = (
|
||||
f"https://api.nstproxy.com/api/v1/generate/apiproxies"
|
||||
f"?fType=2&channelId={CHANNEL_ID}&country={country}"
|
||||
f"&protocol=http&sessionDuration=10&count=1&token={NST_TOKEN}"
|
||||
)
|
||||
response = requests.get(api_url, timeout=10).json()
|
||||
proxy = response[0]
|
||||
|
||||
ip = proxy.get("ip")
|
||||
port = proxy.get("port")
|
||||
username = proxy.get("username", "")
|
||||
password = proxy.get("password", "")
|
||||
|
||||
browser_config = BrowserConfig(proxy_config={
|
||||
"server": f"http://{ip}:{port}",
|
||||
"username": username,
|
||||
"password": password,
|
||||
})
|
||||
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
result = await crawler.arun(url="https://example.com")
|
||||
print("[API Proxy] Status:", result.status_code)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
@@ -1,31 +0,0 @@
|
||||
"""
|
||||
NSTProxy Integration Examples for crawl4ai
|
||||
------------------------------------------
|
||||
|
||||
NSTProxy is a premium residential proxy provider.
|
||||
👉 Purchase Proxies: https://nstproxy.com
|
||||
💰 Use coupon code "crawl4ai" for 10% off your plan.
|
||||
|
||||
"""
|
||||
import asyncio
|
||||
from crawl4ai import AsyncWebCrawler, BrowserConfig
|
||||
|
||||
|
||||
async def main():
|
||||
"""
|
||||
Example: Use NSTProxy with manual username/password authentication.
|
||||
"""
|
||||
|
||||
browser_config = BrowserConfig(proxy_config={
|
||||
"server": "http://gate.nstproxy.io:24125",
|
||||
"username": "your_username",
|
||||
"password": "your_password",
|
||||
})
|
||||
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
result = await crawler.arun(url="https://example.com")
|
||||
print("[Auth Proxy] Status:", result.status_code)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
@@ -1,29 +0,0 @@
|
||||
"""
|
||||
NSTProxy Integration Examples for crawl4ai
|
||||
------------------------------------------
|
||||
|
||||
NSTProxy is a premium residential proxy provider.
|
||||
👉 Purchase Proxies: https://nstproxy.com
|
||||
💰 Use coupon code "crawl4ai" for 10% off your plan.
|
||||
|
||||
"""
|
||||
import asyncio
|
||||
from crawl4ai import AsyncWebCrawler, BrowserConfig
|
||||
|
||||
|
||||
async def main():
|
||||
# Using HTTP proxy
|
||||
browser_config = BrowserConfig(proxy_config={"server": "http://gate.nstproxy.io:24125"})
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
result = await crawler.arun(url="https://example.com")
|
||||
print("[HTTP Proxy] Status:", result.status_code)
|
||||
|
||||
# Using SOCKS proxy
|
||||
browser_config = BrowserConfig(proxy_config={"server": "socks5://gate.nstproxy.io:24125"})
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
result = await crawler.arun(url="https://example.com")
|
||||
print("[SOCKS5 Proxy] Status:", result.status_code)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
@@ -1,39 +0,0 @@
|
||||
"""
|
||||
NSTProxy Integration Examples for crawl4ai
|
||||
------------------------------------------
|
||||
|
||||
NSTProxy is a premium residential proxy provider.
|
||||
👉 Purchase Proxies: https://nstproxy.com
|
||||
💰 Use coupon code "crawl4ai" for 10% off your plan.
|
||||
|
||||
"""
|
||||
import asyncio
|
||||
from crawl4ai import AsyncWebCrawler, BrowserConfig
|
||||
|
||||
|
||||
async def main():
|
||||
"""
|
||||
Example: Using NSTProxy with AsyncWebCrawler.
|
||||
"""
|
||||
|
||||
NST_TOKEN = "YOUR_NST_PROXY_TOKEN" # Get from https://app.nstproxy.com/profile
|
||||
CHANNEL_ID = "YOUR_NST_PROXY_CHANNEL_ID" # Your NSTProxy Channel ID
|
||||
|
||||
browser_config = BrowserConfig()
|
||||
browser_config.set_nstproxy(
|
||||
token=NST_TOKEN,
|
||||
channel_id=CHANNEL_ID,
|
||||
country="ANY", # e.g. "US", "JP", or "ANY"
|
||||
state="", # optional, leave empty if not needed
|
||||
city="", # optional, leave empty if not needed
|
||||
session_duration=0  # Session duration in minutes; 0 = rotate on every request
|
||||
)
|
||||
|
||||
# === Run crawler ===
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
result = await crawler.arun(url="https://example.com")
|
||||
print("[Nstproxy] Status:", result.status_code)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
626
docs/examples/table-extraction-api.md
Normal file
@@ -0,0 +1,626 @@
|
||||
# Table Extraction API Documentation
|
||||
|
||||
## Overview
|
||||
|
||||
The Crawl4AI Docker Server provides powerful table extraction capabilities through both **integrated** and **dedicated** endpoints. Extract structured data from HTML tables using multiple strategies: default (fast regex-based), LLM-powered (semantic understanding), or financial (specialized for financial data).
|
||||
|
||||
---
|
||||
|
||||
## Table of Contents
|
||||
|
||||
1. [Quick Start](#quick-start)
|
||||
2. [Extraction Strategies](#extraction-strategies)
|
||||
3. [Integrated Extraction (with /crawl)](#integrated-extraction)
|
||||
4. [Dedicated Endpoints (/tables)](#dedicated-endpoints)
|
||||
5. [Batch Processing](#batch-processing)
|
||||
6. [Configuration Options](#configuration-options)
|
||||
7. [Response Format](#response-format)
|
||||
8. [Error Handling](#error-handling)
|
||||
|
||||
---
|
||||
|
||||
## Quick Start
|
||||
|
||||
### Extract Tables During Crawl
|
||||
|
||||
```bash
|
||||
curl -X POST http://localhost:11235/crawl \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"urls": ["https://example.com/financial-data"],
|
||||
"table_extraction": {
|
||||
"strategy": "default"
|
||||
}
|
||||
}'
|
||||
```
|
||||
|
||||
### Extract Tables from HTML
|
||||
|
||||
```bash
|
||||
curl -X POST http://localhost:11235/tables/extract \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"html": "<table><tr><th>Name</th><th>Value</th></tr><tr><td>A</td><td>100</td></tr></table>",
|
||||
"config": {
|
||||
"strategy": "default"
|
||||
}
|
||||
}'
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Extraction Strategies
|
||||
|
||||
### 1. **Default Strategy** (Fast, Regex-Based)
|
||||
|
||||
Best for general-purpose table extraction with high performance.
|
||||
|
||||
```json
|
||||
{
|
||||
"strategy": "default"
|
||||
}
|
||||
```
|
||||
|
||||
**Use Cases:**
|
||||
- General web scraping
|
||||
- Simple data tables
|
||||
- High-volume extraction
|
||||
|
||||
### 2. **LLM Strategy** (AI-Powered)
|
||||
|
||||
Uses Large Language Models for semantic understanding and complex table structures.
|
||||
|
||||
```json
|
||||
{
|
||||
"strategy": "llm",
|
||||
"llm_provider": "openai",
|
||||
"llm_model": "gpt-4",
|
||||
"llm_api_key": "your-api-key",
|
||||
"llm_prompt": "Extract and structure the financial data"
|
||||
}
|
||||
```
|
||||
|
||||
**Use Cases:**
|
||||
- Complex nested tables
|
||||
- Tables with irregular structure
|
||||
- Semantic data extraction
|
||||
|
||||
**Supported Providers:**
|
||||
- `openai` (GPT-3.5, GPT-4)
|
||||
- `anthropic` (Claude)
|
||||
- `huggingface` (Open models)
|
||||
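A minimal Python sketch of an LLM-strategy request against `/tables/extract`, assuming the server runs on the default port 11235 and the API key is provided via an `OPENAI_API_KEY` environment variable (the variable name and target URL are illustrative):

```python
import os

import requests

llm_config = {
    "strategy": "llm",
    "llm_provider": "openai",
    "llm_model": "gpt-4",
    # Read the key from the environment instead of hard-coding it in the request.
    "llm_api_key": os.environ["OPENAI_API_KEY"],
    "llm_prompt": "Extract and structure the tabular data",
}

response = requests.post(
    "http://localhost:11235/tables/extract",
    json={"url": "https://example.com/complex-data", "config": llm_config},
    timeout=60,
)
response.raise_for_status()
print(f"Tables found: {response.json()['table_count']}")
```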
|
||||
### 3. **Financial Strategy** (Specialized)
|
||||
|
||||
Optimized for financial tables with proper numerical formatting.
|
||||
|
||||
```json
|
||||
{
|
||||
"strategy": "financial",
|
||||
"preserve_formatting": true,
|
||||
"extract_metadata": true
|
||||
}
|
||||
```
|
||||
|
||||
**Use Cases:**
|
||||
- Stock data
|
||||
- Financial statements
|
||||
- Accounting tables
|
||||
- Price lists
|
||||
|
||||
### 4. **None Strategy** (No Extraction)
|
||||
|
||||
Disables table extraction.
|
||||
|
||||
```json
|
||||
{
|
||||
"strategy": "none"
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Integrated Extraction
|
||||
|
||||
Add table extraction to any crawl request by including the `table_extraction` configuration.
|
||||
|
||||
### Example: Basic Integration
|
||||
|
||||
```python
|
||||
import requests
|
||||
|
||||
response = requests.post("http://localhost:11235/crawl", json={
|
||||
"urls": ["https://finance.yahoo.com/quote/AAPL"],
|
||||
"browser_config": {
|
||||
"headless": True
|
||||
},
|
||||
"crawler_config": {
|
||||
"wait_until": "networkidle"
|
||||
},
|
||||
"table_extraction": {
|
||||
"strategy": "financial",
|
||||
"preserve_formatting": True
|
||||
}
|
||||
})
|
||||
|
||||
data = response.json()
|
||||
for result in data["results"]:
|
||||
if result["success"]:
|
||||
print(f"Found {len(result.get('tables', []))} tables")
|
||||
for table in result.get("tables", []):
|
||||
print(f"Table: {table['headers']}")
|
||||
```
|
||||
|
||||
### Example: Multiple URLs with Table Extraction
|
||||
|
||||
```javascript
|
||||
// Node.js example
|
||||
const axios = require('axios');
|
||||
|
||||
const response = await axios.post('http://localhost:11235/crawl', {
|
||||
urls: [
|
||||
'https://example.com/page1',
|
||||
'https://example.com/page2',
|
||||
'https://example.com/page3'
|
||||
],
|
||||
table_extraction: {
|
||||
strategy: 'default'
|
||||
}
|
||||
});
|
||||
|
||||
response.data.results.forEach((result, index) => {
|
||||
console.log(`Page ${index + 1}:`);
|
||||
console.log(` Tables found: ${result.tables?.length || 0}`);
|
||||
});
|
||||
```
|
||||
|
||||
### Example: LLM-Based Extraction with Custom Prompt
|
||||
|
||||
```bash
|
||||
curl -X POST http://localhost:11235/crawl \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"urls": ["https://example.com/complex-data"],
|
||||
"table_extraction": {
|
||||
"strategy": "llm",
|
||||
"llm_provider": "openai",
|
||||
"llm_model": "gpt-4",
|
||||
"llm_api_key": "sk-...",
|
||||
"llm_prompt": "Extract product pricing information, including discounts and availability"
|
||||
}
|
||||
}'
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Dedicated Endpoints
|
||||
|
||||
### `/tables/extract` - Single Extraction
|
||||
|
||||
Extract tables from HTML content or by fetching a URL.
|
||||
|
||||
#### Extract from HTML
|
||||
|
||||
```python
|
||||
import requests
|
||||
|
||||
html_content = """
|
||||
<table>
|
||||
<thead>
|
||||
<tr><th>Product</th><th>Price</th><th>Stock</th></tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr><td>Widget A</td><td>$19.99</td><td>In Stock</td></tr>
|
||||
<tr><td>Widget B</td><td>$29.99</td><td>Out of Stock</td></tr>
|
||||
</tbody>
|
||||
</table>
|
||||
"""
|
||||
|
||||
response = requests.post("http://localhost:11235/tables/extract", json={
|
||||
"html": html_content,
|
||||
"config": {
|
||||
"strategy": "default"
|
||||
}
|
||||
})
|
||||
|
||||
data = response.json()
|
||||
print(f"Success: {data['success']}")
|
||||
print(f"Tables found: {data['table_count']}")
|
||||
print(f"Strategy used: {data['strategy']}")
|
||||
|
||||
for table in data['tables']:
|
||||
print("\nTable:")
|
||||
print(f" Headers: {table['headers']}")
|
||||
print(f" Rows: {len(table['rows'])}")
|
||||
```
|
||||
|
||||
#### Extract from URL
|
||||
|
||||
```python
|
||||
response = requests.post("http://localhost:11235/tables/extract", json={
|
||||
"url": "https://example.com/data-page",
|
||||
"config": {
|
||||
"strategy": "financial",
|
||||
"preserve_formatting": True
|
||||
}
|
||||
})
|
||||
|
||||
data = response.json()
|
||||
for table in data['tables']:
|
||||
print(f"Table with {len(table['rows'])} rows")
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Batch Processing
|
||||
|
||||
### `/tables/extract/batch` - Batch Extraction
|
||||
|
||||
Extract tables from multiple HTML contents or URLs in a single request.
|
||||
|
||||
#### Batch from HTML List
|
||||
|
||||
```python
|
||||
import requests
|
||||
|
||||
html_contents = [
|
||||
"<table><tr><th>A</th></tr><tr><td>1</td></tr></table>",
|
||||
"<table><tr><th>B</th></tr><tr><td>2</td></tr></table>",
|
||||
"<table><tr><th>C</th></tr><tr><td>3</td></tr></table>",
|
||||
]
|
||||
|
||||
response = requests.post("http://localhost:11235/tables/extract/batch", json={
|
||||
"html_list": html_contents,
|
||||
"config": {
|
||||
"strategy": "default"
|
||||
}
|
||||
})
|
||||
|
||||
data = response.json()
|
||||
print(f"Total processed: {data['summary']['total_processed']}")
|
||||
print(f"Successful: {data['summary']['successful']}")
|
||||
print(f"Failed: {data['summary']['failed']}")
|
||||
print(f"Total tables: {data['summary']['total_tables_extracted']}")
|
||||
|
||||
for result in data['results']:
|
||||
if result['success']:
|
||||
print(f" {result['source']}: {result['table_count']} tables")
|
||||
else:
|
||||
print(f" {result['source']}: Error - {result['error']}")
|
||||
```
|
||||
|
||||
#### Batch from URL List
|
||||
|
||||
```python
|
||||
response = requests.post("http://localhost:11235/tables/extract/batch", json={
|
||||
"url_list": [
|
||||
"https://example.com/page1",
|
||||
"https://example.com/page2",
|
||||
"https://example.com/page3",
|
||||
],
|
||||
"config": {
|
||||
"strategy": "financial"
|
||||
}
|
||||
})
|
||||
|
||||
data = response.json()
|
||||
for result in data['results']:
|
||||
print(f"URL: {result['source']}")
|
||||
if result['success']:
|
||||
print(f" ✓ Found {result['table_count']} tables")
|
||||
else:
|
||||
print(f" ✗ Failed: {result['error']}")
|
||||
```
|
||||
|
||||
#### Mixed Batch (HTML + URLs)
|
||||
|
||||
```python
|
||||
response = requests.post("http://localhost:11235/tables/extract/batch", json={
|
||||
"html_list": [
|
||||
"<table><tr><th>Local</th></tr></table>"
|
||||
],
|
||||
"url_list": [
|
||||
"https://example.com/remote"
|
||||
],
|
||||
"config": {
|
||||
"strategy": "default"
|
||||
}
|
||||
})
|
||||
```
|
||||
|
||||
**Batch Limits:**
|
||||
- Maximum 50 items per batch request
|
||||
- Items are processed independently (partial failures allowed)
|
||||
|

---

## Configuration Options

### TableExtractionConfig

| Field | Type | Default | Description |
|-------|------|---------|-------------|
| `strategy` | `"none"` \| `"default"` \| `"llm"` \| `"financial"` | `"default"` | Extraction strategy to use |
| `llm_provider` | `string` | `null` | LLM provider (required for `llm` strategy) |
| `llm_model` | `string` | `null` | Model name (required for `llm` strategy) |
| `llm_api_key` | `string` | `null` | API key (required for `llm` strategy) |
| `llm_prompt` | `string` | `null` | Custom extraction prompt |
| `preserve_formatting` | `boolean` | `false` | Keep original number/date formatting |
| `extract_metadata` | `boolean` | `false` | Include table metadata (id, class, etc.) |

### Example: Full Configuration

```json
{
  "strategy": "llm",
  "llm_provider": "openai",
  "llm_model": "gpt-4",
  "llm_api_key": "sk-...",
  "llm_prompt": "Extract structured product data",
  "preserve_formatting": true,
  "extract_metadata": true
}
```

---

## Response Format

### Single Extraction Response

```json
{
  "success": true,
  "table_count": 2,
  "strategy": "default",
  "tables": [
    {
      "headers": ["Product", "Price", "Stock"],
      "rows": [
        ["Widget A", "$19.99", "In Stock"],
        ["Widget B", "$29.99", "Out of Stock"]
      ],
      "metadata": {
        "id": "product-table",
        "class": "data-table",
        "row_count": 2,
        "column_count": 3
      }
    }
  ]
}
```

### Batch Extraction Response

```json
{
  "success": true,
  "summary": {
    "total_processed": 3,
    "successful": 2,
    "failed": 1,
    "total_tables_extracted": 5
  },
  "strategy": "default",
  "results": [
    {
      "success": true,
      "source": "html_0",
      "table_count": 2,
      "tables": [...]
    },
    {
      "success": true,
      "source": "https://example.com",
      "table_count": 3,
      "tables": [...]
    },
    {
      "success": false,
      "source": "html_2",
      "error": "Invalid HTML structure"
    }
  ]
}
```

### Integrated Crawl Response

Tables are included in the standard crawl result:

```json
{
  "success": true,
  "results": [
    {
      "url": "https://example.com",
      "success": true,
      "html": "...",
      "markdown": "...",
      "tables": [
        {
          "headers": [...],
          "rows": [...]
        }
      ]
    }
  ]
}
```

---

## Error Handling

### Common Errors

#### 400 Bad Request

```json
{
  "detail": "Must provide either 'html' or 'url' for table extraction."
}
```

**Cause:** Invalid request parameters

**Solution:** Ensure you provide exactly one of `html` or `url`

#### 400 Bad Request (LLM)

```json
{
  "detail": "Invalid table extraction config: LLM strategy requires llm_provider, llm_model, and llm_api_key"
}
```

**Cause:** Missing required LLM configuration

**Solution:** Provide all required LLM fields

#### 500 Internal Server Error

```json
{
  "detail": "Failed to fetch and extract from URL: Connection timeout"
}
```

**Cause:** URL fetch failure or extraction error

**Solution:** Check URL accessibility and HTML validity

### Handling Partial Failures in Batch

```python
response = requests.post("http://localhost:11235/tables/extract/batch", json={
    "url_list": urls,
    "config": {"strategy": "default"}
})

data = response.json()

successful_results = [r for r in data['results'] if r['success']]
failed_results = [r for r in data['results'] if not r['success']]

print(f"Successful: {len(successful_results)}")
for result in failed_results:
    print(f"Failed: {result['source']} - {result['error']}")
```

---

## Best Practices

### 1. **Choose the Right Strategy**

- **Default**: Fast, reliable for most tables
- **LLM**: Complex structures, semantic extraction
- **Financial**: Numerical data with formatting

### 2. **Batch Processing**

- Use batch endpoints for multiple pages
- Keep batch size under 50 items
- Handle partial failures gracefully

### 3. **Performance Optimization**

- Use the `default` strategy for high-volume extraction
- Enable `preserve_formatting` only when needed
- Limit `extract_metadata` to reduce payload size

### 4. **LLM Strategy Tips**

- Use specific prompts for better results
- GPT-4 for complex tables, GPT-3.5 for simple ones
- Cache results to reduce API costs (see the caching sketch after the LLM example below)

### 5. **Error Handling**

- Always check the `success` field
- Log errors for debugging
- Implement retry logic for transient failures (a minimal sketch follows below)

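A minimal retry sketch for transient failures; the backoff values and the `post_with_retry` helper are illustrative, not part of the API:

```python
import time
import requests

def post_with_retry(url, payload, attempts=3, backoff=2.0):
    """POST with simple exponential backoff on network errors and 5xx responses."""
    last_error = None
    for attempt in range(1, attempts + 1):
        try:
            response = requests.post(url, json=payload, timeout=60)
            if response.status_code < 500:
                return response  # success, or a 4xx that retrying will not fix
            last_error = RuntimeError(f"Server error {response.status_code}")
        except requests.RequestException as exc:
            last_error = exc
        if attempt < attempts:
            time.sleep(backoff * attempt)
    raise last_error

# Usage (endpoint and payload as in the examples above):
# response = post_with_retry(
#     "http://localhost:11235/tables/extract",
#     {"url": "https://example.com/data", "config": {"strategy": "default"}},
# )
```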

---

## Examples by Use Case

### Financial Data Extraction

```python
response = requests.post("http://localhost:11235/crawl", json={
    "urls": ["https://finance.site.com/stocks"],
    "table_extraction": {
        "strategy": "financial",
        "preserve_formatting": True,
        "extract_metadata": True
    }
})

for result in response.json()["results"]:
    for table in result.get("tables", []):
        # Financial tables with preserved formatting
        print(table["rows"])
```
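For downstream analysis it is often convenient to load the extracted `headers`/`rows` into a DataFrame. A small sketch, assuming `pandas` is installed (it is not required by the API itself):

```python
import pandas as pd

def table_to_dataframe(table):
    """Convert one extracted table dict (headers + rows) into a pandas DataFrame."""
    return pd.DataFrame(table["rows"], columns=table["headers"])

# Example, using one table from the loop above:
# df = table_to_dataframe(result["tables"][0])
# print(df.head())
```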

### Product Catalog Scraping

```python
response = requests.post("http://localhost:11235/tables/extract/batch", json={
    "url_list": [
        "https://shop.com/category/electronics",
        "https://shop.com/category/clothing",
        "https://shop.com/category/books",
    ],
    "config": {"strategy": "default"}
})

all_products = []
for result in response.json()["results"]:
    if result["success"]:
        for table in result["tables"]:
            all_products.extend(table["rows"])

print(f"Total products: {len(all_products)}")
```

### Complex Table with LLM

```python
response = requests.post("http://localhost:11235/tables/extract", json={
    "url": "https://complex-data.com/report",
    "config": {
        "strategy": "llm",
        "llm_provider": "openai",
        "llm_model": "gpt-4",
        "llm_api_key": "sk-...",
        "llm_prompt": "Extract quarterly revenue breakdown by region and product category"
    }
})

structured_data = response.json()["tables"]
```
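Because LLM extraction is slower and billed per call, caching results per URL and config is worthwhile (this is the "cache results to reduce API costs" tip from Best Practices). A minimal file-based sketch; the cache directory and key scheme are illustrative:

```python
import hashlib
import json
from pathlib import Path

import requests

CACHE_DIR = Path(".table_cache")
CACHE_DIR.mkdir(exist_ok=True)

def extract_with_cache(url, config, base_url="http://localhost:11235"):
    """Return cached tables for a URL+config if present, otherwise call the API and cache."""
    key = hashlib.sha256(f"{url}:{json.dumps(config, sort_keys=True)}".encode()).hexdigest()
    cache_file = CACHE_DIR / f"{key}.json"
    if cache_file.exists():
        return json.loads(cache_file.read_text())

    response = requests.post(f"{base_url}/tables/extract", json={"url": url, "config": config})
    response.raise_for_status()
    tables = response.json()["tables"]
    cache_file.write_text(json.dumps(tables))
    return tables
```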

---

## API Reference Summary

| Endpoint | Method | Purpose |
|----------|--------|---------|
| `/crawl` | POST | Crawl with integrated table extraction |
| `/crawl/stream` | POST | Stream crawl with table extraction |
| `/tables/extract` | POST | Extract tables from HTML or URL |
| `/tables/extract/batch` | POST | Batch extract from multiple sources |

For complete API documentation, visit `/docs` (Swagger UI).

---

## Support

For issues, feature requests, or questions:

- GitHub: https://github.com/unclecode/crawl4ai
- Documentation: https://crawl4ai.com/docs
- Discord: https://discord.gg/crawl4ai

@@ -82,42 +82,6 @@ If you installed Crawl4AI (which installs Playwright under the hood), you alread

---

### Creating a Profile Using the Crawl4AI CLI (Easiest)

If you prefer a guided, interactive setup, use the built-in CLI to create and manage persistent browser profiles.

1. Launch the profile manager:

   ```bash
   crwl profiles
   ```

2. Choose "Create new profile" and enter a profile name. A Chromium window opens so you can log in to sites and configure settings. When finished, return to the terminal and press `q` to save the profile.

3. Profiles are saved under `~/.crawl4ai/profiles/<profile_name>` (for example: `/home/<you>/.crawl4ai/profiles/test_profile_1`) along with a `storage_state.json` for cookies and session data.

4. Optionally, choose "List profiles" in the CLI to view available profiles and their paths.

5. Use the saved path with `BrowserConfig.user_data_dir`:

   ```python
   from crawl4ai import AsyncWebCrawler, BrowserConfig

   profile_path = "/home/<you>/.crawl4ai/profiles/test_profile_1"

   browser_config = BrowserConfig(
       headless=True,
       use_managed_browser=True,
       user_data_dir=profile_path,
       browser_type="chromium",
   )

   async with AsyncWebCrawler(config=browser_config) as crawler:
       result = await crawler.arun(url="https://example.com/private")
   ```

The CLI also supports listing and deleting profiles, and even testing a crawl directly from the menu.

---

## 3. Using Managed Browsers in Crawl4AI

Once you have a data directory with your session data, pass it to **`BrowserConfig`**:

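The example below is a minimal sketch reusing the CLI profile from above; the profile path and target URL are placeholders.

```python
import asyncio
from crawl4ai import AsyncWebCrawler, BrowserConfig

# Path created earlier with `crwl profiles`; adjust to your own profile
profile_path = "/home/<you>/.crawl4ai/profiles/test_profile_1"

browser_config = BrowserConfig(
    headless=True,
    use_managed_browser=True,
    user_data_dir=profile_path,
    browser_type="chromium",
)

async def main():
    async with AsyncWebCrawler(config=browser_config) as crawler:
        result = await crawler.arun(url="https://example.com/dashboard")
        print(result.success)

if __name__ == "__main__":
    asyncio.run(main())
```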
@@ -1,304 +1,98 @@
|
||||
# Proxy & Security
|
||||
|
||||
This guide covers proxy configuration and security features in Crawl4AI, including SSL certificate analysis and proxy rotation strategies.
|
||||
|
||||
## Understanding Proxy Configuration
|
||||
|
||||
Crawl4AI recommends configuring proxies per request through `CrawlerRunConfig.proxy_config`. This gives you precise control, enables rotation strategies, and keeps examples simple enough to copy, paste, and run.
|
||||
# Proxy
|
||||
|
||||
## Basic Proxy Setup
|
||||
|
||||
Configure proxies that apply to each crawl operation:
|
||||
Simple proxy configuration with `BrowserConfig`:
|
||||
|
||||
```python
|
||||
import asyncio
|
||||
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, ProxyConfig
|
||||
from crawl4ai.async_configs import BrowserConfig
|
||||
|
||||
run_config = CrawlerRunConfig(proxy_config=ProxyConfig(server="http://proxy.example.com:8080"))
|
||||
# run_config = CrawlerRunConfig(proxy_config={"server": "http://proxy.example.com:8080"})
|
||||
# run_config = CrawlerRunConfig(proxy_config="http://proxy.example.com:8080")
|
||||
# Using HTTP proxy
|
||||
browser_config = BrowserConfig(proxy_config={"server": "http://proxy.example.com:8080"})
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
result = await crawler.arun(url="https://example.com")
|
||||
|
||||
|
||||
async def main():
|
||||
browser_config = BrowserConfig()
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
result = await crawler.arun(url="https://example.com", config=run_config)
|
||||
print(f"Success: {result.success} -> {result.url}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
# Using SOCKS proxy
|
||||
browser_config = BrowserConfig(proxy_config={"server": "socks5://proxy.example.com:1080"})
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
result = await crawler.arun(url="https://example.com")
|
||||
```
|
||||
|
||||
!!! note "Why request-level?"
|
||||
`CrawlerRunConfig.proxy_config` keeps each request self-contained, so swapping proxies or rotation strategies is just a matter of building a new run configuration.
|
||||
## Authenticated Proxy
|
||||
|
||||
## Supported Proxy Formats
|
||||
|
||||
The `ProxyConfig.from_string()` method supports multiple formats:
|
||||
Use an authenticated proxy with `BrowserConfig`:
|
||||
|
||||
```python
|
||||
from crawl4ai import ProxyConfig
|
||||
from crawl4ai.async_configs import BrowserConfig
|
||||
|
||||
# HTTP proxy with authentication
|
||||
proxy1 = ProxyConfig.from_string("http://user:pass@192.168.1.1:8080")
|
||||
|
||||
# HTTPS proxy
|
||||
proxy2 = ProxyConfig.from_string("https://proxy.example.com:8080")
|
||||
|
||||
# SOCKS5 proxy
|
||||
proxy3 = ProxyConfig.from_string("socks5://proxy.example.com:1080")
|
||||
|
||||
# Simple IP:port format
|
||||
proxy4 = ProxyConfig.from_string("192.168.1.1:8080")
|
||||
|
||||
# IP:port:user:pass format
|
||||
proxy5 = ProxyConfig.from_string("192.168.1.1:8080:user:pass")
|
||||
browser_config = BrowserConfig(proxy_config={
|
||||
"server": "http://[host]:[port]",
|
||||
"username": "[username]",
|
||||
"password": "[password]",
|
||||
})
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
result = await crawler.arun(url="https://example.com")
|
||||
```
|
||||
|
||||
## Authenticated Proxies
|
||||
|
||||
For proxies requiring authentication:
|
||||
## Rotating Proxies
|
||||
|
||||
Example using a proxy rotation service dynamically:
|
||||
|
||||
```python
|
||||
import asyncio
|
||||
from crawl4ai import AsyncWebCrawler,BrowserConfig, CrawlerRunConfig, ProxyConfig
|
||||
|
||||
run_config = CrawlerRunConfig(
|
||||
proxy_config=ProxyConfig(
|
||||
server="http://proxy.example.com:8080",
|
||||
username="your_username",
|
||||
password="your_password",
|
||||
)
|
||||
)
|
||||
# Or dictionary style:
|
||||
# run_config = CrawlerRunConfig(proxy_config={
|
||||
# "server": "http://proxy.example.com:8080",
|
||||
# "username": "your_username",
|
||||
# "password": "your_password",
|
||||
# })
|
||||
|
||||
|
||||
async def main():
|
||||
browser_config = BrowserConfig()
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
result = await crawler.arun(url="https://example.com", config=run_config)
|
||||
print(f"Success: {result.success} -> {result.url}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
```
|
||||
|
||||
## Environment Variable Configuration
|
||||
|
||||
Load proxies from environment variables for easy configuration:
|
||||
|
||||
```python
|
||||
import os
|
||||
from crawl4ai import ProxyConfig, CrawlerRunConfig
|
||||
|
||||
# Set environment variable
|
||||
os.environ["PROXIES"] = "ip1:port1:user1:pass1,ip2:port2:user2:pass2,ip3:port3"
|
||||
|
||||
# Load all proxies
|
||||
proxies = ProxyConfig.from_env()
|
||||
print(f"Loaded {len(proxies)} proxies")
|
||||
|
||||
# Use first proxy
|
||||
if proxies:
|
||||
run_config = CrawlerRunConfig(proxy_config=proxies[0])
|
||||
```
|
||||
|
||||
## Rotating Proxies
|
||||
|
||||
Crawl4AI supports automatic proxy rotation to distribute requests across multiple proxy servers. Rotation is applied per request using a rotation strategy on `CrawlerRunConfig`.
|
||||
|
||||
### Proxy Rotation (recommended)
|
||||
```python
|
||||
import asyncio
|
||||
import re
|
||||
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode, ProxyConfig
|
||||
from crawl4ai.proxy_strategy import RoundRobinProxyStrategy
|
||||
|
||||
from crawl4ai import (
|
||||
AsyncWebCrawler,
|
||||
BrowserConfig,
|
||||
CrawlerRunConfig,
|
||||
CacheMode,
|
||||
RoundRobinProxyStrategy,
|
||||
)
|
||||
import asyncio
|
||||
from crawl4ai import ProxyConfig
|
||||
async def main():
|
||||
# Load proxies from environment
|
||||
# Load proxies and create rotation strategy
|
||||
proxies = ProxyConfig.from_env()
|
||||
#eg: export PROXIES="ip1:port1:username1:password1,ip2:port2:username2:password2"
|
||||
if not proxies:
|
||||
print("No proxies found! Set PROXIES environment variable.")
|
||||
print("No proxies found in environment. Set PROXIES env variable!")
|
||||
return
|
||||
|
||||
# Create rotation strategy
|
||||
proxy_strategy = RoundRobinProxyStrategy(proxies)
|
||||
|
||||
# Configure per-request with proxy rotation
|
||||
# Create configs
|
||||
browser_config = BrowserConfig(headless=True, verbose=False)
|
||||
run_config = CrawlerRunConfig(
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
proxy_rotation_strategy=proxy_strategy,
|
||||
proxy_rotation_strategy=proxy_strategy
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
urls = ["https://httpbin.org/ip"] * (len(proxies) * 2) # Test each proxy twice
|
||||
|
||||
print(f"🚀 Testing {len(proxies)} proxies with rotation...")
|
||||
results = await crawler.arun_many(urls=urls, config=run_config)
|
||||
print("\n📈 Initializing crawler with proxy rotation...")
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
print("\n🚀 Starting batch crawl with proxy rotation...")
|
||||
results = await crawler.arun_many(
|
||||
urls=urls,
|
||||
config=run_config
|
||||
)
|
||||
for result in results:
|
||||
if result.success:
|
||||
ip_match = re.search(r'(?:[0-9]{1,3}\.){3}[0-9]{1,3}', result.html)
|
||||
current_proxy = run_config.proxy_config if run_config.proxy_config else None
|
||||
|
||||
for i, result in enumerate(results):
|
||||
if result.success:
|
||||
# Extract IP from response
|
||||
ip_match = re.search(r'(?:[0-9]{1,3}\.){3}[0-9]{1,3}', result.html)
|
||||
if ip_match:
|
||||
detected_ip = ip_match.group(0)
|
||||
proxy_index = i % len(proxies)
|
||||
expected_ip = proxies[proxy_index].ip
|
||||
if current_proxy and ip_match:
|
||||
print(f"URL {result.url}")
|
||||
print(f"Proxy {current_proxy.server} -> Response IP: {ip_match.group(0)}")
|
||||
verified = ip_match.group(0) == current_proxy.ip
|
||||
if verified:
|
||||
print(f"✅ Proxy working! IP matches: {current_proxy.ip}")
|
||||
else:
|
||||
print("❌ Proxy failed or IP mismatch!")
|
||||
print("---")
|
||||
|
||||
print(f"✅ Request {i+1}: Proxy {proxy_index+1} -> IP {detected_ip}")
|
||||
if detected_ip == expected_ip:
|
||||
print(" 🎯 IP matches proxy configuration")
|
||||
else:
|
||||
print(f" ⚠️ IP mismatch (expected {expected_ip})")
|
||||
else:
|
||||
print(f"❌ Request {i+1}: Could not extract IP from response")
|
||||
else:
|
||||
print(f"❌ Request {i+1}: Failed - {result.error_message}")
|
||||
asyncio.run(main())
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
```
|
||||
|
||||
## SSL Certificate Analysis
|
||||
|
||||
Combine proxy usage with SSL certificate inspection for enhanced security analysis. SSL certificate fetching is configured per request via `CrawlerRunConfig`.
|
||||
|
||||
### Per-Request SSL Certificate Analysis
|
||||
```python
|
||||
import asyncio
|
||||
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig
|
||||
|
||||
run_config = CrawlerRunConfig(
|
||||
proxy_config={
|
||||
"server": "http://proxy.example.com:8080",
|
||||
"username": "user",
|
||||
"password": "pass",
|
||||
},
|
||||
fetch_ssl_certificate=True, # Enable SSL certificate analysis for this request
|
||||
)
|
||||
|
||||
|
||||
async def main():
|
||||
browser_config = BrowserConfig()
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
result = await crawler.arun(url="https://example.com", config=run_config)
|
||||
|
||||
if result.success:
|
||||
print(f"✅ Crawled via proxy: {result.url}")
|
||||
|
||||
# Analyze SSL certificate
|
||||
if result.ssl_certificate:
|
||||
cert = result.ssl_certificate
|
||||
print("🔒 SSL Certificate Info:")
|
||||
print(f" Issuer: {cert.issuer}")
|
||||
print(f" Subject: {cert.subject}")
|
||||
print(f" Valid until: {cert.valid_until}")
|
||||
print(f" Fingerprint: {cert.fingerprint}")
|
||||
|
||||
# Export certificate
|
||||
cert.to_json("certificate.json")
|
||||
print("💾 Certificate exported to certificate.json")
|
||||
else:
|
||||
print("⚠️ No SSL certificate information available")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
```
|
||||
|
||||
## Security Best Practices
|
||||
|
||||
### 1. Proxy Rotation for Anonymity
|
||||
```python
|
||||
from crawl4ai import CrawlerRunConfig, ProxyConfig
|
||||
from crawl4ai.proxy_strategy import RoundRobinProxyStrategy
|
||||
|
||||
# Use multiple proxies to avoid IP blocking
|
||||
proxies = ProxyConfig.from_env("PROXIES")
|
||||
strategy = RoundRobinProxyStrategy(proxies)
|
||||
|
||||
# Configure rotation per request (recommended)
|
||||
run_config = CrawlerRunConfig(proxy_rotation_strategy=strategy)
|
||||
|
||||
# For a fixed proxy across all requests, just reuse the same run_config instance
|
||||
static_run_config = run_config
|
||||
```
|
||||
|
||||
### 2. SSL Certificate Verification
|
||||
```python
|
||||
from crawl4ai import CrawlerRunConfig
|
||||
|
||||
# Always verify SSL certificates when possible
|
||||
# Per-request (affects specific requests)
|
||||
run_config = CrawlerRunConfig(fetch_ssl_certificate=True)
|
||||
```
|
||||
|
||||
### 3. Environment Variable Security
|
||||
```bash
|
||||
# Use environment variables for sensitive proxy credentials
|
||||
# Avoid hardcoding usernames/passwords in code
|
||||
export PROXIES="ip1:port1:user1:pass1,ip2:port2:user2:pass2"
|
||||
```
|
||||
|
||||
### 4. SOCKS5 for Enhanced Security
|
||||
```python
|
||||
from crawl4ai import CrawlerRunConfig
|
||||
|
||||
# Prefer SOCKS5 proxies for better protocol support
|
||||
run_config = CrawlerRunConfig(proxy_config="socks5://proxy.example.com:1080")
|
||||
```
|
||||
|
||||
## Migration from Deprecated `proxy` Parameter
|
||||
|
||||
- "Deprecation Notice"
|
||||
The legacy `proxy` argument on `BrowserConfig` is deprecated. Configure proxies through `CrawlerRunConfig.proxy_config` so each request fully describes its network settings.
|
||||
|
||||
```python
|
||||
# Old (deprecated) approach
|
||||
# from crawl4ai import BrowserConfig
|
||||
# browser_config = BrowserConfig(proxy="http://proxy.example.com:8080")
|
||||
|
||||
# New (preferred) approach
|
||||
from crawl4ai import CrawlerRunConfig
|
||||
run_config = CrawlerRunConfig(proxy_config="http://proxy.example.com:8080")
|
||||
```
|
||||
|
||||
### Safe Logging of Proxies
|
||||
```python
|
||||
from crawl4ai import ProxyConfig
|
||||
|
||||
def safe_proxy_repr(proxy: ProxyConfig):
|
||||
if getattr(proxy, "username", None):
|
||||
return f"{proxy.server} (auth: ****)"
|
||||
return proxy.server
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
- "Proxy connection failed"
|
||||
- Verify the proxy server is reachable from your network.
|
||||
- Double-check authentication credentials.
|
||||
- Ensure the protocol matches (`http`, `https`, or `socks5`).
|
||||
|
||||
- "SSL certificate errors"
|
||||
- Some proxies break SSL inspection; switch proxies if you see repeated failures.
|
||||
- Consider temporarily disabling certificate fetching to isolate the issue.
|
||||
|
||||
- "Environment variables not loading"
|
||||
- Confirm `PROXIES` (or your custom env var) is set before running the script.
|
||||
- Check formatting: `ip:port:user:pass,ip:port:user:pass`.
|
||||
|
||||
- "Proxy rotation not working"
|
||||
- Ensure `ProxyConfig.from_env()` actually loaded entries (`len(proxies) > 0`).
|
||||
- Attach `proxy_rotation_strategy` to `CrawlerRunConfig`.
|
||||
- Validate the proxy definitions you pass into the strategy.
|
||||
|
||||
1943  docs/md_v2/api/docker-server.md  Normal file
File diff suppressed because it is too large
@@ -21,35 +21,21 @@ browser_cfg = BrowserConfig(
|
||||
|-----------------------|----------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| **`browser_type`** | `"chromium"`, `"firefox"`, `"webkit"`<br/>*(default: `"chromium"`)* | Which browser engine to use. `"chromium"` is typical for many sites, `"firefox"` or `"webkit"` for specialized tests. |
|
||||
| **`headless`** | `bool` (default: `True`) | Headless means no visible UI. `False` is handy for debugging. |
|
||||
| **`browser_mode`** | `str` (default: `"dedicated"`) | How browser is initialized: `"dedicated"` (new instance), `"builtin"` (CDP background), `"custom"` (explicit CDP), `"docker"` (container). |
|
||||
| **`use_managed_browser`** | `bool` (default: `False`) | Launch browser via CDP for advanced control. Set automatically based on `browser_mode`. |
|
||||
| **`cdp_url`** | `str` (default: `None`) | Chrome DevTools Protocol endpoint URL (e.g., `"ws://localhost:9222/devtools/browser/"`). Set automatically based on `browser_mode`. |
|
||||
| **`debugging_port`** | `int` (default: `9222`) | Port for browser debugging protocol. |
|
||||
| **`host`** | `str` (default: `"localhost"`) | Host for browser connection. |
|
||||
| **`viewport_width`** | `int` (default: `1080`) | Initial page width (in px). Useful for testing responsive layouts. |
|
||||
| **`viewport_height`** | `int` (default: `600`) | Initial page height (in px). |
|
||||
| **`viewport`** | `dict` (default: `None`) | Viewport dimensions dict. If set, overrides `viewport_width` and `viewport_height`. |
|
||||
| **`proxy`** | `str` (deprecated) | Deprecated. Use `proxy_config` instead. If set, it will be auto-converted internally. |
|
||||
| **`proxy_config`** | `ProxyConfig or dict` (default: `None`)| For advanced or multi-proxy needs, specify `ProxyConfig` object or dict like `{"server": "...", "username": "...", "password": "..."}`. |
|
||||
| **`proxy_config`** | `dict` (default: `None`) | For advanced or multi-proxy needs, specify details like `{"server": "...", "username": "...", ...}`. |
|
||||
| **`use_persistent_context`** | `bool` (default: `False`) | If `True`, uses a **persistent** browser context (keep cookies, sessions across runs). Also sets `use_managed_browser=True`. |
|
||||
| **`user_data_dir`** | `str or None` (default: `None`) | Directory to store user data (profiles, cookies). Must be set if you want permanent sessions. |
|
||||
| **`chrome_channel`** | `str` (default: `"chromium"`) | Chrome channel to launch (e.g., "chrome", "msedge"). Only for `browser_type="chromium"`. Auto-set to empty for Firefox/WebKit. |
|
||||
| **`channel`** | `str` (default: `"chromium"`) | Alias for `chrome_channel`. |
|
||||
| **`accept_downloads`** | `bool` (default: `False`) | Whether to allow file downloads. Requires `downloads_path` if `True`. |
|
||||
| **`downloads_path`** | `str or None` (default: `None`) | Directory to store downloaded files. |
|
||||
| **`storage_state`** | `str or dict or None` (default: `None`)| In-memory storage state (cookies, localStorage) to restore browser state. |
|
||||
| **`ignore_https_errors`** | `bool` (default: `True`) | If `True`, continues despite invalid certificates (common in dev/staging). |
|
||||
| **`java_script_enabled`** | `bool` (default: `True`) | Disable if you want no JS overhead, or if only static content is needed. |
|
||||
| **`sleep_on_close`** | `bool` (default: `False`) | Add a small delay when closing browser (can help with cleanup issues). |
|
||||
| **`cookies`** | `list` (default: `[]`) | Pre-set cookies, each a dict like `{"name": "session", "value": "...", "url": "..."}`. |
|
||||
| **`headers`** | `dict` (default: `{}`) | Extra HTTP headers for every request, e.g. `{"Accept-Language": "en-US"}`. |
|
||||
| **`user_agent`** | `str` (default: Chrome-based UA) | Your custom user agent string. |
|
||||
| **`user_agent_mode`** | `str` (default: `""`) | Set to `"random"` to randomize user agent from a pool (helps with bot detection). |
|
||||
| **`user_agent_generator_config`** | `dict` (default: `{}`) | Configuration dict for user agent generation when `user_agent_mode="random"`. |
|
||||
| **`text_mode`** | `bool` (default: `False`) | If `True`, tries to disable images/other heavy content for speed. |
|
||||
| **`user_agent`** | `str` (default: Chrome-based UA) | Your custom or random user agent. `user_agent_mode="random"` can shuffle it. |
|
||||
| **`light_mode`** | `bool` (default: `False`) | Disables some background features for performance gains. |
|
||||
| **`text_mode`** | `bool` (default: `False`) | If `True`, tries to disable images/other heavy content for speed. |
|
||||
| **`use_managed_browser`** | `bool` (default: `False`) | For advanced “managed” interactions (debugging, CDP usage). Typically set automatically if persistent context is on. |
|
||||
| **`extra_args`** | `list` (default: `[]`) | Additional flags for the underlying browser process, e.g. `["--disable-extensions"]`. |
|
||||
| **`enable_stealth`** | `bool` (default: `False`) | Enable playwright-stealth mode to bypass bot detection. Cannot be used with `browser_mode="builtin"`. |
|
||||
|
||||
**Tips**:
|
||||
- Set `headless=False` to visually **debug** how pages load or how interactions proceed.
|
||||
@@ -84,7 +70,6 @@ We group them by category.
|
||||
|------------------------------|--------------------------------------|-------------------------------------------------------------------------------------------------|
|
||||
| **`word_count_threshold`** | `int` (default: ~200) | Skips text blocks below X words. Helps ignore trivial sections. |
|
||||
| **`extraction_strategy`** | `ExtractionStrategy` (default: None) | If set, extracts structured data (CSS-based, LLM-based, etc.). |
|
||||
| **`chunking_strategy`** | `ChunkingStrategy` (default: RegexChunking()) | Strategy to chunk content before extraction. Can be customized for different chunking approaches. |
|
||||
| **`markdown_generator`** | `MarkdownGenerationStrategy` (None) | If you want specialized markdown output (citations, filtering, chunking, etc.). Can be customized with options such as `content_source` parameter to select the HTML input source ('cleaned_html', 'raw_html', or 'fit_html'). |
|
||||
| **`css_selector`** | `str` (None) | Retains only the part of the page matching this selector. Affects the entire extraction process. |
|
||||
| **`target_elements`** | `List[str]` (None) | List of CSS selectors for elements to focus on for markdown generation and data extraction, while still processing the entire page for links, media, etc. Provides more flexibility than `css_selector`. |
|
||||
@@ -93,50 +78,32 @@ We group them by category.
|
||||
| **`only_text`** | `bool` (False) | If `True`, tries to extract text-only content. |
|
||||
| **`prettiify`** | `bool` (False) | If `True`, beautifies final HTML (slower, purely cosmetic). |
|
||||
| **`keep_data_attributes`** | `bool` (False) | If `True`, preserve `data-*` attributes in cleaned HTML. |
|
||||
| **`keep_attrs`** | `list` (default: []) | List of HTML attributes to keep during processing (e.g., `["id", "class", "data-value"]`). |
|
||||
| **`remove_forms`** | `bool` (False) | If `True`, remove all `<form>` elements. |
|
||||
| **`parser_type`** | `str` (default: "lxml") | HTML parser to use (e.g., "lxml", "html.parser"). |
|
||||
| **`scraping_strategy`** | `ContentScrapingStrategy` (default: LXMLWebScrapingStrategy()) | Strategy to use for content scraping. Can be customized for different scraping needs (e.g., PDF extraction). |
|
||||
|
||||
---
|
||||
|
||||
### B) **Browser Location and Identity**
|
||||
|
||||
| **Parameter** | **Type / Default** | **What It Does** |
|
||||
|------------------------|---------------------------|--------------------------------------------------------------------------------------------------------|
|
||||
| **`locale`** | `str or None` (None) | Browser's locale (e.g., "en-US", "fr-FR") for language preferences. |
|
||||
| **`timezone_id`** | `str or None` (None) | Browser's timezone (e.g., "America/New_York", "Europe/Paris"). |
|
||||
| **`geolocation`** | `GeolocationConfig or None` (None) | GPS coordinates configuration. Use `GeolocationConfig(latitude=..., longitude=..., accuracy=...)`. |
|
||||
| **`fetch_ssl_certificate`** | `bool` (False) | If `True`, fetches and includes SSL certificate information in the result. |
|
||||
| **`proxy_config`** | `ProxyConfig or dict or None` (None) | Proxy configuration for this specific crawl. Can override browser-level proxy settings. |
|
||||
| **`proxy_rotation_strategy`** | `ProxyRotationStrategy` (None) | Strategy for rotating proxies during crawl operations. |
|
||||
|
||||
---
|
||||
|
||||
### C) **Caching & Session**
|
||||
### B) **Caching & Session**
|
||||
|
||||
| **Parameter** | **Type / Default** | **What It Does** |
|
||||
|-------------------------|------------------------|------------------------------------------------------------------------------------------------------------------------------|
|
||||
| **`cache_mode`** | `CacheMode or None` | Controls how caching is handled (`ENABLED`, `BYPASS`, `DISABLED`, etc.). If `None`, typically defaults to `ENABLED`. |
|
||||
| **`session_id`** | `str or None` | Assign a unique ID to reuse a single browser session across multiple `arun()` calls. |
|
||||
| **`bypass_cache`** | `bool` (False) | **Deprecated.** If `True`, acts like `CacheMode.BYPASS`. Use `cache_mode` instead. |
|
||||
| **`disable_cache`** | `bool` (False) | **Deprecated.** If `True`, acts like `CacheMode.DISABLED`. Use `cache_mode` instead. |
|
||||
| **`no_cache_read`** | `bool` (False) | **Deprecated.** If `True`, acts like `CacheMode.WRITE_ONLY` (writes cache but never reads). Use `cache_mode` instead. |
|
||||
| **`no_cache_write`** | `bool` (False) | **Deprecated.** If `True`, acts like `CacheMode.READ_ONLY` (reads cache but never writes). Use `cache_mode` instead. |
|
||||
| **`shared_data`** | `dict or None` (None) | Shared data to be passed between hooks and accessible across crawl operations. |
|
||||
| **`bypass_cache`** | `bool` (False) | If `True`, acts like `CacheMode.BYPASS`. |
|
||||
| **`disable_cache`** | `bool` (False) | If `True`, acts like `CacheMode.DISABLED`. |
|
||||
| **`no_cache_read`** | `bool` (False) | If `True`, acts like `CacheMode.WRITE_ONLY` (writes cache but never reads). |
|
||||
| **`no_cache_write`** | `bool` (False) | If `True`, acts like `CacheMode.READ_ONLY` (reads cache but never writes). |
|
||||
|
||||
Use these for controlling whether you read or write from a local content cache. Handy for large batch crawls or repeated site visits.
|
||||
|
||||
---
|
||||
|
||||
### D) **Page Navigation & Timing**
|
||||
### C) **Page Navigation & Timing**
|
||||
|
||||
| **Parameter** | **Type / Default** | **What It Does** |
|
||||
|----------------------------|-------------------------|----------------------------------------------------------------------------------------------------------------------|
|
||||
| **`wait_until`** | `str` (domcontentloaded)| Condition for navigation to "complete". Often `"networkidle"` or `"domcontentloaded"`. |
|
||||
| **`wait_until`** | `str` (domcontentloaded)| Condition for navigation to “complete”. Often `"networkidle"` or `"domcontentloaded"`. |
|
||||
| **`page_timeout`** | `int` (60000 ms) | Timeout for page navigation or JS steps. Increase for slow sites. |
|
||||
| **`wait_for`** | `str or None` | Wait for a CSS (`"css:selector"`) or JS (`"js:() => bool"`) condition before content extraction. |
|
||||
| **`wait_for_timeout`** | `int or None` (None) | Specific timeout in ms for the `wait_for` condition. If None, uses `page_timeout`. |
|
||||
| **`wait_for_images`** | `bool` (False) | Wait for images to load before finishing. Slows down if you only want text. |
|
||||
| **`delay_before_return_html`** | `float` (0.1) | Additional pause (seconds) before final HTML is captured. Good for last-second updates. |
|
||||
| **`check_robots_txt`** | `bool` (False) | Whether to check and respect robots.txt rules before crawling. If True, caches robots.txt for efficiency. |
|
||||
@@ -145,17 +112,15 @@ Use these for controlling whether you read or write from a local content cache.
|
||||
|
||||
---
|
||||
|
||||
### E) **Page Interaction**
|
||||
### D) **Page Interaction**
|
||||
|
||||
| **Parameter** | **Type / Default** | **What It Does** |
|
||||
|----------------------------|--------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| **`js_code`** | `str or list[str]` (None) | JavaScript to run after load. E.g. `"document.querySelector('button')?.click();"`. |
|
||||
| **`c4a_script`** | `str or list[str]` (None) | C4A script that compiles to JavaScript. Alternative to writing raw JS. |
|
||||
| **`js_only`** | `bool` (False) | If `True`, indicates we're reusing an existing session and only applying JS. No full reload. |
|
||||
| **`js_only`** | `bool` (False) | If `True`, indicates we’re reusing an existing session and only applying JS. No full reload. |
|
||||
| **`ignore_body_visibility`** | `bool` (True) | Skip checking if `<body>` is visible. Usually best to keep `True`. |
|
||||
| **`scan_full_page`** | `bool` (False) | If `True`, auto-scroll the page to load dynamic content (infinite scroll). |
|
||||
| **`scroll_delay`** | `float` (0.2) | Delay between scroll steps if `scan_full_page=True`. |
|
||||
| **`max_scroll_steps`** | `int or None` (None) | Maximum number of scroll steps during full page scan. If None, scrolls until entire page is loaded. |
|
||||
| **`process_iframes`** | `bool` (False) | Inlines iframe content for single-page extraction. |
|
||||
| **`remove_overlay_elements`** | `bool` (False) | Removes potential modals/popups blocking the main content. |
|
||||
| **`simulate_user`** | `bool` (False) | Simulate user interactions (mouse movements) to avoid bot detection. |
|
||||
@@ -167,7 +132,7 @@ If your page is a single-page app with repeated JS updates, set `js_only=True` i
|
||||
|
||||
---
|
||||
|
||||
### F) **Media Handling**
|
||||
### E) **Media Handling**
|
||||
|
||||
| **Parameter** | **Type / Default** | **What It Does** |
|
||||
|--------------------------------------------|---------------------|-----------------------------------------------------------------------------------------------------------|
|
||||
@@ -176,16 +141,13 @@ If your page is a single-page app with repeated JS updates, set `js_only=True` i
|
||||
| **`screenshot_height_threshold`** | `int` (~20000) | If the page is taller than this, alternate screenshot strategies are used. |
|
||||
| **`pdf`** | `bool` (False) | If `True`, returns a PDF in `result.pdf`. |
|
||||
| **`capture_mhtml`** | `bool` (False) | If `True`, captures an MHTML snapshot of the page in `result.mhtml`. MHTML includes all page resources (CSS, images, etc.) in a single file. |
|
||||
| **`image_description_min_word_threshold`** | `int` (~50) | Minimum words for an image's alt text or description to be considered valid. |
|
||||
| **`image_description_min_word_threshold`** | `int` (~50) | Minimum words for an image’s alt text or description to be considered valid. |
|
||||
| **`image_score_threshold`** | `int` (~3) | Filter out low-scoring images. The crawler scores images by relevance (size, context, etc.). |
|
||||
| **`exclude_external_images`** | `bool` (False) | Exclude images from other domains. |
|
||||
| **`exclude_all_images`** | `bool` (False) | If `True`, excludes all images from processing (both internal and external). |
|
||||
| **`table_score_threshold`** | `int` (7) | Minimum score threshold for processing a table. Lower values include more tables. |
|
||||
| **`table_extraction`** | `TableExtractionStrategy` (DefaultTableExtraction) | Strategy for table extraction. Defaults to DefaultTableExtraction with configured threshold. |
|
||||
|
||||
---
|
||||
|
||||
### G) **Link/Domain Handling**
|
||||
### F) **Link/Domain Handling**
|
||||
|
||||
| **Parameter** | **Type / Default** | **What It Does** |
|
||||
|------------------------------|-------------------------|-----------------------------------------------------------------------------------------------------------------------------|
|
||||
@@ -193,39 +155,23 @@ If your page is a single-page app with repeated JS updates, set `js_only=True` i
|
||||
| **`exclude_external_links`** | `bool` (False) | Removes all links pointing outside the current domain. |
|
||||
| **`exclude_social_media_links`** | `bool` (False) | Strips links specifically to social sites (like Facebook or Twitter). |
|
||||
| **`exclude_domains`** | `list` ([]) | Provide a custom list of domains to exclude (like `["ads.com", "trackers.io"]`). |
|
||||
| **`exclude_internal_links`** | `bool` (False) | If `True`, excludes internal links from the results. |
|
||||
| **`score_links`** | `bool` (False) | If `True`, calculates intrinsic quality scores for all links using URL structure, text quality, and contextual metrics. |
|
||||
| **`preserve_https_for_internal_links`** | `bool` (False) | If `True`, preserves HTTPS scheme for internal links even when the server redirects to HTTP. Useful for security-conscious crawling. |
|
||||
|
||||
Use these for link-level content filtering (often to keep crawls “internal” or to remove spammy domains).
|
||||
|
||||
---
|
||||
|
||||
### H) **Debug, Logging & Network Monitoring**
|
||||
### G) **Debug & Logging**
|
||||
|
||||
| **Parameter** | **Type / Default** | **What It Does** |
|
||||
|----------------|--------------------|---------------------------------------------------------------------------|
|
||||
| **`verbose`** | `bool` (True) | Prints logs detailing each step of crawling, interactions, or errors. |
|
||||
| **`log_console`** | `bool` (False) | Logs the page's JavaScript console output if you want deeper JS debugging.|
|
||||
| **`capture_network_requests`** | `bool` (False) | If `True`, captures network requests made by the page in `result.captured_requests`. |
|
||||
| **`capture_console_messages`** | `bool` (False) | If `True`, captures console messages from the page in `result.console_messages`. |
|
||||
| **`log_console`** | `bool` (False) | Logs the page’s JavaScript console output if you want deeper JS debugging.|
|
||||
|
||||
---
|
||||
|
||||
### I) **Connection & HTTP Parameters**
|
||||
|
||||
| **Parameter** | **Type / Default** | **What It Does** |
|
||||
|-----------------------------|-------------------------|----------------------------------------------------------------------------------------------------------------------|
|
||||
| **`method`** | `str` ("GET") | HTTP method to use when using AsyncHTTPCrawlerStrategy (e.g., "GET", "POST"). |
|
||||
| **`stream`** | `bool` (False) | If `True`, enables streaming mode for `arun_many()` to process URLs as they complete rather than waiting for all. |
|
||||
| **`url`** | `str or None` (None) | URL for this specific config. Not typically set directly but used internally for URL-specific configurations. |
|
||||
| **`user_agent`** | `str or None` (None) | Custom User-Agent string for this crawl. Can override browser-level user agent. |
|
||||
| **`user_agent_mode`** | `str or None` (None) | Set to `"random"` to randomize user agent. Can override browser-level setting. |
|
||||
| **`user_agent_generator_config`** | `dict` ({}) | Configuration for user agent generation when `user_agent_mode="random"`. |
|
||||
|
||||
---
|
||||
|
||||
### J) **Virtual Scroll Configuration**
|
||||
### H) **Virtual Scroll Configuration**
|
||||
|
||||
| **Parameter** | **Type / Default** | **What It Does** |
|
||||
|------------------------------|------------------------------|-------------------------------------------------------------------------------------------------------------------------------------|
|
||||
@@ -265,7 +211,7 @@ See [Virtual Scroll documentation](../../advanced/virtual-scroll.md) for detaile
|
||||
|
||||
---
|
||||
|
||||
### K) **URL Matching Configuration**
|
||||
### I) **URL Matching Configuration**
|
||||
|
||||
| **Parameter** | **Type / Default** | **What It Does** |
|
||||
|------------------------|------------------------------|-------------------------------------------------------------------------------------------------------------------------------------|
|
||||
@@ -328,25 +274,7 @@ default_config = CrawlerRunConfig() # No url_matcher = matches everything
|
||||
- If no config matches a URL and there's no default config (one without `url_matcher`), the URL will fail with "No matching configuration found"
|
||||
- Always include a default config as the last item if you want to handle all URLs
|
||||
|
||||
---
|
||||
|
||||
### L) **Advanced Crawling Features**
|
||||
|
||||
| **Parameter** | **Type / Default** | **What It Does** |
|
||||
|-----------------------------|------------------------------|-------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| **`deep_crawl_strategy`** | `DeepCrawlStrategy or None` (None) | Strategy for deep/recursive crawling. Enables automatic link following and multi-level site crawling. |
|
||||
| **`link_preview_config`** | `LinkPreviewConfig or dict or None` (None) | Configuration for link head extraction and scoring. Fetches and scores link metadata without full page loads. |
|
||||
| **`experimental`** | `dict or None` (None) | Dictionary for experimental/beta features not yet integrated into main parameters. Use with caution. |
|
||||
|
||||
**Deep Crawl Strategy** enables automatic site exploration by following links according to defined rules. Useful for sitemap generation or comprehensive site archiving.
|
||||
|
||||
**Link Preview Config** allows efficient link discovery and scoring by fetching only the `<head>` section of linked pages, enabling smart crawl prioritization without the overhead of full page loads.
|
||||
|
||||
**Experimental** parameters are features in beta testing. They may change or be removed in future versions. Check documentation for currently available experimental features.
|
||||
|
||||
---
|
||||
|
||||
## 2.2 Helper Methods
|
||||
---## 2.2 Helper Methods
|
||||
|
||||
Both `BrowserConfig` and `CrawlerRunConfig` provide a `clone()` method to create modified copies:
|
||||
|
||||
|
||||
@@ -18,7 +18,7 @@ A comprehensive web-based tutorial for learning and experimenting with C4A-Scrip
|
||||
|
||||
2. **Install Dependencies**
|
||||
```bash
|
||||
pip install -r requirements.txt
|
||||
pip install flask
|
||||
```
|
||||
|
||||
3. **Launch the Server**
|
||||
@@ -28,7 +28,7 @@ A comprehensive web-based tutorial for learning and experimenting with C4A-Scrip
|
||||
|
||||
4. **Open in Browser**
|
||||
```
|
||||
http://localhost:8000
|
||||
http://localhost:8080
|
||||
```
|
||||
|
||||
**🌐 Try Online**: [Live Demo](https://docs.crawl4ai.com/c4a-script/demo)
|
||||
@@ -325,7 +325,7 @@ Powers the recording functionality:
|
||||
### Configuration
|
||||
```python
|
||||
# server.py configuration
|
||||
PORT = 8000
|
||||
PORT = 8080
|
||||
DEBUG = True
|
||||
THREADED = True
|
||||
```
|
||||
@@ -343,9 +343,9 @@ THREADED = True
|
||||
**Port Already in Use**
|
||||
```bash
|
||||
# Kill existing process
|
||||
lsof -ti:8000 | xargs kill -9
|
||||
lsof -ti:8080 | xargs kill -9
|
||||
# Or use different port
|
||||
python server.py --port 8001
|
||||
python server.py --port 8081
|
||||
```
|
||||
|
||||
**Blockly Not Loading**
|
||||
|
||||
@@ -216,7 +216,7 @@ def get_examples():
|
||||
'name': 'Handle Cookie Banner',
|
||||
'description': 'Accept cookies and close newsletter popup',
|
||||
'script': '''# Handle cookie banner and newsletter
|
||||
GO http://127.0.0.1:8000/playground/
|
||||
GO http://127.0.0.1:8080/playground/
|
||||
WAIT `body` 2
|
||||
IF (EXISTS `.cookie-banner`) THEN CLICK `.accept`
|
||||
IF (EXISTS `.newsletter-popup`) THEN CLICK `.close`'''
|
||||
@@ -283,7 +283,7 @@ WAIT `.success-message` 5'''
|
||||
return jsonify(examples)
|
||||
|
||||
if __name__ == '__main__':
|
||||
port = int(os.environ.get('PORT', 8000))
|
||||
port = int(os.environ.get('PORT', 8080))
|
||||
print(f"""
|
||||
╔══════════════════════════════════════════════════════════╗
|
||||
║ C4A-Script Interactive Tutorial Server ║
|
||||
|
||||
Binary file not shown.
Binary file not shown.
@@ -1,376 +0,0 @@
|
||||
/* ==== File: assets/page_actions.css ==== */
|
||||
/* Page Actions Dropdown - Terminal Style */
|
||||
|
||||
/* Wrapper - positioned in content area */
|
||||
.page-actions-wrapper {
|
||||
position: absolute;
|
||||
top: 1.3rem;
|
||||
right: 1rem;
|
||||
z-index: 1000;
|
||||
}
|
||||
|
||||
/* Floating Action Button */
|
||||
.page-actions-button {
|
||||
position: relative;
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
gap: 0.5rem;
|
||||
background: #3f3f44;
|
||||
border: 1px solid #50ffff;
|
||||
color: #e8e9ed;
|
||||
padding: 0.75rem 1rem;
|
||||
border-radius: 6px;
|
||||
font-family: 'Dank Mono', Monaco, monospace;
|
||||
font-size: 0.875rem;
|
||||
cursor: pointer;
|
||||
transition: all 0.2s ease;
|
||||
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.3);
|
||||
}
|
||||
|
||||
.page-actions-button:hover {
|
||||
background: #50ffff;
|
||||
color: #070708;
|
||||
transform: translateY(-2px);
|
||||
box-shadow: 0 6px 16px rgba(80, 255, 255, 0.3);
|
||||
}
|
||||
|
||||
.page-actions-button::before {
|
||||
content: '▤';
|
||||
font-size: 1.2rem;
|
||||
line-height: 1;
|
||||
}
|
||||
|
||||
.page-actions-button::after {
|
||||
content: '▼';
|
||||
font-size: 0.6rem;
|
||||
transition: transform 0.2s ease;
|
||||
}
|
||||
|
||||
.page-actions-button.active::after {
|
||||
transform: rotate(180deg);
|
||||
}
|
||||
|
||||
/* Dropdown Menu */
|
||||
.page-actions-dropdown {
|
||||
position: absolute;
|
||||
top: 3.5rem;
|
||||
right: 0;
|
||||
z-index: 1001;
|
||||
background: #1a1a1a;
|
||||
border: 1px solid #3f3f44;
|
||||
border-radius: 8px;
|
||||
min-width: 280px;
|
||||
opacity: 0;
|
||||
visibility: hidden;
|
||||
transform: translateY(-10px);
|
||||
transition: all 0.2s ease;
|
||||
box-shadow: 0 8px 24px rgba(0, 0, 0, 0.5);
|
||||
overflow: hidden;
|
||||
}
|
||||
|
||||
.page-actions-dropdown.active {
|
||||
opacity: 1;
|
||||
visibility: visible;
|
||||
transform: translateY(0);
|
||||
}
|
||||
|
||||
.page-actions-dropdown::before {
|
||||
content: '';
|
||||
position: absolute;
|
||||
top: -8px;
|
||||
right: 1.5rem;
|
||||
width: 0;
|
||||
height: 0;
|
||||
border-left: 8px solid transparent;
|
||||
border-right: 8px solid transparent;
|
||||
border-bottom: 8px solid #3f3f44;
|
||||
}
|
||||
|
||||
/* Menu Header */
|
||||
.page-actions-header {
|
||||
background: #3f3f44;
|
||||
padding: 0.5rem 0.75rem;
|
||||
border-bottom: 1px solid #50ffff;
|
||||
font-family: 'Dank Mono', Monaco, monospace;
|
||||
font-size: 0.7rem;
|
||||
color: #a3abba;
|
||||
text-transform: uppercase;
|
||||
letter-spacing: 0.05em;
|
||||
}
|
||||
|
||||
.page-actions-header::before {
|
||||
content: '┌─';
|
||||
margin-right: 0.5rem;
|
||||
color: #50ffff;
|
||||
}
|
||||
|
||||
/* Menu Items */
|
||||
.page-actions-menu {
|
||||
list-style: none;
|
||||
margin: 0;
|
||||
padding: 0.25rem 0;
|
||||
}
|
||||
|
||||
.page-action-item {
|
||||
display: block;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
ul>li.page-action-item::after{
|
||||
content: '';
|
||||
}
|
||||
.page-action-link {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 0.5rem;
|
||||
padding: 0.5rem 0.75rem;
|
||||
color: #e8e9ed;
|
||||
text-decoration: none !important;
|
||||
font-family: 'Dank Mono', Monaco, monospace;
|
||||
font-size: 0.8rem;
|
||||
transition: all 0.15s ease;
|
||||
cursor: pointer;
|
||||
border-left: 3px solid transparent;
|
||||
}
|
||||
|
||||
.page-action-link:hover:not(.disabled) {
|
||||
background: #3f3f44;
|
||||
border-left-color: #50ffff;
|
||||
color: #50ffff;
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
.page-action-link.disabled {
|
||||
opacity: 0.5;
|
||||
cursor: not-allowed;
|
||||
}
|
||||
|
||||
.page-action-link.disabled:hover {
|
||||
background: transparent;
|
||||
color: #e8e9ed;
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
/* Icons using ASCII/Terminal characters */
|
||||
.page-action-icon {
|
||||
font-size: 1rem;
|
||||
width: 1.5rem;
|
||||
text-align: center;
|
||||
font-weight: bold;
|
||||
color: #50ffff;
|
||||
}
|
||||
|
||||
.page-action-link:hover:not(.disabled) .page-action-icon {
|
||||
color: #50ffff;
|
||||
}
|
||||
|
||||
.page-action-link.disabled .page-action-icon {
|
||||
color: #666;
|
||||
}
|
||||
|
||||
/* Specific icons */
|
||||
.icon-copy::before {
|
||||
content: '⎘'; /* Copy/duplicate symbol */
|
||||
}
|
||||
|
||||
.icon-view::before {
|
||||
content: '⎙'; /* Document symbol */
|
||||
}
|
||||
|
||||
.icon-ai::before {
|
||||
content: '⚡'; /* Lightning/AI symbol */
|
||||
}
|
||||
|
||||
/* Action Text */
|
||||
.page-action-text {
|
||||
flex: 1;
|
||||
}
|
||||
|
||||
.page-action-label {
|
||||
display: block;
|
||||
font-weight: 600;
|
||||
margin-bottom: 0.05rem;
|
||||
line-height: 1.3;
|
||||
}
|
||||
|
||||
.page-action-description {
|
||||
display: block;
|
||||
font-size: 0.7rem;
|
||||
color: #a3abba;
|
||||
line-height: 1.2;
|
||||
}
|
||||
|
||||
/* Badge */
|
||||
/* External link indicator */
|
||||
.page-action-external::after {
|
||||
content: '→';
|
||||
margin-left: 0.25rem;
|
||||
font-size: 0.75rem;
|
||||
}
|
||||
|
||||
/* Divider */
|
||||
.page-actions-divider {
|
||||
height: 1px;
|
||||
background: #3f3f44;
|
||||
margin: 0.25rem 0;
|
||||
}
|
||||
|
||||
/* Success/Copy feedback */
|
||||
.page-action-copied {
|
||||
background: #50ff50 !important;
|
||||
color: #070708 !important;
|
||||
border-left-color: #50ff50 !important;
|
||||
}
|
||||
|
||||
.page-action-copied .page-action-icon {
|
||||
color: #070708 !important;
|
||||
}
|
||||
|
||||
.page-action-copied .page-action-icon::before {
|
||||
content: '✓';
|
||||
}
|
||||
|
||||
/* Mobile Responsive */
|
||||
@media (max-width: 768px) {
|
||||
.page-actions-wrapper {
|
||||
top: 0.5rem;
|
||||
right: 0.5rem;
|
||||
}
|
||||
|
||||
.page-actions-button {
|
||||
padding: 0.6rem 0.8rem;
|
||||
font-size: 0.8rem;
|
||||
}
|
||||
|
||||
.page-actions-dropdown {
|
||||
min-width: 260px;
|
||||
max-width: calc(100vw - 2rem);
|
||||
right: -0.5rem;
|
||||
}
|
||||
|
||||
.page-action-link {
|
||||
padding: 0.6rem 0.8rem;
|
||||
font-size: 0.8rem;
|
||||
}
|
||||
|
||||
.page-action-description {
|
||||
font-size: 0.7rem;
|
||||
}
|
||||
}
|
||||
|
||||
/* Animation for tooltip/notification */
|
||||
@keyframes slideInFromTop {
|
||||
from {
|
||||
transform: translateY(-20px);
|
||||
opacity: 0;
|
||||
}
|
||||
to {
|
||||
transform: translateY(0);
|
||||
opacity: 1;
|
||||
}
|
||||
}
|
||||
|
||||
.page-actions-notification {
|
||||
position: fixed;
|
||||
top: calc(var(--header-height) + 0.5rem);
|
||||
right: 50%;
|
||||
transform: translateX(50%);
|
||||
z-index: 1100;
|
||||
background: #50ff50;
|
||||
color: #070708;
|
||||
padding: 0.75rem 1.5rem;
|
||||
border-radius: 6px;
|
||||
font-family: 'Dank Mono', Monaco, monospace;
|
||||
font-size: 0.875rem;
|
||||
font-weight: 600;
|
||||
box-shadow: 0 4px 12px rgba(80, 255, 80, 0.4);
|
||||
animation: slideInFromTop 0.3s ease;
|
||||
pointer-events: none;
|
||||
}
|
||||
|
||||
.page-actions-notification::before {
|
||||
content: '✓ ';
|
||||
margin-right: 0.5rem;
|
||||
}
|
||||
|
||||
/* Hide on print */
|
||||
@media print {
|
||||
.page-actions-button,
|
||||
.page-actions-dropdown {
|
||||
display: none !important;
|
||||
}
|
||||
}
|
||||
|
||||
/* Overlay for mobile */
|
||||
.page-actions-overlay {
|
||||
display: none;
|
||||
position: fixed;
|
||||
top: 0;
|
||||
left: 0;
|
||||
right: 0;
|
||||
bottom: 0;
|
||||
background: rgba(0, 0, 0, 0.5);
|
||||
z-index: 998;
|
||||
opacity: 0;
|
||||
transition: opacity 0.2s ease;
|
||||
}
|
||||
|
||||
.page-actions-overlay.active {
|
||||
display: block;
|
||||
opacity: 1;
|
||||
}
|
||||
|
||||
@media (max-width: 768px) {
|
||||
.page-actions-overlay {
|
||||
display: block;
|
||||
}
|
||||
}
|
||||
|
||||
/* Keyboard focus styles */
|
||||
.page-action-link:focus {
|
||||
outline: 2px solid #50ffff;
|
||||
outline-offset: -2px;
|
||||
}
|
||||
|
||||
.page-actions-button:focus {
|
||||
outline: 2px solid #50ffff;
|
||||
outline-offset: 2px;
|
||||
}
|
||||
|
||||
/* Loading state */
|
||||
.page-action-link.loading {
|
||||
pointer-events: none;
|
||||
opacity: 0.7;
|
||||
}
|
||||
|
||||
.page-action-link.loading .page-action-icon::before {
|
||||
content: '⟳';
|
||||
animation: spin 1s linear infinite;
|
||||
}
|
||||
|
||||
@keyframes spin {
|
||||
from { transform: rotate(0deg); }
|
||||
to { transform: rotate(360deg); }
|
||||
}
|
||||
|
||||
/* Terminal-style border effect on hover */
|
||||
.page-actions-dropdown:hover {
|
||||
border-color: #50ffff;
|
||||
}
|
||||
|
||||
/* Footer info */
|
||||
.page-actions-footer {
|
||||
background: #070708;
|
||||
padding: 0.4rem 0.75rem;
|
||||
border-top: 1px solid #3f3f44;
|
||||
font-size: 0.65rem;
|
||||
color: #666;
|
||||
text-align: center;
|
||||
font-family: 'Dank Mono', Monaco, monospace;
|
||||
}
|
||||
|
||||
.page-actions-footer::before {
|
||||
content: '└─';
|
||||
margin-right: 0.5rem;
|
||||
color: #3f3f44;
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff.