Compare commits
257 Commits
next-alpin ... release/v0
Commits (SHA1):
9f9ea3bb3b, d58b93c207, e2b4705010, 4a1abd5086, 04258cd4f2, 84e462d9f8, 9546773a07, 66a979ad11,
0c31e91b53, 1b6a31f88f, b8c261780f, db6ad7a79d, 004d514f33, 3a9e2c716e, 0163bd797c, 26bad799e4,
cf8badfe27, ccbe3c105c, 761c19d54b, 14b0ecb137, 0eaa9f9895, 1d1970ae69, 205df1e330, 2640dc73a5,
58024755c5, dd5ee752cf, bde1bba6a2, 14f690d751, 7b9ba3015f, 0c8bb742b7, ba2ed53ff1, a93efcb650,
8794852a26, fb25a4a769, afe852935e, 0ebce590f8, 026e96a2df, 36429a63de, a3d41c7951, fee4c5c783,
0f210f6e02, 1a73fb60db, 74705c1f67, 048d9b0f5b, ee25c771d8, a353515271, 539a324cf6, 5c9c305dbf,
02f3127ded, e528086341, 414f16e975, b7a6e02236, 9332326457, 6cd34b3157, 871d4f1158, dc85481180,
5d9213a0e9, c0fd36982d, 4679ee023d, f9b7090084, cab457e9c7, 2a0c0ed18d, c73a130c50, ef6f4329fa,
4eb90b41b6, 9442597f81, 0ac12da9f3, 74b06d4b80, 40640badad, 926592649e, b870bfdb6c, 6f3a0ea38e,
451b0d6c9a, 8b215e17af, b4bb0ccea0, 08a2cdae53, ca03acbc82, 3f6f2e998c, 5ac19a61d7, 022cc2d92a,
e731596315, 641526af81, 82a25c037a, c6fc5c0518, b5c2732f88, 09fd3e152a, 3f9424e884, 3048cc1ff9,
fcc2abe4db, cc95d3abd4, 5ce3e682f3, 28125c1980, 773ed7b281, 58c1e17170, b55e27d2ef, 3b766e1aac,
c3b7b7e918, 7d0b447e1c, 33b0e222ca, 1fc45ffac8, 9c2cc7f73c, 1c5e76d51a, 7665a6832f, a06710ff03,
ad078c3f18, 400a6621ee, 3d46d89759, da8f0dbb93, 33a0c7a17a, bf56787874, 08ad7ef257, 984524ca1c,
1c0ce41328, cb8d581e47, a55c2b3f88, ce09648af1, a97654270b, b4fc60a555, 137ac014fb, faa98eefbc,
85ac6fa523, becc4624bb, 754ba731fa, ac9981a1f5, 83ef15fd47, a3cb938675, 9b60988232, 98e951f611,
baca2df8df, 8a5e23d374, 22725ca87b, e0fbd2b0a0, 32966bea11, a3b0cab52a, 137556b3dc, 260e2dc347,
25d97d56e4, 98a56e6e01, 897e017361, a3e9ef91ad, 76dd86d1b3, 206a9dfabd, 1af3d1c2e0, c1041b9bbe,
f6e25e2a6b, ee93acbd06, 2b17f234f8, eebb8c84f0, 12783fabda, 39e3b792a1, aaf05910eb, a0555d5fa6,
38ebcbb304, 9b5ccac76e, 87d4b0fff4, bd5a9ac632, 6650b2f34a, 5cc58f9bb3, baf7f6a6f5, e0cd3e10de,
94e9959fe0, 7c2fd5202e, ee01b81f3e, 0e5d672763, cd2b490b40, 50f0b83fcd, 1d6a2b9979, 039be1b1ce,
9499164d3c, 53245e4e0e, 2140d9aca4, ccec40ed17, 094201ab2a, ad4dfb21e1, 7784b2468e, 146f9d415f,
37fd80e4b9, 949a93982e, c4f5651199, b0aa8bc9f7, c98ffe2130, 4812f08a73, f3ebb38edf, 0007aea204,
b5c25731e6, 5297e362f3, 14a31456ef, a58c8000aa, b27bb367e8, d2648eaa39, c2902fd200, 16b2318242,
907cba194f, 3bf78ff47a, 921e0c46b6, fd899f66aa, 30ec4f571f, 7db6b468d9, 0886153d6a, 0ec3c4a788,
eed7f88f29, 94d486579c, 5206c6f2d6, 230f22da86, 05085b6e3d, 793668a413, 82aa53aa59, 1f3b1251d0,
7b9aabc64a, dcc265458c, 7d8e81fb2e, 9fc5d315af, d84508b4d5, 022f5c9e25, b2f3cb0dfa, 18e8227dfb,
7c358a1aee, 6f7ab9c927, 7155778eac, 4133e5460d, 73fda8a6ec, 9e16a4bb26, 765f856ed4, 757e3177ed,
d8357e80d2, ef1f0c4102, 1119f2f5b5, d8cbeff386, 57e0423b3a, 7be5427283, 585e5e5973, e3111d0a32,
2f0e217751, efa73257c5, e01d1e73e1, 471d110c5e, f89113377a, 6740e87b4d, 8b761f232b, e0c2a7c284,
ac2f9ae533, eedda1ae5c, 8cecbec7a7, 4359b12003, 529a79725e, 9109ecd8fc, 84883be513, c190ba816d,
a3954dd4c6, cbb8755972, 341b7a5f2a, 504207faa6, f14e4a4b67, 1e819cdb26, 5edfea279d, 7c1705712d,
27af4cc27b
.claude/settings.local.json (new file, 28 lines)
@@ -0,0 +1,28 @@
{
  "permissions": {
    "allow": [
      "Bash(cd:*)",
      "Bash(python3:*)",
      "Bash(python:*)",
      "Bash(grep:*)",
      "Bash(mkdir:*)",
      "Bash(cp:*)",
      "Bash(rm:*)",
      "Bash(true)",
      "Bash(./package-extension.sh:*)",
      "Bash(find:*)",
      "Bash(chmod:*)",
      "Bash(rg:*)",
      "Bash(/Users/unclecode/.npm-global/lib/node_modules/@anthropic-ai/claude-code/vendor/ripgrep/arm64-darwin/rg -A 5 -B 5 \"Script Builder\" docs/md_v2/apps/crawl4ai-assistant/)",
      "Bash(/Users/unclecode/.npm-global/lib/node_modules/@anthropic-ai/claude-code/vendor/ripgrep/arm64-darwin/rg -A 30 \"generateCode\\(events, format\\)\" docs/md_v2/apps/crawl4ai-assistant/content/content.js)",
      "Bash(/Users/unclecode/.npm-global/lib/node_modules/@anthropic-ai/claude-code/vendor/ripgrep/arm64-darwin/rg \"<style>\" docs/md_v2/apps/crawl4ai-assistant/index.html -A 5)",
      "Bash(git checkout:*)",
      "Bash(docker logs:*)",
      "Bash(curl:*)",
      "Bash(docker compose:*)",
      "Bash(./test-final-integration.sh:*)",
      "Bash(mv:*)"
    ]
  },
  "enableAllProjectMcpServers": false
}
.github/workflows/main.yml (vendored, 13 changed lines)
@@ -9,16 +9,26 @@ on:
    types: [opened]
  discussion:
    types: [created]
  watch:
    types: [started]

jobs:
  notify-discord:
    runs-on: ubuntu-latest
    steps:
      - name: Send to Google Apps Script (Stars only)
        if: github.event_name == 'watch'
        run: |
          curl -fSs -X POST "${{ secrets.GOOGLE_SCRIPT_ENDPOINT }}" \
            -H 'Content-Type: application/json' \
            -d '{"url":"${{ github.event.sender.html_url }}"}'
      - name: Set webhook based on event type
        id: set-webhook
        run: |
          if [ "${{ github.event_name }}" == "discussion" ]; then
            echo "webhook=${{ secrets.DISCORD_DISCUSSIONS_WEBHOOK }}" >> $GITHUB_OUTPUT
          elif [ "${{ github.event_name }}" == "watch" ]; then
            echo "webhook=${{ secrets.DISCORD_STAR_GAZERS }}" >> $GITHUB_OUTPUT
          else
            echo "webhook=${{ secrets.DISCORD_WEBHOOK }}" >> $GITHUB_OUTPUT
          fi
@@ -31,5 +41,6 @@ jobs:
          args: |
            ${{ github.event_name == 'issues' && format('📣 New issue created: **{0}** by {1} - {2}', github.event.issue.title, github.event.issue.user.login, github.event.issue.html_url) ||
            github.event_name == 'issue_comment' && format('💬 New comment on issue **{0}** by {1} - {2}', github.event.issue.title, github.event.comment.user.login, github.event.comment.html_url) ||
            github.event_name == 'pull_request' && format('🔄 New PR opened: **{0}** by {1} - {2}', github.event.pull_request.title, github.event.pull_request.user.login, github.event.pull_request.html_url) ||
            github.event_name == 'pull_request' && format('🔄 New PR opened: **{0}** by {1} - {2}', github.event.pull_request.title, github.event.pull_request.user.login, github.event.pull_request.html_url) ||
            github.event_name == 'watch' && format('⭐ {0} starred Crawl4AI 🥳! Check out their profile: {1}', github.event.sender.login, github.event.sender.html_url) ||
            format('💬 New discussion started: **{0}** by {1} - {2}', github.event.discussion.title, github.event.discussion.user.login, github.event.discussion.html_url) }}
.github/workflows/release.yml (vendored, new file, 141 lines)
@@ -0,0 +1,141 @@
name: Release Pipeline
on:
  push:
    tags:
      - 'v*'
      - '!test-v*' # Exclude test tags

jobs:
  release:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.12'

      - name: Extract version from tag
        id: get_version
        run: |
          TAG_VERSION=${GITHUB_REF#refs/tags/v}
          echo "VERSION=$TAG_VERSION" >> $GITHUB_OUTPUT
          echo "Releasing version: $TAG_VERSION"

      - name: Install package dependencies
        run: |
          pip install -e .

      - name: Check version consistency
        run: |
          TAG_VERSION=${{ steps.get_version.outputs.VERSION }}
          PACKAGE_VERSION=$(python -c "from crawl4ai.__version__ import __version__; print(__version__)")

          echo "Tag version: $TAG_VERSION"
          echo "Package version: $PACKAGE_VERSION"

          if [ "$TAG_VERSION" != "$PACKAGE_VERSION" ]; then
            echo "❌ Version mismatch! Tag: $TAG_VERSION, Package: $PACKAGE_VERSION"
            echo "Please update crawl4ai/__version__.py to match the tag version"
            exit 1
          fi
          echo "✅ Version check passed: $TAG_VERSION"

      - name: Install build dependencies
        run: |
          python -m pip install --upgrade pip
          pip install build twine

      - name: Build package
        run: python -m build

      - name: Check package
        run: twine check dist/*

      - name: Upload to PyPI
        env:
          TWINE_USERNAME: __token__
          TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }}
        run: |
          echo "📦 Uploading to PyPI..."
          twine upload dist/*
          echo "✅ Package uploaded to https://pypi.org/project/crawl4ai/"

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Log in to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_TOKEN }}

      - name: Extract major and minor versions
        id: versions
        run: |
          VERSION=${{ steps.get_version.outputs.VERSION }}
          MAJOR=$(echo $VERSION | cut -d. -f1)
          MINOR=$(echo $VERSION | cut -d. -f1-2)
          echo "MAJOR=$MAJOR" >> $GITHUB_OUTPUT
          echo "MINOR=$MINOR" >> $GITHUB_OUTPUT

      - name: Build and push Docker images
        uses: docker/build-push-action@v5
        with:
          context: .
          push: true
          tags: |
            unclecode/crawl4ai:${{ steps.get_version.outputs.VERSION }}
            unclecode/crawl4ai:${{ steps.versions.outputs.MINOR }}
            unclecode/crawl4ai:${{ steps.versions.outputs.MAJOR }}
            unclecode/crawl4ai:latest
          platforms: linux/amd64,linux/arm64

      - name: Create GitHub Release
        uses: actions/create-release@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          tag_name: v${{ steps.get_version.outputs.VERSION }}
          release_name: Release v${{ steps.get_version.outputs.VERSION }}
          body: |
            ## 🎉 Crawl4AI v${{ steps.get_version.outputs.VERSION }} Released!

            ### 📦 Installation

            **PyPI:**
            ```bash
            pip install crawl4ai==${{ steps.get_version.outputs.VERSION }}
            ```

            **Docker:**
            ```bash
            docker pull unclecode/crawl4ai:${{ steps.get_version.outputs.VERSION }}
            docker pull unclecode/crawl4ai:latest
            ```

            ### 📝 What's Changed
            See [CHANGELOG.md](https://github.com/${{ github.repository }}/blob/main/CHANGELOG.md) for details.
          draft: false
          prerelease: false

      - name: Summary
        run: |
          echo "## 🚀 Release Complete!" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### 📦 PyPI Package" >> $GITHUB_STEP_SUMMARY
          echo "- Version: ${{ steps.get_version.outputs.VERSION }}" >> $GITHUB_STEP_SUMMARY
          echo "- URL: https://pypi.org/project/crawl4ai/" >> $GITHUB_STEP_SUMMARY
          echo "- Install: \`pip install crawl4ai==${{ steps.get_version.outputs.VERSION }}\`" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### 🐳 Docker Images" >> $GITHUB_STEP_SUMMARY
          echo "- \`unclecode/crawl4ai:${{ steps.get_version.outputs.VERSION }}\`" >> $GITHUB_STEP_SUMMARY
          echo "- \`unclecode/crawl4ai:${{ steps.versions.outputs.MINOR }}\`" >> $GITHUB_STEP_SUMMARY
          echo "- \`unclecode/crawl4ai:${{ steps.versions.outputs.MAJOR }}\`" >> $GITHUB_STEP_SUMMARY
          echo "- \`unclecode/crawl4ai:latest\`" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### 📋 GitHub Release" >> $GITHUB_STEP_SUMMARY
          echo "https://github.com/${{ github.repository }}/releases/tag/v${{ steps.get_version.outputs.VERSION }}" >> $GITHUB_STEP_SUMMARY
.github/workflows/test-release.yml.disabled (vendored, new file, 116 lines)
@@ -0,0 +1,116 @@
name: Test Release Pipeline
on:
  push:
    tags:
      - 'test-v*'

jobs:
  test-release:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.12'

      - name: Extract version from tag
        id: get_version
        run: |
          TAG_VERSION=${GITHUB_REF#refs/tags/test-v}
          echo "VERSION=$TAG_VERSION" >> $GITHUB_OUTPUT
          echo "Testing with version: $TAG_VERSION"

      - name: Install package dependencies
        run: |
          pip install -e .

      - name: Check version consistency
        run: |
          TAG_VERSION=${{ steps.get_version.outputs.VERSION }}
          PACKAGE_VERSION=$(python -c "from crawl4ai.__version__ import __version__; print(__version__)")

          echo "Tag version: $TAG_VERSION"
          echo "Package version: $PACKAGE_VERSION"

          if [ "$TAG_VERSION" != "$PACKAGE_VERSION" ]; then
            echo "❌ Version mismatch! Tag: $TAG_VERSION, Package: $PACKAGE_VERSION"
            echo "Please update crawl4ai/__version__.py to match the tag version"
            exit 1
          fi
          echo "✅ Version check passed: $TAG_VERSION"

      - name: Install build dependencies
        run: |
          python -m pip install --upgrade pip
          pip install build twine

      - name: Build package
        run: python -m build

      - name: Check package
        run: twine check dist/*

      - name: Upload to Test PyPI
        env:
          TWINE_USERNAME: __token__
          TWINE_PASSWORD: ${{ secrets.TEST_PYPI_TOKEN }}
        run: |
          echo "📦 Uploading to Test PyPI..."
          twine upload --repository testpypi dist/* || {
            if [ $? -eq 1 ]; then
              echo "⚠️ Upload failed - likely version already exists on Test PyPI"
              echo "Continuing anyway for test purposes..."
            else
              exit 1
            fi
          }
          echo "✅ Test PyPI step complete"

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Log in to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_TOKEN }}

      - name: Build and push Docker test images
        uses: docker/build-push-action@v5
        with:
          context: .
          push: true
          tags: |
            unclecode/crawl4ai:test-${{ steps.get_version.outputs.VERSION }}
            unclecode/crawl4ai:test-latest
          platforms: linux/amd64,linux/arm64
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Summary
        run: |
          echo "## 🎉 Test Release Complete!" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### 📦 Test PyPI Package" >> $GITHUB_STEP_SUMMARY
          echo "- Version: ${{ steps.get_version.outputs.VERSION }}" >> $GITHUB_STEP_SUMMARY
          echo "- URL: https://test.pypi.org/project/crawl4ai/" >> $GITHUB_STEP_SUMMARY
          echo "- Install: \`pip install -i https://test.pypi.org/simple/ crawl4ai==${{ steps.get_version.outputs.VERSION }}\`" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### 🐳 Docker Test Images" >> $GITHUB_STEP_SUMMARY
          echo "- \`unclecode/crawl4ai:test-${{ steps.get_version.outputs.VERSION }}\`" >> $GITHUB_STEP_SUMMARY
          echo "- \`unclecode/crawl4ai:test-latest\`" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### 🧹 Cleanup Commands" >> $GITHUB_STEP_SUMMARY
          echo "\`\`\`bash" >> $GITHUB_STEP_SUMMARY
          echo "# Remove test tag" >> $GITHUB_STEP_SUMMARY
          echo "git tag -d test-v${{ steps.get_version.outputs.VERSION }}" >> $GITHUB_STEP_SUMMARY
          echo "git push origin :test-v${{ steps.get_version.outputs.VERSION }}" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "# Remove Docker test images" >> $GITHUB_STEP_SUMMARY
          echo "docker rmi unclecode/crawl4ai:test-${{ steps.get_version.outputs.VERSION }}" >> $GITHUB_STEP_SUMMARY
          echo "docker rmi unclecode/crawl4ai:test-latest" >> $GITHUB_STEP_SUMMARY
          echo "\`\`\`" >> $GITHUB_STEP_SUMMARY
.gitignore (vendored, 15 changed lines)
@@ -1,3 +1,6 @@
# Scripts folder (private tools)
.scripts/

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
@@ -257,4 +260,14 @@ continue_config.json
.private/

CLAUDE_MONITOR.md
CLAUDE.md
CLAUDE.md

tests/**/test_site
tests/**/reports
tests/**/benchmark_reports

docs/**/data
.codecat/

docs/apps/linkdin/debug*/
docs/apps/linkdin/samples/insights/*
CHANGELOG.md (156 changed lines)
@@ -5,6 +5,162 @@ All notable changes to Crawl4AI will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [0.7.x] - 2025-06-29

### Added
- **Virtual Scroll Support**: New `VirtualScrollConfig` for handling virtualized scrolling on modern websites
  - Automatically detects and handles three scrolling scenarios:
    - Content unchanged (continue scrolling)
    - Content appended (traditional infinite scroll)
    - Content replaced (true virtual scroll - Twitter/Instagram style)
  - Captures ALL content from pages that replace DOM elements during scroll
  - Intelligent deduplication based on normalized text content
  - Configurable scroll amount, count, and wait times
  - Seamless integration with existing extraction strategies
  - Comprehensive examples including Twitter timeline, Instagram grid, and mixed content scenarios
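
A minimal usage sketch of the feature as described by these notes; the parameter names (`container_selector`, `scroll_count`, `scroll_by`, `wait_after_scroll`), the `virtual_scroll_config` hook, and the import path are assumptions inferred from the bullets above, not confirmed API:

```python
# Hedged sketch only: exact parameter names may differ from the released API.
import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, VirtualScrollConfig  # import path assumed

async def main():
    # Describe how the virtualized container should be scrolled and captured.
    scroll_cfg = VirtualScrollConfig(
        container_selector="#timeline",   # assumed: CSS selector of the scrolling container
        scroll_count=20,                  # assumed: number of scroll steps to perform
        scroll_by="container_height",     # assumed: scroll amount per step
        wait_after_scroll=0.5,            # assumed: seconds to wait for replaced content
    )
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(
            "https://example.com/feed",
            config=CrawlerRunConfig(virtual_scroll_config=scroll_cfg),
        )
        print(len(result.markdown))

asyncio.run(main())
```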

## [Unreleased]

### Added
- **AsyncUrlSeeder**: High-performance URL discovery system for intelligent crawling at scale
  - Discover URLs from sitemaps and Common Crawl index
  - Extract and analyze page metadata without full crawling
  - BM25 relevance scoring for query-based URL filtering
  - Multi-domain parallel discovery with `many_urls()` method
  - Automatic caching with TTL for discovered URLs
  - Rate limiting and concurrent request management
  - Live URL validation with HEAD requests
  - JSON-LD and Open Graph metadata extraction
- **SeedingConfig**: Configuration class for URL seeding operations
  - Support for multiple discovery sources (`sitemap`, `cc`, `sitemap+cc`)
  - Pattern-based URL filtering with wildcards
  - Configurable concurrency and rate limiting
  - Query-based relevance scoring with BM25
  - Score threshold filtering for quality control
- Comprehensive documentation for URL seeding feature
  - Detailed comparison with deep crawling approaches
  - Complete API reference with examples
  - Integration guide with AsyncWebCrawler
  - Performance benchmarks and best practices
- Example scripts demonstrating URL seeding:
  - `url_seeder_demo.py`: Interactive Rich-based demonstration
  - `url_seeder_quick_demo.py`: Screenshot-friendly examples
- Test suite for URL seeding with BM25 scoring
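
A hedged sketch of how the seeder described above might be driven; only `AsyncUrlSeeder`, `SeedingConfig`, the source values, and `many_urls()` come from the notes above, while the single-domain entry point and argument names are assumptions:

```python
# Sketch under stated assumptions; not a confirmed API surface.
import asyncio
from crawl4ai import AsyncUrlSeeder, SeedingConfig

async def main():
    config = SeedingConfig(
        source="sitemap+cc",          # discovery sources listed above
        pattern="*/docs/*",           # assumed wildcard filter
        query="async web crawling",   # BM25 relevance query
        score_threshold=0.3,          # drop low-relevance URLs
    )
    seeder = AsyncUrlSeeder()
    # Single-domain discovery; `many_urls()` (mentioned above) would take a list of domains.
    urls = await seeder.urls("example.com", config)  # `urls()` is assumed to be the entry point
    for item in urls[:10]:
        print(item)

asyncio.run(main())
```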

### Changed
- Updated `__init__.py` to export AsyncUrlSeeder and SeedingConfig
- Enhanced documentation with URL seeding integration examples

### Fixed
- Corrected examples to properly extract URLs from seeder results before passing to `arun_many()`
- Fixed logger color compatibility issue (changed `lightblack` to `bright_black`)

## [0.6.2] - 2025-05-02

### Added
- New `RegexExtractionStrategy` for fast pattern-based extraction without requiring LLM
  - Built-in patterns for emails, URLs, phone numbers, dates, and more
  - Support for custom regex patterns
  - `generate_pattern` utility for LLM-assisted pattern creation (one-time use)
- Added `fit_html` as a top-level field in `CrawlResult` for optimized HTML extraction
- Added support for network response body capture in network request tracking
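
A sketch of what a no-LLM extraction run could look like; the flag-style built-in pattern names and the `custom` argument are assumptions, not confirmed signatures:

```python
# Hedged sketch of RegexExtractionStrategy usage; built-in names and kwargs are assumed.
import asyncio, json
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, RegexExtractionStrategy

async def main():
    strategy = RegexExtractionStrategy(
        pattern=RegexExtractionStrategy.Email | RegexExtractionStrategy.Url,  # assumed built-in flags
        custom={"ticket_id": r"TKT-\d{6}"},                                   # assumed custom-pattern hook
    )
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(
            "https://example.com/contact",
            config=CrawlerRunConfig(extraction_strategy=strategy),
        )
        print(json.dumps(json.loads(result.extracted_content), indent=2))

asyncio.run(main())
```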

### Changed
- Updated documentation for no-LLM extraction strategies
- Enhanced API reference to include RegexExtractionStrategy examples and usage
- Improved HTML preprocessing with optimized performance for extraction strategies

## [0.6.1] - 2025-04-24

### Added
- New dedicated `tables` field in `CrawlResult` model for better table extraction handling
- Updated crypto_analysis_example.py to use the new tables field with backward compatibility

### Changed
- Improved playground UI in Docker deployment with better endpoint handling and UI feedback

## [0.6.0] ‑ 2025‑04‑22

### Added
- Browser pooling with page pre‑warming and fine‑grained **geolocation, locale, and timezone** controls
- Crawler pool manager (SDK + Docker API) for smarter resource allocation
- Network & console log capture plus MHTML snapshot export
- **Table extractor**: turn HTML `<table>`s into DataFrames or CSV with one flag
- High‑volume stress‑test framework in `tests/memory` and API load scripts
- MCP protocol endpoints with socket & SSE support; playground UI scaffold
- Docs v2 revamp: TOC, GitHub badge, copy‑code buttons, Docker API demo
- “Ask AI” helper button *(work‑in‑progress, shipping soon)*
- New examples: geo‑location usage, network/console capture, Docker API, markdown source selection, crypto analysis
- Expanded automated test suites for browser, Docker, MCP and memory benchmarks

### Changed
- Consolidated and renamed browser strategies; legacy docker strategy modules removed
- `ProxyConfig` moved to `async_configs`
- Server migrated to pool‑based crawler management
- FastAPI validators replace custom query validation
- Docker build now uses Chromium base image
- Large‑scale repo tidy‑up (≈36 k insertions, ≈5 k deletions)

### Fixed
- Async crawler session leak, duplicate‑visit handling, URL normalisation
- Target‑element regressions in scraping strategies
- Logged‑URL readability, encoded‑URL decoding, middle truncation for long URLs
- Closed issues: #701, #733, #756, #774, #804, #822, #839, #841, #842, #843, #867, #902, #911

### Removed
- Obsolete modules under `crawl4ai/browser/*` superseded by the new pooled browser layer

### Deprecated
- Old markdown generator names now alias `DefaultMarkdownGenerator` and emit warnings

---

#### Upgrade notes
1. Update any direct imports from `crawl4ai/browser/*` to the new pooled browser modules
2. If you override `AsyncPlaywrightCrawlerStrategy.get_page`, adopt the new signature
3. Rebuild Docker images to pull the new Chromium layer
4. Switch to `DefaultMarkdownGenerator` (or silence the deprecation warning)

---

`121 files changed, ≈36 223 insertions, ≈4 975 deletions`

### [Feature] 2025-04-21
- Implemented MCP protocol for machine-to-machine communication
- Added WebSocket and SSE transport for MCP server
- Exposed server endpoints via MCP protocol
- Created tests for MCP socket and SSE communication
- Enhanced Docker server with file handling and intelligent search
- Added PDF and screenshot endpoints with file saving capability
- Added JavaScript execution endpoint for page interaction
- Implemented advanced context search with BM25 and code chunking
- Added file path output support for generated assets
- Improved server endpoints and API surface
- Added intelligent context search with query filtering
- Added syntax-aware code function chunking
- Implemented efficient HTML processing pipeline
- Added support for controlling browser geolocation via new GeolocationConfig class
- Added locale and timezone configuration options to CrawlerRunConfig
- Added example script demonstrating geolocation and locale usage
- Added documentation for location-based identity features

### [Refactor] 2025-04-20
- Replaced crawler_manager.py with simpler crawler_pool.py implementation
- Added global page semaphore for hard concurrency cap
- Implemented browser pool with idle cleanup
- Added playground UI for testing and stress testing
- Updated API handlers to use pooled crawlers
- Enhanced logging levels and symbols
- Added memory tests and stress test utilities

### [Added] 2025-04-17
- Added content source selection feature for markdown generation
  - New `content_source` parameter allows choosing between `cleaned_html`, `raw_html`, and `fit_html`
  - Provides flexibility in how HTML content is processed before markdown conversion
  - Added examples and documentation for the new feature
  - Includes backward compatibility with default `cleaned_html` behavior

## Version 0.5.0.post5 (2025-03-14)

### Added
Dockerfile (15 changed lines)
@@ -1,4 +1,9 @@
FROM python:3.10-slim
FROM python:3.12-slim-bookworm AS build

# C4ai version
ARG C4AI_VER=0.7.0-r1
ENV C4AI_VERSION=$C4AI_VER
LABEL c4ai.version=$C4AI_VER

# Set build arguments
ARG APP_HOME=/app
@@ -17,7 +22,7 @@ ENV PYTHONFAULTHANDLER=1 \
    REDIS_HOST=localhost \
    REDIS_PORT=6379

ARG PYTHON_VERSION=3.10
ARG PYTHON_VERSION=3.12
ARG INSTALL_TYPE=default
ARG ENABLE_GPU=false
ARG TARGETARCH
@@ -66,6 +71,9 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

RUN apt-get update && apt-get dist-upgrade -y \
    && rm -rf /var/lib/apt/lists/*

RUN if [ "$ENABLE_GPU" = "true" ] && [ "$TARGETARCH" = "amd64" ] ; then \
    apt-get update && apt-get install -y --no-install-recommends \
    nvidia-cuda-toolkit \
@@ -162,6 +170,9 @@ RUN crawl4ai-doctor
# Copy application code
COPY deploy/docker/* ${APP_HOME}/

# copy the playground + any future static assets
COPY deploy/docker/static ${APP_HOME}/static

# Change ownership of the application directory to the non-root user
RUN chown -R appuser:appuser ${APP_HOME}
JOURNAL.md (231 changed lines)
@@ -2,6 +2,237 @@
This journal tracks significant feature additions, bug fixes, and architectural decisions in the crawl4ai project. It serves as both documentation and a historical record of the project's evolution.

## [2025-04-17] Added Content Source Selection for Markdown Generation

**Feature:** Configurable content source for markdown generation

**Changes Made:**
1. Added `content_source: str = "cleaned_html"` parameter to `MarkdownGenerationStrategy` class
2. Updated `DefaultMarkdownGenerator` to accept and pass the content source parameter
3. Renamed the `cleaned_html` parameter to `input_html` in the `generate_markdown` method
4. Modified `AsyncWebCrawler.aprocess_html` to select the appropriate HTML source based on the generator's config
5. Added `preprocess_html_for_schema` import in `async_webcrawler.py`

**Implementation Details:**
- Added a new `content_source` parameter to specify which HTML input to use for markdown generation
- Options include: "cleaned_html" (default), "raw_html", and "fit_html"
- Used a dictionary dispatch pattern in `aprocess_html` to select the appropriate HTML source
- Added proper error handling with fallback to cleaned_html if content source selection fails
- Ensured backward compatibility by defaulting to "cleaned_html" option

**Files Modified:**
- `crawl4ai/markdown_generation_strategy.py`: Added content_source parameter and updated the method signature
- `crawl4ai/async_webcrawler.py`: Added HTML source selection logic and updated imports

**Examples:**
- Created `docs/examples/content_source_example.py` demonstrating how to use the new parameter

**Challenges:**
- Maintaining backward compatibility while reorganizing the parameter flow
- Ensuring proper error handling for all content source options
- Making the change with minimal code modifications

**Why This Feature:**
The content source selection feature allows users to choose which HTML content to use as input for markdown generation:
1. "cleaned_html" - Uses the post-processed HTML after scraping strategy (original behavior)
2. "raw_html" - Uses the original raw HTML directly from the web page
3. "fit_html" - Uses the preprocessed HTML optimized for schema extraction

This feature provides greater flexibility in how users generate markdown, enabling them to:
- Capture more detailed content from the original HTML when needed
- Use schema-optimized HTML when working with structured data
- Choose the approach that best suits their specific use case
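
A short sketch of how the option could be selected from user code, assuming the generator is passed through `CrawlerRunConfig`; treat the exact wiring as illustrative:

```python
# Sketch of the content_source option described above; wiring is an assumption.
import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, DefaultMarkdownGenerator

async def main():
    # Generate markdown from the raw page HTML instead of the cleaned HTML.
    md_generator = DefaultMarkdownGenerator(content_source="raw_html")
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(
            "https://example.com",
            config=CrawlerRunConfig(markdown_generator=md_generator),
        )
        print(result.markdown[:500])

asyncio.run(main())
```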

## [2025-04-17] Implemented High Volume Stress Testing Solution for SDK

**Feature:** Comprehensive stress testing framework using `arun_many` and the dispatcher system to evaluate performance, concurrency handling, and identify potential issues under high-volume crawling scenarios.

**Changes Made:**
1. Created a dedicated stress testing framework in the `benchmarking/` (or similar) directory.
2. Implemented local test site generation (`SiteGenerator`) with configurable heavy HTML pages.
3. Added basic memory usage tracking (`SimpleMemoryTracker`) using platform-specific commands (avoiding `psutil` dependency for this specific test).
4. Utilized `CrawlerMonitor` from `crawl4ai` for rich terminal UI and real-time monitoring of test progress and dispatcher activity.
5. Implemented detailed result summary saving (JSON) and memory sample logging (CSV).
6. Developed `run_benchmark.py` to orchestrate tests with predefined configurations.
7. Created `run_all.sh` as a simple wrapper for `run_benchmark.py`.

**Implementation Details:**
- Generates a local test site with configurable pages containing heavy text and image content.
- Uses Python's built-in `http.server` for local serving, minimizing network variance.
- Leverages `crawl4ai`'s `arun_many` method for processing URLs.
- Utilizes `MemoryAdaptiveDispatcher` to manage concurrency via the `max_sessions` parameter (note: memory adaptation features require `psutil`, not used by `SimpleMemoryTracker`).
- Tracks memory usage via `SimpleMemoryTracker`, recording samples throughout test execution to a CSV file.
- Uses `CrawlerMonitor` (which uses the `rich` library) for clear terminal visualization and progress reporting directly from the dispatcher.
- Stores detailed final metrics in a JSON summary file.
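
A hedged sketch of the `arun_many` + dispatcher pattern this framework exercises; the URL list and `max_session_permit=16` are illustrative values, and attaching the monitor via `monitor=CrawlerMonitor()` is an assumption about the dispatcher signature:

```python
# Sketch of the pattern under test; values and some keyword names are assumptions.
import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, MemoryAdaptiveDispatcher, CrawlerMonitor

async def main():
    urls = [f"http://localhost:8000/page_{i}.html" for i in range(100)]
    dispatcher = MemoryAdaptiveDispatcher(
        max_session_permit=16,     # hard cap on concurrent crawler sessions
        monitor=CrawlerMonitor(),  # rich-based live progress display (assumed hook)
    )
    async with AsyncWebCrawler() as crawler:
        results = await crawler.arun_many(
            urls,
            config=CrawlerRunConfig(stream=False),
            dispatcher=dispatcher,
        )
        print(sum(r.success for r in results), "of", len(urls), "succeeded")

asyncio.run(main())
```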

**Files Created/Updated:**
- `stress_test_sdk.py`: Main stress testing implementation using `arun_many`.
- `benchmark_report.py`: (Assumed) Report generator for comparing test results.
- `run_benchmark.py`: Test runner script with predefined configurations.
- `run_all.sh`: Simple bash script wrapper for `run_benchmark.py`.
- `USAGE.md`: Comprehensive documentation on usage and interpretation (updated).

**Testing Approach:**
- Creates a controlled, reproducible test environment with a local HTTP server.
- Processes URLs using `arun_many`, allowing the dispatcher to manage concurrency up to `max_sessions`.
- Optionally logs per-batch summaries (when not in streaming mode) after processing chunks.
- Supports different test sizes via `run_benchmark.py` configurations.
- Records memory samples via platform commands for basic trend analysis.
- Includes cleanup functionality for the test environment.

**Challenges:**
- Ensuring proper cleanup of HTTP server processes.
- Getting reliable memory tracking across platforms without adding heavy dependencies (`psutil`) to this specific test script.
- Designing `run_benchmark.py` to correctly pass arguments to `stress_test_sdk.py`.

**Why This Feature:**
The high volume stress testing solution addresses critical needs for ensuring Crawl4AI's `arun_many` reliability:
1. Provides a reproducible way to evaluate performance under concurrent load.
2. Allows testing the dispatcher's concurrency control (`max_session_permit`) and queue management.
3. Enables performance tuning by observing throughput (`URLs/sec`) under different `max_sessions` settings.
4. Creates a controlled environment for testing `arun_many` behavior.
5. Supports continuous integration by providing deterministic test conditions for `arun_many`.

**Design Decisions:**
- Chose local site generation for reproducibility and isolation from network issues.
- Utilized the built-in `CrawlerMonitor` for real-time feedback, leveraging its `rich` integration.
- Implemented optional per-batch logging in `stress_test_sdk.py` (when not streaming) to provide chunk-level summaries alongside the continuous monitor.
- Adopted `arun_many` with a `MemoryAdaptiveDispatcher` as the core mechanism for parallel execution, reflecting the intended SDK usage.
- Created `run_benchmark.py` to simplify running standard test configurations.
- Used `SimpleMemoryTracker` to provide basic memory insights without requiring `psutil` for this particular test runner.

**Future Enhancements to Consider:**
- Create a separate test variant that *does* use `psutil` to specifically stress the memory-adaptive features of the dispatcher.
- Add support for generated JavaScript content.
- Add support for Docker-based testing with explicit memory limits.
- Enhance `benchmark_report.py` to provide more sophisticated analysis of performance and memory trends from the generated JSON/CSV files.

---

## [2025-04-17] Refined Stress Testing System Parameters and Execution

**Changes Made:**
1. Corrected `run_benchmark.py` and `stress_test_sdk.py` to use `--max-sessions` instead of the incorrect `--workers` parameter, accurately reflecting dispatcher configuration.
2. Updated `run_benchmark.py` argument handling to correctly pass all relevant custom parameters (including `--stream`, `--monitor-mode`, etc.) to `stress_test_sdk.py`.
3. (Assuming changes in `benchmark_report.py`) Applied dark theme to benchmark reports for better readability.
4. (Assuming changes in `benchmark_report.py`) Improved visualization code to eliminate matplotlib warnings.
5. Updated `run_benchmark.py` to provide clickable `file://` links to generated reports in the terminal output.
6. Updated `USAGE.md` with comprehensive parameter descriptions reflecting the final script arguments.
7. Updated `run_all.sh` wrapper to correctly invoke `run_benchmark.py` with flexible arguments.

**Details of Changes:**

1. **Parameter Correction (`--max-sessions`)**:
   * Identified the fundamental misunderstanding where `--workers` was used incorrectly.
   * Refactored `stress_test_sdk.py` to accept `--max-sessions` and configure the `MemoryAdaptiveDispatcher`'s `max_session_permit` accordingly.
   * Updated `run_benchmark.py` argument parsing and command construction to use `--max-sessions`.
   * Updated `TEST_CONFIGS` in `run_benchmark.py` to use `max_sessions`.

2. **Argument Handling (`run_benchmark.py`)**:
   * Improved logic to collect all command-line arguments provided to `run_benchmark.py`.
   * Ensured all relevant arguments (like `--stream`, `--monitor-mode`, `--port`, `--use-rate-limiter`, etc.) are correctly forwarded when calling `stress_test_sdk.py` as a subprocess.

3. **Dark Theme & Visualization Fixes (Assumed in `benchmark_report.py`)**:
   * (Describes changes assumed to be made in the separate reporting script).

4. **Clickable Links (`run_benchmark.py`)**:
   * Added logic to find the latest HTML report and PNG chart in the `benchmark_reports` directory after `benchmark_report.py` runs.
   * Used `pathlib` to generate correct `file://` URLs for terminal output.

5. **Documentation Improvements (`USAGE.md`)**:
   * Rewrote sections to explain `arun_many`, dispatchers, and `--max-sessions`.
   * Updated parameter tables for all scripts (`stress_test_sdk.py`, `run_benchmark.py`).
   * Clarified the difference between batch and streaming modes and their effect on logging.
   * Updated examples to use correct arguments.

**Files Modified:**
- `stress_test_sdk.py`: Changed `--workers` to `--max-sessions`, added new arguments, used `arun_many`.
- `run_benchmark.py`: Changed argument handling, updated configs, calls `stress_test_sdk.py`.
- `run_all.sh`: Updated to call `run_benchmark.py` correctly.
- `USAGE.md`: Updated documentation extensively.
- `benchmark_report.py`: (Assumed modifications for dark theme and viz fixes).

**Testing:**
- Verified that `--max-sessions` correctly limits concurrency via the `CrawlerMonitor` output.
- Confirmed that custom arguments passed to `run_benchmark.py` are forwarded to `stress_test_sdk.py`.
- Validated clickable links work in supporting terminals.
- Ensured documentation matches the final script parameters and behavior.

**Why These Changes:**
These refinements correct the fundamental approach of the stress test to align with `crawl4ai`'s actual architecture and intended usage:
1. Ensures the test evaluates the correct components (`arun_many`, `MemoryAdaptiveDispatcher`).
2. Makes test configurations more accurate and flexible.
3. Improves the usability of the testing framework through better argument handling and documentation.

**Future Enhancements to Consider:**
- Add support for generated JavaScript content to test JS rendering performance
- Implement more sophisticated memory analysis like generational garbage collection tracking
- Add support for Docker-based testing with memory limits to force OOM conditions
- Create visualization tools for analyzing memory usage patterns across test runs
- Add benchmark comparisons between different crawler versions or configurations

## [2025-04-17] Fixed Issues in Stress Testing System

**Changes Made:**
1. Fixed custom parameter handling in run_benchmark.py
2. Applied dark theme to benchmark reports for better readability
3. Improved visualization code to eliminate matplotlib warnings
4. Added clickable links to generated reports in terminal output
5. Enhanced documentation with comprehensive parameter descriptions

**Details of Changes:**

1. **Custom Parameter Handling Fix**
   - Identified bug where custom URL count was being ignored in run_benchmark.py
   - Rewrote argument handling to use a custom args dictionary
   - Properly passed parameters to the test_simple_stress.py command
   - Added better UI indication of custom parameters in use

2. **Dark Theme Implementation**
   - Added complete dark theme to HTML benchmark reports
   - Applied dark styling to all visualization components
   - Used Nord-inspired color palette for charts and graphs
   - Improved contrast and readability for data visualization
   - Updated text colors and backgrounds for better eye comfort

3. **Matplotlib Warning Fixes**
   - Resolved warnings related to improper use of set_xticklabels()
   - Implemented correct x-axis positioning for bar charts
   - Ensured proper alignment of bar labels and data points
   - Updated plotting code to use modern matplotlib practices

4. **Documentation Improvements**
   - Created comprehensive USAGE.md with detailed instructions
   - Added parameter documentation for all scripts
   - Included examples for all common use cases
   - Provided detailed explanations for interpreting results
   - Added troubleshooting guide for common issues

**Files Modified:**
- `tests/memory/run_benchmark.py`: Fixed custom parameter handling
- `tests/memory/benchmark_report.py`: Added dark theme and fixed visualization warnings
- `tests/memory/run_all.sh`: Added clickable links to reports
- `tests/memory/USAGE.md`: Created comprehensive documentation

**Testing:**
- Verified that custom URL counts are now correctly used
- Confirmed dark theme is properly applied to all report elements
- Checked that matplotlib warnings are no longer appearing
- Validated clickable links to reports work in terminals that support them

**Why These Changes:**
These improvements address several usability issues with the stress testing system:
1. Better parameter handling ensures test configurations work as expected
2. Dark theme reduces eye strain during extended test review sessions
3. Fixing visualization warnings improves code quality and output clarity
4. Enhanced documentation makes the system more accessible for future use

**Future Enhancements:**
- Add additional visualization options for different types of analysis
- Implement theme toggle to support both light and dark preferences
- Add export options for embedding reports in other documentation
- Create dedicated CI/CD integration templates for automated testing

## [2025-04-09] Added MHTML Capture Feature

**Feature:** MHTML snapshot capture of crawled pages
PROGRESSIVE_CRAWLING.md (new file, 320 lines)
@@ -0,0 +1,320 @@
# Progressive Web Crawling with Adaptive Information Foraging

## Abstract

This paper presents a novel approach to web crawling that adaptively determines when sufficient information has been gathered to answer a given query. Unlike traditional exhaustive crawling methods, our Progressive Information Sufficiency (PIS) framework uses statistical measures to balance information completeness against crawling efficiency. We introduce a multi-strategy architecture supporting pure statistical, embedding-enhanced, and LLM-assisted approaches, with theoretical guarantees on convergence and practical evaluation methods using synthetic datasets.

## 1. Introduction

Traditional web crawling approaches follow predetermined patterns (breadth-first, depth-first) without consideration for information sufficiency. This work addresses the fundamental question: *"When do we have enough information to answer a query and similar queries in its domain?"*

We formalize this as an optimal stopping problem in information foraging, introducing metrics for coverage, consistency, and saturation that enable crawlers to make intelligent decisions about when to stop crawling and which links to follow.

## 2. Problem Formulation

### 2.1 Definitions

Let:
- **K** = {d₁, d₂, ..., dₙ} be the current knowledge base (crawled documents)
- **Q** be the user query
- **L** = {l₁, l₂, ..., lₘ} be available links with preview metadata
- **θ** be the confidence threshold for information sufficiency

### 2.2 Objectives

1. **Minimize** |K| (number of crawled pages)
2. **Maximize** P(answers(Q) | K) (probability of answering Q given K)
3. **Ensure** coverage of Q's domain (similar queries)

## 3. Mathematical Framework

### 3.1 Information Sufficiency Metric

We define Information Sufficiency as:

```
IS(K, Q) = min(Coverage(K, Q), Consistency(K, Q), 1 - Redundancy(K)) × DomainCoverage(K, Q)
```

### 3.2 Coverage Score

Coverage measures how well current knowledge covers query terms and related concepts:

```
Coverage(K, Q) = Σ(t ∈ Q) log(df(t, K) + 1) × idf(t) / |Q|
```

Where:
- df(t, K) = document frequency of term t in knowledge base K
- idf(t) = inverse document frequency weight

### 3.3 Consistency Score

Consistency measures information coherence across documents:

```
Consistency(K, Q) = 1 - Var(answers from random subsets of K)
```

This captures the principle that sufficient knowledge should provide stable answers regardless of document subset.

### 3.4 Saturation Score

Saturation detects diminishing returns:

```
Saturation(K) = 1 - (ΔInfo(Kₙ) / ΔInfo(K₁))
```

Where ΔInfo represents marginal information gain from the nth crawl.

### 3.5 Link Value Prediction

Expected information gain from uncrawled links:

```
ExpectedGain(l) = Relevance(l, Q) × Novelty(l, K) × Authority(l)
```

Components:
- **Relevance**: BM25(preview_text, Q)
- **Novelty**: 1 - max_similarity(preview, K)
- **Authority**: f(url_structure, domain_metrics)
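
The two formulas above translate directly into code; in the sketch below `bm25_score`, `max_similarity`, and `url_authority` are placeholder callables standing in for the concrete implementations:

```python
# Illustrative translation of the Coverage (Section 3.2) and ExpectedGain (Section 3.5) formulas.
from math import log

def coverage_score(query_terms, doc_freq, idf):
    """Coverage(K, Q) = sum over t in Q of log(df(t, K) + 1) * idf(t), divided by |Q|."""
    total = sum(log(doc_freq.get(t, 0) + 1) * idf.get(t, 1.0) for t in query_terms)
    return total / max(len(query_terms), 1)

def expected_gain(link, query, knowledge, bm25_score, max_similarity, url_authority):
    """ExpectedGain(l) = Relevance(l, Q) * Novelty(l, K) * Authority(l)."""
    relevance = bm25_score(link.preview_text, query)
    novelty = 1.0 - max_similarity(link.preview_text, knowledge)
    authority = url_authority(link.href)
    return relevance * novelty * authority
```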

## 4. Algorithmic Approach

### 4.1 Progressive Crawling Algorithm

```
Algorithm: ProgressiveCrawl(start_url, query, θ)
    K ← ∅
    crawled ← {start_url}
    pending ← extract_links(crawl(start_url))

    while IS(K, Q) < θ and |crawled| < max_pages:
        candidates ← rank_by_expected_gain(pending, Q, K)
        if max(ExpectedGain(candidates)) < min_gain:
            break  // Diminishing returns

        to_crawl ← top_k(candidates)
        new_docs ← parallel_crawl(to_crawl)
        K ← K ∪ new_docs
        crawled ← crawled ∪ to_crawl
        pending ← extract_new_links(new_docs) - crawled

    return K
```

### 4.2 Stopping Criteria

Crawling terminates when:
1. IS(K, Q) ≥ θ (sufficient information)
2. d(IS)/d(crawls) < ε (plateau reached)
3. |crawled| ≥ max_pages (resource limit)
4. max(ExpectedGain) < min_gain (no promising links)
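
These criteria can be folded into a single check; the threshold defaults below are illustrative only:

```python
# Sketch of the four stopping criteria as one predicate; default thresholds are placeholders.
def should_stop(is_score, is_delta, pages_crawled, best_expected_gain,
                theta=0.8, epsilon=0.01, max_pages=50, min_gain=0.05):
    return (
        is_score >= theta                  # 1. sufficient information
        or is_delta < epsilon              # 2. IS curve has plateaued
        or pages_crawled >= max_pages      # 3. resource limit reached
        or best_expected_gain < min_gain   # 4. no promising links left
    )
```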

## 5. Multi-Strategy Architecture

### 5.1 Strategy Pattern Design

```
AbstractStrategy
├── StatisticalStrategy (no LLM, no embeddings)
├── EmbeddingStrategy (with semantic similarity)
└── LLMStrategy (with language model assistance)
```

### 5.2 Statistical Strategy

Pure statistical approach using:
- BM25 for relevance scoring
- Term frequency analysis for coverage
- Graph structure for authority
- No external models required

**Advantages**: Fast, no API costs, works offline
**Best for**: Technical documentation, specific terminology

### 5.3 Embedding Strategy (Implemented)

Semantic understanding through embeddings:
- Query expansion into semantic variations
- Coverage mapping in embedding space
- Gap-driven link selection
- Validation-based stopping criteria

**Mathematical Framework**:
```
Coverage(K, Q) = mean(max_similarity(q, K) for q in Q_expanded)
Gap(q) = 1 - max_similarity(q, K)
LinkScore(l) = Σ(Gap(q) × relevance(l, q)) × (1 - redundancy(l, K))
```

**Key Parameters**:
- `embedding_k_exp`: Exponential decay factor for distance-to-score mapping
- `embedding_coverage_radius`: Distance threshold for query coverage
- `embedding_min_confidence_threshold`: Minimum relevance threshold

**Advantages**: Semantic understanding, handles ambiguity, detects irrelevance
**Best for**: Research queries, conceptual topics, diverse content
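
A sketch of the framework above using cosine similarity; `embed()` stands in for whatever sentence-embedding model is plugged in:

```python
# Illustrative implementation of Coverage, Gap, and LinkScore from the framework above.
import numpy as np

def cosine(a, b):
    return float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-9))

def coverage_and_gaps(expanded_queries, knowledge_vecs, embed):
    gaps, sims = {}, []
    for q in expanded_queries:
        qv = embed(q)
        best = max((cosine(qv, kv) for kv in knowledge_vecs), default=0.0)
        sims.append(best)
        gaps[q] = 1.0 - best                          # Gap(q) = 1 - max_similarity(q, K)
    coverage = float(np.mean(sims)) if sims else 0.0  # Coverage(K, Q)
    return coverage, gaps

def link_score(link_vec, gaps, query_vecs, knowledge_vecs):
    relevance = sum(gap * cosine(link_vec, query_vecs[q]) for q, gap in gaps.items())
    redundancy = max((cosine(link_vec, kv) for kv in knowledge_vecs), default=0.0)
    return relevance * (1.0 - redundancy)             # LinkScore(l)
```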

### 5.4 Progressive Enhancement Path

1. **Level 0**: Statistical only (implemented)
2. **Level 1**: + Embeddings for semantic similarity (implemented)
3. **Level 2**: + LLM for query understanding (future)

## 6. Evaluation Methodology

### 6.1 Synthetic Dataset Generation

Using LLM to create evaluation data:

```python
def generate_synthetic_dataset(domain_url):
    # 1. Fully crawl domain
    full_knowledge = exhaustive_crawl(domain_url)

    # 2. Generate answerable queries
    queries = llm_generate_queries(full_knowledge)

    # 3. Create query variations
    for q in queries:
        variations = generate_variations(q)  # synonyms, sub/super queries

    return queries, variations, full_knowledge
```

### 6.2 Evaluation Metrics

1. **Efficiency**: Information gained / Pages crawled
2. **Completeness**: Answerable queries / Total queries
3. **Redundancy**: 1 - (Unique information / Total information)
4. **Convergence Rate**: Pages to 95% completeness

### 6.3 Ablation Studies

- Impact of each score component (coverage, consistency, saturation)
- Sensitivity to threshold parameters
- Performance across different domain types

## 7. Theoretical Properties

### 7.1 Convergence Guarantee

**Theorem**: For finite websites, ProgressiveCrawl converges to IS(K, Q) ≥ θ or exhausts all reachable pages.

**Proof sketch**: IS(K, Q) is monotonically non-decreasing with each crawl, bounded above by 1.

### 7.2 Optimality

Under certain assumptions about link preview accuracy:
- Expected crawls ≤ 2 × optimal_crawls
- Approximation ratio improves with preview quality

## 8. Implementation Design

### 8.1 Core Components

1. **CrawlState**: Maintains crawl history and metrics
2. **AdaptiveConfig**: Configuration parameters
3. **CrawlStrategy**: Pluggable strategy interface
4. **AdaptiveCrawler**: Main orchestrator

### 8.2 Integration with Crawl4AI

- Wraps existing AsyncWebCrawler
- Leverages link preview functionality
- Maintains backward compatibility
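
An integration sketch based on the component names above; the import location, constructor fields, and the `digest()` entry point are assumptions rather than a confirmed interface:

```python
# Hedged integration sketch; names marked "assumed" are not taken from this document.
import asyncio
from crawl4ai import AsyncWebCrawler
from crawl4ai import AdaptiveCrawler, AdaptiveConfig  # assumed import location

async def main():
    config = AdaptiveConfig(confidence_threshold=0.8, max_pages=30)  # assumed fields
    async with AsyncWebCrawler() as crawler:
        adaptive = AdaptiveCrawler(crawler, config)  # wraps the existing AsyncWebCrawler
        state = await adaptive.digest(               # assumed entry point
            start_url="https://docs.example.com",
            query="async crawling configuration",
        )
        # state/confidence attributes below are likewise assumptions about CrawlState.
        print(f"Crawled {len(state.crawled_urls)} pages, confidence={adaptive.confidence:.2f}")

asyncio.run(main())
```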
|
||||
|
||||
### 8.3 Persistence
|
||||
|
||||
Knowledge base serialization for:
|
||||
- Resumable crawls
|
||||
- Knowledge sharing
|
||||
- Offline analysis
|
||||
|
||||
## 9. Future Directions
|
||||
|
||||
### 9.1 Advanced Scoring
|
||||
|
||||
- Temporal information value
|
||||
- Multi-query optimization
|
||||
- Active learning from user feedback
|
||||
|
||||
### 9.2 Distributed Crawling
|
||||
|
||||
- Collaborative knowledge building
|
||||
- Federated information sufficiency
|
||||
|
||||
### 9.3 Domain Adaptation
|
||||
|
||||
- Transfer learning across domains
|
||||
- Meta-learning for threshold selection
|
||||
|
||||
## 10. Conclusion
|
||||
|
||||
Progressive crawling with adaptive information foraging provides a principled approach to efficient web information extraction. By combining coverage, consistency, and saturation metrics, we can determine information sufficiency without ground truth labels. The multi-strategy architecture allows graceful enhancement from pure statistical to LLM-assisted approaches based on requirements and resources.
|
||||
|
||||
## References
|
||||
|
||||
1. Manning, C. D., Raghavan, P., & Schütze, H. (2008). Introduction to Information Retrieval. Cambridge University Press.
|
||||
|
||||
2. Robertson, S., & Zaragoza, H. (2009). The Probabilistic Relevance Framework: BM25 and Beyond. Foundations and Trends in Information Retrieval.
|
||||
|
||||
3. Pirolli, P., & Card, S. (1999). Information Foraging. Psychological Review, 106(4), 643-675.
|
||||
|
||||
4. Dasgupta, S. (2005). Analysis of a greedy active learning strategy. Advances in Neural Information Processing Systems.
|
||||
|
||||
## Appendix A: Implementation Pseudocode
|
||||
|
||||
```python
|
||||
class StatisticalStrategy:
|
||||
def calculate_confidence(self, state):
|
||||
coverage = self.calculate_coverage(state)
|
||||
consistency = self.calculate_consistency(state)
|
||||
saturation = self.calculate_saturation(state)
|
||||
return min(coverage, consistency, saturation)
|
||||
|
||||
def calculate_coverage(self, state):
|
||||
# BM25-based term coverage
|
||||
term_scores = []
|
||||
for term in state.query.split():
|
||||
df = state.document_frequencies.get(term, 0)
|
||||
idf = self.idf_cache.get(term, 1.0)
|
||||
term_scores.append(log(df + 1) * idf)
|
||||
return mean(term_scores) / max_possible_score
|
||||
|
||||
def rank_links(self, state):
|
||||
scored_links = []
|
||||
for link in state.pending_links:
|
||||
relevance = self.bm25_score(link.preview_text, state.query)
|
||||
novelty = self.calculate_novelty(link, state.knowledge_base)
|
||||
authority = self.url_authority(link.href)
|
||||
score = relevance * novelty * authority
|
||||
scored_links.append((link, score))
|
||||
return sorted(scored_links, key=lambda x: x[1], reverse=True)
|
||||
```
|
||||
|
||||
## Appendix B: Evaluation Protocol

1. **Dataset Creation**:
   - Select diverse domains (documentation, blogs, e-commerce)
   - Generate 100 queries per domain using an LLM
   - Create query variations (5-10 per query)

2. **Baseline Comparisons**:
   - BFS crawler (depth-limited)
   - DFS crawler (depth-limited)
   - Random crawler
   - Oracle (knows relevant pages)

3. **Metrics Collection**:
   - Pages crawled vs. query answerability
   - Time to sufficient confidence
   - False positive/negative rates

4. **Statistical Analysis**:
   - ANOVA for strategy comparison
   - Regression for parameter sensitivity
   - Bootstrap for confidence intervals (a sketch follows this list)

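As one concrete example of the last step, a percentile bootstrap for a per-query efficiency metric could be computed as below. This is a generic sketch; the data values are placeholders, not measured results.

```python
# Generic percentile-bootstrap 95% confidence interval for a mean metric
# (e.g., pages crawled until sufficient confidence). Data values are placeholders.
import random
from statistics import mean


def bootstrap_ci(values, n_resamples=10_000, alpha=0.05, seed=42):
    rng = random.Random(seed)
    estimates = sorted(
        mean(rng.choices(values, k=len(values))) for _ in range(n_resamples)
    )
    lo = estimates[int((alpha / 2) * n_resamples)]
    hi = estimates[int((1 - alpha / 2) * n_resamples) - 1]
    return lo, hi


pages_to_sufficiency = [12, 9, 15, 11, 8, 20, 13, 10, 14, 9]
print(bootstrap_ci(pages_to_sufficiency))
```
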
README.md (241 changed lines)

@@ -11,19 +11,24 @@
|
||||
[](https://pypi.org/project/crawl4ai/)
|
||||
[](https://pepy.tech/project/crawl4ai)
|
||||
|
||||
<!-- [](https://crawl4ai.readthedocs.io/) -->
|
||||
[](https://github.com/unclecode/crawl4ai/blob/main/LICENSE)
|
||||
[](https://github.com/psf/black)
|
||||
[](https://github.com/PyCQA/bandit)
|
||||
[](code_of_conduct.md)
|
||||
|
||||
<p align="center">
|
||||
<a href="https://x.com/crawl4ai">
|
||||
<img src="https://img.shields.io/badge/Follow%20on%20X-000000?style=for-the-badge&logo=x&logoColor=white" alt="Follow on X" />
|
||||
</a>
|
||||
<a href="https://www.linkedin.com/company/crawl4ai">
|
||||
<img src="https://img.shields.io/badge/Follow%20on%20LinkedIn-0077B5?style=for-the-badge&logo=linkedin&logoColor=white" alt="Follow on LinkedIn" />
|
||||
</a>
|
||||
<a href="https://discord.gg/jP8KfhDhyN">
|
||||
<img src="https://img.shields.io/badge/Join%20our%20Discord-5865F2?style=for-the-badge&logo=discord&logoColor=white" alt="Join our Discord" />
|
||||
</a>
|
||||
</p>
|
||||
</div>
|
||||
|
||||
Crawl4AI is the #1 trending GitHub repository, actively maintained by a vibrant community. It delivers blazing-fast, AI-ready web crawling tailored for LLMs, AI agents, and data pipelines. Open source, flexible, and built for real-time performance, Crawl4AI empowers developers with unmatched speed, precision, and deployment ease.
|
||||
|
||||
[✨ Check out latest update v0.5.0](#-recent-updates)
|
||||
[✨ Check out latest update v0.7.0](#-recent-updates)
|
||||
|
||||
🎉 **Version 0.5.0 is out!** This major release introduces Deep Crawling with BFS/DFS/BestFirst strategies, Memory-Adaptive Dispatcher, Multiple Crawling Strategies (Playwright and HTTP), Docker Deployment with FastAPI, Command-Line Interface (CLI), and more! [Read the release notes →](https://docs.crawl4ai.com/blog)
|
||||
🎉 **Version 0.7.0 is now available!** The Adaptive Intelligence Update introduces groundbreaking features: Adaptive Crawling that learns website patterns, Virtual Scroll support for infinite pages, intelligent Link Preview with 3-layer scoring, Async URL Seeder for massive discovery, and significant performance improvements. [Read the release notes →](https://github.com/unclecode/crawl4ai/blob/main/docs/blog/release-v0.7.0.md)
|
||||
|
||||
<details>
|
||||
<summary>🤓 <strong>My Personal Story</strong></summary>
|
||||
@@ -253,24 +258,29 @@ pip install -e ".[all]" # Install all optional features
|
||||
<details>
|
||||
<summary>🐳 <strong>Docker Deployment</strong></summary>
|
||||
|
||||
> 🚀 **Major Changes Coming!** We're developing a completely new Docker implementation that will make deployment even more efficient and seamless. The current Docker setup is being deprecated in favor of this new solution.
|
||||
> 🚀 **Now Available!** Our completely redesigned Docker implementation is here! This new solution makes deployment more efficient and seamless than ever.
|
||||
|
||||
### Current Docker Support
|
||||
### New Docker Features
|
||||
|
||||
The existing Docker implementation is being deprecated and will be replaced soon. If you still need to use Docker with the current version:
|
||||
The new Docker implementation includes:
|
||||
- **Browser pooling** with page pre-warming for faster response times
|
||||
- **Interactive playground** to test and generate request code
|
||||
- **MCP integration** for direct connection to AI tools like Claude Code
|
||||
- **Comprehensive API endpoints** including HTML extraction, screenshots, PDF generation, and JavaScript execution
|
||||
- **Multi-architecture support** with automatic detection (AMD64/ARM64)
|
||||
- **Optimized resources** with improved memory management
|
||||
|
||||
- 📚 [Deprecated Docker Setup](./docs/deprecated/docker-deployment.md) - Instructions for the current Docker implementation
|
||||
- ⚠️ Note: This setup will be replaced in the next major release
|
||||
### Getting Started
|
||||
|
||||
### What's Coming Next?
|
||||
```bash
|
||||
# Pull and run the latest release candidate
|
||||
docker pull unclecode/crawl4ai:0.7.0
|
||||
docker run -d -p 11235:11235 --name crawl4ai --shm-size=1g unclecode/crawl4ai:0.7.0
|
||||
|
||||
Our new Docker implementation will bring:
|
||||
- Improved performance and resource efficiency
|
||||
- Streamlined deployment process
|
||||
- Better integration with Crawl4AI features
|
||||
- Enhanced scalability options
|
||||
# Visit the playground at http://localhost:11235/playground
|
||||
```
|
||||
|
||||
Stay connected with our [GitHub repository](https://github.com/unclecode/crawl4ai) for updates!
|
||||
For complete documentation, see our [Docker Deployment Guide](https://docs.crawl4ai.com/core/docker-deployment/).
|
||||
|
||||
</details>
|
||||
|
||||
@@ -286,12 +296,20 @@ import requests
|
||||
# Submit a crawl job
|
||||
response = requests.post(
|
||||
"http://localhost:11235/crawl",
|
||||
json={"urls": "https://example.com", "priority": 10}
|
||||
json={"urls": ["https://example.com"], "priority": 10}
|
||||
)
|
||||
task_id = response.json()["task_id"]
|
||||
|
||||
# Continue polling until the task is complete (status="completed")
|
||||
result = requests.get(f"http://localhost:11235/task/{task_id}")
|
||||
if response.status_code == 200:
|
||||
print("Crawl job submitted successfully.")
|
||||
|
||||
if "results" in response.json():
|
||||
results = response.json()["results"]
|
||||
print("Crawl job completed. Results:")
|
||||
for result in results:
|
||||
print(result)
|
||||
else:
|
||||
task_id = response.json()["task_id"]
|
||||
print(f"Crawl job submitted. Task ID:: {task_id}")
|
||||
result = requests.get(f"http://localhost:11235/task/{task_id}")
|
||||
```
|
||||
|
||||
For more examples, see our [Docker Examples](https://github.com/unclecode/crawl4ai/blob/main/docs/examples/docker_example.py). For advanced configuration, environment variables, and usage examples, see our [Docker Deployment Guide](https://docs.crawl4ai.com/basic/docker-deployment/).
|
||||
@@ -347,7 +365,7 @@ if __name__ == "__main__":
|
||||
```python
|
||||
import asyncio
|
||||
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
|
||||
from crawl4ai.extraction_strategy import JsonCssExtractionStrategy
|
||||
from crawl4ai import JsonCssExtractionStrategy
|
||||
import json
|
||||
|
||||
async def main():
|
||||
@@ -421,7 +439,7 @@ if __name__ == "__main__":
|
||||
import os
|
||||
import asyncio
|
||||
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode, LLMConfig
|
||||
from crawl4ai.extraction_strategy import LLMExtractionStrategy
|
||||
from crawl4ai import LLMExtractionStrategy
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
class OpenAIModelFee(BaseModel):
|
||||
@@ -500,31 +518,156 @@ async def test_news_crawl():
|
||||
|
||||
## ✨ Recent Updates
|
||||
|
||||
### Version 0.5.0 Major Release Highlights
|
||||
### Version 0.7.0 Release Highlights - The Adaptive Intelligence Update
|
||||
|
||||
- **🚀 Deep Crawling System**: Explore websites beyond initial URLs with three strategies:
|
||||
- **BFS Strategy**: Breadth-first search explores websites level by level
|
||||
- **DFS Strategy**: Depth-first search explores each branch deeply before backtracking
|
||||
- **BestFirst Strategy**: Uses scoring functions to prioritize which URLs to crawl next
|
||||
- **Page Limiting**: Control the maximum number of pages to crawl with `max_pages` parameter
|
||||
- **Score Thresholds**: Filter URLs based on relevance scores
|
||||
- **⚡ Memory-Adaptive Dispatcher**: Dynamically adjusts concurrency based on system memory with built-in rate limiting
|
||||
- **🔄 Multiple Crawling Strategies**:
|
||||
- **AsyncPlaywrightCrawlerStrategy**: Browser-based crawling with JavaScript support (Default)
|
||||
- **AsyncHTTPCrawlerStrategy**: Fast, lightweight HTTP-only crawler for simple tasks
|
||||
- **🐳 Docker Deployment**: Easy deployment with FastAPI server and streaming/non-streaming endpoints
|
||||
- **💻 Command-Line Interface**: New `crwl` CLI provides convenient terminal access to all features with intuitive commands and configuration options
|
||||
- **👤 Browser Profiler**: Create and manage persistent browser profiles to save authentication states, cookies, and settings for seamless crawling of protected content
|
||||
- **🧠 Crawl4AI Coding Assistant**: AI-powered coding assistant to answer your question for Crawl4ai, and generate proper code for crawling.
|
||||
- **🏎️ LXML Scraping Mode**: Fast HTML parsing using the `lxml` library for improved performance
|
||||
- **🌐 Proxy Rotation**: Built-in support for proxy switching with `RoundRobinProxyStrategy`
|
||||
- **🧠 Adaptive Crawling**: Your crawler now learns and adapts to website patterns automatically:
|
||||
```python
|
||||
config = AdaptiveConfig(
|
||||
confidence_threshold=0.7, # Min confidence to stop crawling
|
||||
max_depth=5, # Maximum crawl depth
|
||||
max_pages=20, # Maximum number of pages to crawl
|
||||
strategy="statistical"
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
adaptive_crawler = AdaptiveCrawler(crawler, config)
|
||||
state = await adaptive_crawler.digest(
|
||||
start_url="https://news.example.com",
|
||||
query="latest news content"
|
||||
)
|
||||
# Crawler learns patterns and improves extraction over time
|
||||
```
|
||||
|
||||
- **🌊 Virtual Scroll Support**: Complete content extraction from infinite scroll pages:
|
||||
```python
|
||||
scroll_config = VirtualScrollConfig(
|
||||
container_selector="[data-testid='feed']",
|
||||
scroll_count=20,
|
||||
scroll_by="container_height",
|
||||
wait_after_scroll=1.0
|
||||
)
|
||||
|
||||
result = await crawler.arun(url, config=CrawlerRunConfig(
|
||||
virtual_scroll_config=scroll_config
|
||||
))
|
||||
```
|
||||
|
||||
- **🔗 Intelligent Link Analysis**: 3-layer scoring system for smart link prioritization:
|
||||
```python
|
||||
link_config = LinkPreviewConfig(
|
||||
query="machine learning tutorials",
|
||||
score_threshold=0.3,
|
||||
concurrent_requests=10
|
||||
)
|
||||
|
||||
result = await crawler.arun(url, config=CrawlerRunConfig(
|
||||
link_preview_config=link_config,
|
||||
score_links=True
|
||||
))
|
||||
# Links ranked by relevance and quality
|
||||
```
|
||||
|
||||
- **🎣 Async URL Seeder**: Discover thousands of URLs in seconds:
|
||||
```python
|
||||
seeder = AsyncUrlSeeder(SeedingConfig(
|
||||
source="sitemap+cc",
|
||||
pattern="*/blog/*",
|
||||
query="python tutorials",
|
||||
score_threshold=0.4
|
||||
))
|
||||
|
||||
urls = await seeder.discover("https://example.com")
|
||||
```
|
||||
|
||||
- **⚡ Performance Boost**: Up to 3x faster with optimized resource handling and memory efficiency
|
||||
|
||||
Read the full details in our [0.7.0 Release Notes](https://docs.crawl4ai.com/blog/release-v0.7.0) or check the [CHANGELOG](https://github.com/unclecode/crawl4ai/blob/main/CHANGELOG.md).
|
||||
|
||||
### Previous Version: 0.6.0 Release Highlights
|
||||
|
||||
- **🌎 World-aware Crawling**: Set geolocation, language, and timezone for authentic locale-specific content:
|
||||
```python
|
||||
crun_cfg = CrawlerRunConfig(
|
||||
url="https://browserleaks.com/geo", # test page that shows your location
|
||||
locale="en-US", # Accept-Language & UI locale
|
||||
timezone_id="America/Los_Angeles", # JS Date()/Intl timezone
|
||||
geolocation=GeolocationConfig( # override GPS coords
|
||||
latitude=34.0522,
|
||||
longitude=-118.2437,
|
||||
accuracy=10.0,
|
||||
)
|
||||
)
|
||||
```
|
||||
|
||||
- **📊 Table-to-DataFrame Extraction**: Extract HTML tables directly to CSV or pandas DataFrames:
|
||||
```python
|
||||
crawler = AsyncWebCrawler(config=browser_config)
|
||||
await crawler.start()
|
||||
|
||||
try:
|
||||
# Set up scraping parameters
|
||||
crawl_config = CrawlerRunConfig(
|
||||
table_score_threshold=8, # Strict table detection
|
||||
)
|
||||
|
||||
# Execute market data extraction
|
||||
results: List[CrawlResult] = await crawler.arun(
|
||||
url="https://coinmarketcap.com/?page=1", config=crawl_config
|
||||
)
|
||||
|
||||
# Process results
|
||||
raw_df = pd.DataFrame()
|
||||
for result in results:
|
||||
if result.success and result.media["tables"]:
|
||||
raw_df = pd.DataFrame(
|
||||
result.media["tables"][0]["rows"],
|
||||
columns=result.media["tables"][0]["headers"],
|
||||
)
|
||||
break
|
||||
print(raw_df.head())
|
||||
|
||||
finally:
|
||||
await crawler.stop()
|
||||
```
|
||||
|
||||
- **🚀 Browser Pooling**: Pages launch hot with pre-warmed browser instances for lower latency and memory usage
|
||||
|
||||
- **🕸️ Network and Console Capture**: Full traffic logs and MHTML snapshots for debugging:
|
||||
```python
|
||||
crawler_config = CrawlerRunConfig(
|
||||
capture_network=True,
|
||||
capture_console=True,
|
||||
mhtml=True
|
||||
)
|
||||
```
|
||||
|
||||
- **🔌 MCP Integration**: Connect to AI tools like Claude Code through the Model Context Protocol
|
||||
```bash
|
||||
# Add Crawl4AI to Claude Code
|
||||
claude mcp add --transport sse c4ai-sse http://localhost:11235/mcp/sse
|
||||
```
|
||||
|
||||
- **🖥️ Interactive Playground**: Test configurations and generate API requests with the built-in web interface at `http://localhost:11235//playground`
|
||||
|
||||
- **🐳 Revamped Docker Deployment**: Streamlined multi-architecture Docker image with improved resource efficiency
|
||||
|
||||
- **📱 Multi-stage Build System**: Optimized Dockerfile with platform-specific performance enhancements
|
||||
|
||||
|
||||
### Previous Version: 0.5.0 Major Release Highlights
|
||||
|
||||
- **🚀 Deep Crawling System**: Explore websites beyond initial URLs with BFS, DFS, and BestFirst strategies
|
||||
- **⚡ Memory-Adaptive Dispatcher**: Dynamically adjusts concurrency based on system memory
|
||||
- **🔄 Multiple Crawling Strategies**: Browser-based and lightweight HTTP-only crawlers
|
||||
- **💻 Command-Line Interface**: New `crwl` CLI provides convenient terminal access
|
||||
- **👤 Browser Profiler**: Create and manage persistent browser profiles
|
||||
- **🧠 Crawl4AI Coding Assistant**: AI-powered coding assistant
|
||||
- **🏎️ LXML Scraping Mode**: Fast HTML parsing using the `lxml` library
|
||||
- **🌐 Proxy Rotation**: Built-in support for proxy switching
|
||||
- **🤖 LLM Content Filter**: Intelligent markdown generation using LLMs
|
||||
- **📄 PDF Processing**: Extract text, images, and metadata from PDF files
|
||||
- **🔗 URL Redirection Tracking**: Automatically follow and record HTTP redirects
|
||||
- **🤖 LLM Schema Generation**: Easily create extraction schemas with LLM assistance
|
||||
- **🔍 robots.txt Compliance**: Respect website crawling rules
|
||||
|
||||
Read the full details in our [0.5.0 Release Notes](https://docs.crawl4ai.com/blog/releases/0.5.0.html) or check the [CHANGELOG](https://github.com/unclecode/crawl4ai/blob/main/CHANGELOG.md).
|
||||
Read the full details in our [0.5.0 Release Notes](https://docs.crawl4ai.com/blog/releases/0.5.0.html).
|
||||
|
||||
## Version Numbering in Crawl4AI
|
||||
|
||||
@@ -540,7 +683,7 @@ We use different suffixes to indicate development stages:
|
||||
- `dev` (0.4.3dev1): Development versions, unstable
|
||||
- `a` (0.4.3a1): Alpha releases, experimental features
|
||||
- `b` (0.4.3b1): Beta releases, feature complete but needs testing
|
||||
- `rc` (0.4.3rc1): Release candidates, potential final version
|
||||
- `rc` (0.4.3): Release candidates, potential final version
|
||||
|
||||
#### Installation
|
||||
- Regular installation (stable version):
|
||||
|
||||
@@ -2,7 +2,8 @@
|
||||
import warnings
|
||||
|
||||
from .async_webcrawler import AsyncWebCrawler, CacheMode
|
||||
from .async_configs import BrowserConfig, CrawlerRunConfig, HTTPCrawlerConfig, LLMConfig
|
||||
# MODIFIED: Add SeedingConfig and VirtualScrollConfig here
|
||||
from .async_configs import BrowserConfig, CrawlerRunConfig, HTTPCrawlerConfig, LLMConfig, ProxyConfig, GeolocationConfig, SeedingConfig, VirtualScrollConfig, LinkPreviewConfig
|
||||
|
||||
from .content_scraping_strategy import (
|
||||
ContentScrapingStrategy,
|
||||
@@ -23,7 +24,8 @@ from .extraction_strategy import (
|
||||
CosineStrategy,
|
||||
JsonCssExtractionStrategy,
|
||||
JsonXPathExtractionStrategy,
|
||||
JsonLxmlExtractionStrategy
|
||||
JsonLxmlExtractionStrategy,
|
||||
RegexExtractionStrategy
|
||||
)
|
||||
from .chunking_strategy import ChunkingStrategy, RegexChunking
|
||||
from .markdown_generation_strategy import DefaultMarkdownGenerator
|
||||
@@ -35,6 +37,7 @@ from .content_filter_strategy import (
|
||||
)
|
||||
from .models import CrawlResult, MarkdownGenerationResult, DisplayMode
|
||||
from .components.crawler_monitor import CrawlerMonitor
|
||||
from .link_preview import LinkPreview
|
||||
from .async_dispatcher import (
|
||||
MemoryAdaptiveDispatcher,
|
||||
SemaphoreDispatcher,
|
||||
@@ -64,6 +67,31 @@ from .deep_crawling import (
|
||||
DFSDeepCrawlStrategy,
|
||||
DeepCrawlDecorator,
|
||||
)
|
||||
# NEW: Import AsyncUrlSeeder
|
||||
from .async_url_seeder import AsyncUrlSeeder
|
||||
# Adaptive Crawler
|
||||
from .adaptive_crawler import (
|
||||
AdaptiveCrawler,
|
||||
AdaptiveConfig,
|
||||
CrawlState,
|
||||
CrawlStrategy,
|
||||
StatisticalStrategy
|
||||
)
|
||||
|
||||
# C4A Script Language Support
|
||||
from .script import (
|
||||
compile as c4a_compile,
|
||||
validate as c4a_validate,
|
||||
compile_file as c4a_compile_file,
|
||||
CompilationResult,
|
||||
ValidationResult,
|
||||
ErrorDetail
|
||||
)
|
||||
|
||||
from .utils import (
|
||||
start_colab_display_server,
|
||||
setup_colab_environment
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"AsyncLoggerBase",
|
||||
@@ -71,6 +99,18 @@ __all__ = [
|
||||
"AsyncWebCrawler",
|
||||
"BrowserProfiler",
|
||||
"LLMConfig",
|
||||
"GeolocationConfig",
|
||||
# NEW: Add SeedingConfig and VirtualScrollConfig
|
||||
"SeedingConfig",
|
||||
"VirtualScrollConfig",
|
||||
# NEW: Add AsyncUrlSeeder
|
||||
"AsyncUrlSeeder",
|
||||
# Adaptive Crawler
|
||||
"AdaptiveCrawler",
|
||||
"AdaptiveConfig",
|
||||
"CrawlState",
|
||||
"CrawlStrategy",
|
||||
"StatisticalStrategy",
|
||||
"DeepCrawlStrategy",
|
||||
"BFSDeepCrawlStrategy",
|
||||
"BestFirstCrawlingStrategy",
|
||||
@@ -104,6 +144,7 @@ __all__ = [
|
||||
"JsonCssExtractionStrategy",
|
||||
"JsonXPathExtractionStrategy",
|
||||
"JsonLxmlExtractionStrategy",
|
||||
"RegexExtractionStrategy",
|
||||
"ChunkingStrategy",
|
||||
"RegexChunking",
|
||||
"DefaultMarkdownGenerator",
|
||||
@@ -116,11 +157,23 @@ __all__ = [
|
||||
"SemaphoreDispatcher",
|
||||
"RateLimiter",
|
||||
"CrawlerMonitor",
|
||||
"LinkPreview",
|
||||
"DisplayMode",
|
||||
"MarkdownGenerationResult",
|
||||
"Crawl4aiDockerClient",
|
||||
"ProxyRotationStrategy",
|
||||
"RoundRobinProxyStrategy",
|
||||
"ProxyConfig",
|
||||
"start_colab_display_server",
|
||||
"setup_colab_environment",
|
||||
# C4A Script additions
|
||||
"c4a_compile",
|
||||
"c4a_validate",
|
||||
"c4a_compile_file",
|
||||
"CompilationResult",
|
||||
"ValidationResult",
|
||||
"ErrorDetail",
|
||||
"LinkPreviewConfig"
|
||||
]
|
||||
|
||||
|
||||
@@ -149,4 +202,4 @@ __all__ = [
|
||||
|
||||
# Disable all Pydantic warnings
|
||||
warnings.filterwarnings("ignore", module="pydantic")
|
||||
# pydantic_warnings.filter_warnings()
|
||||
# pydantic_warnings.filter_warnings()
|
||||
@@ -1,2 +1,8 @@
|
||||
# crawl4ai/_version.py
|
||||
__version__ = "0.5.0.post8"
|
||||
# crawl4ai/__version__.py
|
||||
|
||||
# This is the version that will be used for stable releases
|
||||
__version__ = "0.7.1"
|
||||
|
||||
# For nightly builds, this gets set during build process
|
||||
__nightly_version__ = None
|
||||
|
||||
|
||||
crawl4ai/adaptive_crawler copy.py (new file, 1847 lines): diff suppressed because it is too large
crawl4ai/adaptive_crawler.py (new file, 1861 lines): diff suppressed because it is too large
@@ -1,10 +1,12 @@
|
||||
import os
|
||||
from typing import Union
|
||||
from .config import (
|
||||
DEFAULT_PROVIDER,
|
||||
DEFAULT_PROVIDER_API_KEY,
|
||||
MIN_WORD_THRESHOLD,
|
||||
IMAGE_DESCRIPTION_MIN_WORD_THRESHOLD,
|
||||
PROVIDER_MODELS,
|
||||
PROVIDER_MODELS_PREFIXES,
|
||||
SCREENSHOT_HEIGHT_TRESHOLD,
|
||||
PAGE_TIMEOUT,
|
||||
IMAGE_SCORE_THRESHOLD,
|
||||
@@ -16,7 +18,7 @@ from .extraction_strategy import ExtractionStrategy, LLMExtractionStrategy
|
||||
from .chunking_strategy import ChunkingStrategy, RegexChunking
|
||||
|
||||
from .markdown_generation_strategy import MarkdownGenerationStrategy, DefaultMarkdownGenerator
|
||||
from .content_scraping_strategy import ContentScrapingStrategy, WebScrapingStrategy
|
||||
from .content_scraping_strategy import ContentScrapingStrategy, WebScrapingStrategy, LXMLWebScrapingStrategy
|
||||
from .deep_crawling import DeepCrawlStrategy
|
||||
|
||||
from .cache_context import CacheMode
|
||||
@@ -27,11 +29,8 @@ import inspect
|
||||
from typing import Any, Dict, Optional
|
||||
from enum import Enum
|
||||
|
||||
from .proxy_strategy import ProxyConfig
|
||||
try:
|
||||
from .browser.models import DockerConfig
|
||||
except ImportError:
|
||||
DockerConfig = None
|
||||
# from .proxy_strategy import ProxyConfig
|
||||
|
||||
|
||||
|
||||
def to_serializable_dict(obj: Any, ignore_default_value : bool = False) -> Dict:
|
||||
@@ -161,6 +160,163 @@ def is_empty_value(value: Any) -> bool:
|
||||
return True
|
||||
return False
|
||||
|
||||
class GeolocationConfig:
|
||||
def __init__(
|
||||
self,
|
||||
latitude: float,
|
||||
longitude: float,
|
||||
accuracy: Optional[float] = 0.0
|
||||
):
|
||||
"""Configuration class for geolocation settings.
|
||||
|
||||
Args:
|
||||
latitude: Latitude coordinate (e.g., 37.7749)
|
||||
longitude: Longitude coordinate (e.g., -122.4194)
|
||||
accuracy: Accuracy in meters. Default: 0.0
|
||||
"""
|
||||
self.latitude = latitude
|
||||
self.longitude = longitude
|
||||
self.accuracy = accuracy
|
||||
|
||||
@staticmethod
|
||||
def from_dict(geo_dict: Dict) -> "GeolocationConfig":
|
||||
"""Create a GeolocationConfig from a dictionary."""
|
||||
return GeolocationConfig(
|
||||
latitude=geo_dict.get("latitude"),
|
||||
longitude=geo_dict.get("longitude"),
|
||||
accuracy=geo_dict.get("accuracy", 0.0)
|
||||
)
|
||||
|
||||
def to_dict(self) -> Dict:
|
||||
"""Convert to dictionary representation."""
|
||||
return {
|
||||
"latitude": self.latitude,
|
||||
"longitude": self.longitude,
|
||||
"accuracy": self.accuracy
|
||||
}
|
||||
|
||||
def clone(self, **kwargs) -> "GeolocationConfig":
|
||||
"""Create a copy of this configuration with updated values.
|
||||
|
||||
Args:
|
||||
**kwargs: Key-value pairs of configuration options to update
|
||||
|
||||
Returns:
|
||||
GeolocationConfig: A new instance with the specified updates
|
||||
"""
|
||||
config_dict = self.to_dict()
|
||||
config_dict.update(kwargs)
|
||||
return GeolocationConfig.from_dict(config_dict)
|
||||
|
||||
class ProxyConfig:
|
||||
def __init__(
|
||||
self,
|
||||
server: str,
|
||||
username: Optional[str] = None,
|
||||
password: Optional[str] = None,
|
||||
ip: Optional[str] = None,
|
||||
):
|
||||
"""Configuration class for a single proxy.
|
||||
|
||||
Args:
|
||||
server: Proxy server URL (e.g., "http://127.0.0.1:8080")
|
||||
username: Optional username for proxy authentication
|
||||
password: Optional password for proxy authentication
|
||||
ip: Optional IP address for verification purposes
|
||||
"""
|
||||
self.server = server
|
||||
self.username = username
|
||||
self.password = password
|
||||
|
||||
# Extract IP from server if not explicitly provided
|
||||
self.ip = ip or self._extract_ip_from_server()
|
||||
|
||||
def _extract_ip_from_server(self) -> Optional[str]:
|
||||
"""Extract IP address from server URL."""
|
||||
try:
|
||||
# Simple extraction assuming http://ip:port format
|
||||
if "://" in self.server:
|
||||
parts = self.server.split("://")[1].split(":")
|
||||
return parts[0]
|
||||
else:
|
||||
parts = self.server.split(":")
|
||||
return parts[0]
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def from_string(proxy_str: str) -> "ProxyConfig":
|
||||
"""Create a ProxyConfig from a string in the format 'ip:port:username:password'."""
|
||||
parts = proxy_str.split(":")
|
||||
if len(parts) == 4: # ip:port:username:password
|
||||
ip, port, username, password = parts
|
||||
return ProxyConfig(
|
||||
server=f"http://{ip}:{port}",
|
||||
username=username,
|
||||
password=password,
|
||||
ip=ip
|
||||
)
|
||||
elif len(parts) == 2: # ip:port only
|
||||
ip, port = parts
|
||||
return ProxyConfig(
|
||||
server=f"http://{ip}:{port}",
|
||||
ip=ip
|
||||
)
|
||||
else:
|
||||
raise ValueError(f"Invalid proxy string format: {proxy_str}")
|
||||
|
||||
@staticmethod
|
||||
def from_dict(proxy_dict: Dict) -> "ProxyConfig":
|
||||
"""Create a ProxyConfig from a dictionary."""
|
||||
return ProxyConfig(
|
||||
server=proxy_dict.get("server"),
|
||||
username=proxy_dict.get("username"),
|
||||
password=proxy_dict.get("password"),
|
||||
ip=proxy_dict.get("ip")
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def from_env(env_var: str = "PROXIES") -> List["ProxyConfig"]:
|
||||
"""Load proxies from environment variable.
|
||||
|
||||
Args:
|
||||
env_var: Name of environment variable containing comma-separated proxy strings
|
||||
|
||||
Returns:
|
||||
List of ProxyConfig objects
|
||||
"""
|
||||
proxies = []
|
||||
try:
|
||||
proxy_list = os.getenv(env_var, "").split(",")
|
||||
for proxy in proxy_list:
|
||||
if not proxy:
|
||||
continue
|
||||
proxies.append(ProxyConfig.from_string(proxy))
|
||||
except Exception as e:
|
||||
print(f"Error loading proxies from environment: {e}")
|
||||
return proxies
|
||||
|
||||
def to_dict(self) -> Dict:
|
||||
"""Convert to dictionary representation."""
|
||||
return {
|
||||
"server": self.server,
|
||||
"username": self.username,
|
||||
"password": self.password,
|
||||
"ip": self.ip
|
||||
}
|
||||
|
||||
def clone(self, **kwargs) -> "ProxyConfig":
|
||||
"""Create a copy of this configuration with updated values.
|
||||
|
||||
Args:
|
||||
**kwargs: Key-value pairs of configuration options to update
|
||||
|
||||
Returns:
|
||||
ProxyConfig: A new instance with the specified updates
|
||||
"""
|
||||
config_dict = self.to_dict()
|
||||
config_dict.update(kwargs)
|
||||
return ProxyConfig.from_dict(config_dict)
|
||||
|
||||
class BrowserConfig:
|
||||
"""
|
||||
@@ -197,8 +353,6 @@ class BrowserConfig:
|
||||
Default: None.
|
||||
proxy_config (ProxyConfig or dict or None): Detailed proxy configuration, e.g. {"server": "...", "username": "..."}.
|
||||
If None, no additional proxy config. Default: None.
|
||||
docker_config (DockerConfig or dict or None): Configuration for Docker-based browser automation.
|
||||
Contains settings for Docker container operation. Default: None.
|
||||
viewport_width (int): Default viewport width for pages. Default: 1080.
|
||||
viewport_height (int): Default viewport height for pages. Default: 600.
|
||||
viewport (dict): Default viewport dimensions for pages. If set, overrides viewport_width and viewport_height.
|
||||
@@ -244,7 +398,6 @@ class BrowserConfig:
|
||||
channel: str = "chromium",
|
||||
proxy: str = None,
|
||||
proxy_config: Union[ProxyConfig, dict, None] = None,
|
||||
docker_config: Union[DockerConfig, dict, None] = None,
|
||||
viewport_width: int = 1080,
|
||||
viewport_height: int = 600,
|
||||
viewport: dict = None,
|
||||
@@ -272,7 +425,7 @@ class BrowserConfig:
|
||||
host: str = "localhost",
|
||||
):
|
||||
self.browser_type = browser_type
|
||||
self.headless = headless or True
|
||||
self.headless = headless
|
||||
self.browser_mode = browser_mode
|
||||
self.use_managed_browser = use_managed_browser
|
||||
self.cdp_url = cdp_url
|
||||
@@ -285,15 +438,7 @@ class BrowserConfig:
|
||||
self.chrome_channel = ""
|
||||
self.proxy = proxy
|
||||
self.proxy_config = proxy_config
|
||||
|
||||
# Handle docker configuration
|
||||
if isinstance(docker_config, dict) and DockerConfig is not None:
|
||||
self.docker_config = DockerConfig.from_kwargs(docker_config)
|
||||
else:
|
||||
self.docker_config = docker_config
|
||||
|
||||
if self.docker_config:
|
||||
self.user_data_dir = self.docker_config.user_data_dir
|
||||
|
||||
self.viewport_width = viewport_width
|
||||
self.viewport_height = viewport_height
|
||||
@@ -364,7 +509,6 @@ class BrowserConfig:
|
||||
channel=kwargs.get("channel", "chromium"),
|
||||
proxy=kwargs.get("proxy"),
|
||||
proxy_config=kwargs.get("proxy_config", None),
|
||||
docker_config=kwargs.get("docker_config", None),
|
||||
viewport_width=kwargs.get("viewport_width", 1080),
|
||||
viewport_height=kwargs.get("viewport_height", 600),
|
||||
accept_downloads=kwargs.get("accept_downloads", False),
|
||||
@@ -421,13 +565,7 @@ class BrowserConfig:
|
||||
"debugging_port": self.debugging_port,
|
||||
"host": self.host,
|
||||
}
|
||||
|
||||
# Include docker_config if it exists
|
||||
if hasattr(self, "docker_config") and self.docker_config is not None:
|
||||
if hasattr(self.docker_config, "to_dict"):
|
||||
result["docker_config"] = self.docker_config.to_dict()
|
||||
else:
|
||||
result["docker_config"] = self.docker_config
|
||||
|
||||
|
||||
return result
|
||||
|
||||
@@ -457,6 +595,145 @@ class BrowserConfig:
|
||||
return config
|
||||
return BrowserConfig.from_kwargs(config)
|
||||
|
||||
class VirtualScrollConfig:
|
||||
"""Configuration for virtual scroll handling.
|
||||
|
||||
This config enables capturing content from pages with virtualized scrolling
|
||||
(like Twitter, Instagram feeds) where DOM elements are recycled as user scrolls.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
container_selector: str,
|
||||
scroll_count: int = 10,
|
||||
scroll_by: Union[str, int] = "container_height",
|
||||
wait_after_scroll: float = 0.5,
|
||||
):
|
||||
"""
|
||||
Initialize virtual scroll configuration.
|
||||
|
||||
Args:
|
||||
container_selector: CSS selector for the scrollable container
|
||||
scroll_count: Maximum number of scrolls to perform
|
||||
scroll_by: Amount to scroll - can be:
|
||||
- "container_height": scroll by container's height
|
||||
- "page_height": scroll by viewport height
|
||||
- int: fixed pixel amount
|
||||
wait_after_scroll: Seconds to wait after each scroll for content to load
|
||||
"""
|
||||
self.container_selector = container_selector
|
||||
self.scroll_count = scroll_count
|
||||
self.scroll_by = scroll_by
|
||||
self.wait_after_scroll = wait_after_scroll
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
"""Convert to dictionary for serialization."""
|
||||
return {
|
||||
"container_selector": self.container_selector,
|
||||
"scroll_count": self.scroll_count,
|
||||
"scroll_by": self.scroll_by,
|
||||
"wait_after_scroll": self.wait_after_scroll,
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, data: dict) -> "VirtualScrollConfig":
|
||||
"""Create instance from dictionary."""
|
||||
return cls(**data)
|
||||
|
||||
class LinkPreviewConfig:
|
||||
"""Configuration for link head extraction and scoring."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
include_internal: bool = True,
|
||||
include_external: bool = False,
|
||||
include_patterns: Optional[List[str]] = None,
|
||||
exclude_patterns: Optional[List[str]] = None,
|
||||
concurrency: int = 10,
|
||||
timeout: int = 5,
|
||||
max_links: int = 100,
|
||||
query: Optional[str] = None,
|
||||
score_threshold: Optional[float] = None,
|
||||
verbose: bool = False
|
||||
):
|
||||
"""
|
||||
Initialize link extraction configuration.
|
||||
|
||||
Args:
|
||||
include_internal: Whether to include same-domain links
|
||||
include_external: Whether to include different-domain links
|
||||
include_patterns: List of glob patterns to include (e.g., ["*/docs/*", "*/api/*"])
|
||||
exclude_patterns: List of glob patterns to exclude (e.g., ["*/login*", "*/admin*"])
|
||||
concurrency: Number of links to process simultaneously
|
||||
timeout: Timeout in seconds for each link's head extraction
|
||||
max_links: Maximum number of links to process (prevents overload)
|
||||
query: Query string for BM25 contextual scoring (optional)
|
||||
score_threshold: Minimum relevance score to include links (0.0-1.0, optional)
|
||||
verbose: Show detailed progress during extraction
|
||||
"""
|
||||
self.include_internal = include_internal
|
||||
self.include_external = include_external
|
||||
self.include_patterns = include_patterns
|
||||
self.exclude_patterns = exclude_patterns
|
||||
self.concurrency = concurrency
|
||||
self.timeout = timeout
|
||||
self.max_links = max_links
|
||||
self.query = query
|
||||
self.score_threshold = score_threshold
|
||||
self.verbose = verbose
|
||||
|
||||
# Validation
|
||||
if concurrency <= 0:
|
||||
raise ValueError("concurrency must be positive")
|
||||
if timeout <= 0:
|
||||
raise ValueError("timeout must be positive")
|
||||
if max_links <= 0:
|
||||
raise ValueError("max_links must be positive")
|
||||
if score_threshold is not None and not (0.0 <= score_threshold <= 1.0):
|
||||
raise ValueError("score_threshold must be between 0.0 and 1.0")
|
||||
if not include_internal and not include_external:
|
||||
raise ValueError("At least one of include_internal or include_external must be True")
|
||||
|
||||
@staticmethod
|
||||
def from_dict(config_dict: Dict[str, Any]) -> "LinkPreviewConfig":
|
||||
"""Create LinkPreviewConfig from dictionary (for backward compatibility)."""
|
||||
if not config_dict:
|
||||
return None
|
||||
|
||||
return LinkPreviewConfig(
|
||||
include_internal=config_dict.get("include_internal", True),
|
||||
include_external=config_dict.get("include_external", False),
|
||||
include_patterns=config_dict.get("include_patterns"),
|
||||
exclude_patterns=config_dict.get("exclude_patterns"),
|
||||
concurrency=config_dict.get("concurrency", 10),
|
||||
timeout=config_dict.get("timeout", 5),
|
||||
max_links=config_dict.get("max_links", 100),
|
||||
query=config_dict.get("query"),
|
||||
score_threshold=config_dict.get("score_threshold"),
|
||||
verbose=config_dict.get("verbose", False)
|
||||
)
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
"""Convert to dictionary format."""
|
||||
return {
|
||||
"include_internal": self.include_internal,
|
||||
"include_external": self.include_external,
|
||||
"include_patterns": self.include_patterns,
|
||||
"exclude_patterns": self.exclude_patterns,
|
||||
"concurrency": self.concurrency,
|
||||
"timeout": self.timeout,
|
||||
"max_links": self.max_links,
|
||||
"query": self.query,
|
||||
"score_threshold": self.score_threshold,
|
||||
"verbose": self.verbose
|
||||
}
|
||||
|
||||
def clone(self, **kwargs) -> "LinkPreviewConfig":
|
||||
"""Create a copy with updated values."""
|
||||
config_dict = self.to_dict()
|
||||
config_dict.update(kwargs)
|
||||
return LinkPreviewConfig.from_dict(config_dict)
|
||||
|
||||
|
||||
class HTTPCrawlerConfig:
|
||||
"""HTTP-specific crawler configuration"""
|
||||
@@ -589,6 +866,14 @@ class CrawlerRunConfig():
|
||||
proxy_config (ProxyConfig or dict or None): Detailed proxy configuration, e.g. {"server": "...", "username": "..."}.
|
||||
If None, no additional proxy config. Default: None.
|
||||
|
||||
# Browser Location and Identity Parameters
|
||||
locale (str or None): Locale to use for the browser context (e.g., "en-US").
|
||||
Default: None.
|
||||
timezone_id (str or None): Timezone identifier to use for the browser context (e.g., "America/New_York").
|
||||
Default: None.
|
||||
geolocation (GeolocationConfig or None): Geolocation configuration for the browser.
|
||||
Default: None.
|
||||
|
||||
# SSL Parameters
|
||||
fetch_ssl_certificate: bool = False,
|
||||
# Caching Parameters
|
||||
@@ -616,6 +901,9 @@ class CrawlerRunConfig():
|
||||
Default: 60000 (60 seconds).
|
||||
wait_for (str or None): A CSS selector or JS condition to wait for before extracting content.
|
||||
Default: None.
|
||||
wait_for_timeout (int or None): Specific timeout in ms for the wait_for condition.
|
||||
If None, uses page_timeout instead.
|
||||
Default: None.
|
||||
wait_for_images (bool): If True, wait for images to load before extracting content.
|
||||
Default: False.
|
||||
delay_before_return_html (float): Delay in seconds before retrieving final HTML.
|
||||
@@ -638,6 +926,8 @@ class CrawlerRunConfig():
|
||||
Default: False.
|
||||
scroll_delay (float): Delay in seconds between scroll steps if scan_full_page is True.
|
||||
Default: 0.2.
|
||||
max_scroll_steps (Optional[int]): Maximum number of scroll steps to perform during full page scan.
|
||||
If None, scrolls until the entire page is loaded. Default: None.
|
||||
process_iframes (bool): If True, attempts to process and inline iframe content.
|
||||
Default: False.
|
||||
remove_overlay_elements (bool): If True, remove overlays/popups before extracting HTML.
|
||||
@@ -669,6 +959,12 @@ class CrawlerRunConfig():
|
||||
table_score_threshold (int): Minimum score threshold for processing a table.
|
||||
Default: 7.
|
||||
|
||||
# Virtual Scroll Parameters
|
||||
virtual_scroll_config (VirtualScrollConfig or dict or None): Configuration for handling virtual scroll containers.
|
||||
Used for capturing content from pages with virtualized
|
||||
scrolling (e.g., Twitter, Instagram feeds).
|
||||
Default: None.
|
||||
|
||||
# Link and Domain Handling Parameters
|
||||
exclude_social_media_domains (list of str): List of domains to exclude for social media links.
|
||||
Default: SOCIAL_MEDIA_DOMAINS (from config).
|
||||
@@ -682,6 +978,9 @@ class CrawlerRunConfig():
|
||||
Default: [].
|
||||
exclude_internal_links (bool): If True, exclude internal links from the results.
|
||||
Default: False.
|
||||
score_links (bool): If True, calculate intrinsic quality scores for all links using URL structure,
|
||||
text quality, and contextual relevance metrics. Separate from link_preview_config.
|
||||
Default: False.
|
||||
|
||||
# Debugging and Logging Parameters
|
||||
verbose (bool): Enable verbose logging.
|
||||
@@ -738,6 +1037,10 @@ class CrawlerRunConfig():
|
||||
scraping_strategy: ContentScrapingStrategy = None,
|
||||
proxy_config: Union[ProxyConfig, dict, None] = None,
|
||||
proxy_rotation_strategy: Optional[ProxyRotationStrategy] = None,
|
||||
# Browser Location and Identity Parameters
|
||||
locale: Optional[str] = None,
|
||||
timezone_id: Optional[str] = None,
|
||||
geolocation: Optional[GeolocationConfig] = None,
|
||||
# SSL Parameters
|
||||
fetch_ssl_certificate: bool = False,
|
||||
# Caching Parameters
|
||||
@@ -752,6 +1055,7 @@ class CrawlerRunConfig():
|
||||
wait_until: str = "domcontentloaded",
|
||||
page_timeout: int = PAGE_TIMEOUT,
|
||||
wait_for: str = None,
|
||||
wait_for_timeout: int = None,
|
||||
wait_for_images: bool = False,
|
||||
delay_before_return_html: float = 0.1,
|
||||
mean_delay: float = 0.1,
|
||||
@@ -759,10 +1063,12 @@ class CrawlerRunConfig():
|
||||
semaphore_count: int = 5,
|
||||
# Page Interaction Parameters
|
||||
js_code: Union[str, List[str]] = None,
|
||||
c4a_script: Union[str, List[str]] = None,
|
||||
js_only: bool = False,
|
||||
ignore_body_visibility: bool = True,
|
||||
scan_full_page: bool = False,
|
||||
scroll_delay: float = 0.2,
|
||||
max_scroll_steps: Optional[int] = None,
|
||||
process_iframes: bool = False,
|
||||
remove_overlay_elements: bool = False,
|
||||
simulate_user: bool = False,
|
||||
@@ -786,6 +1092,7 @@ class CrawlerRunConfig():
|
||||
exclude_social_media_links: bool = False,
|
||||
exclude_domains: list = None,
|
||||
exclude_internal_links: bool = False,
|
||||
score_links: bool = False,
|
||||
# Debugging and Logging Parameters
|
||||
verbose: bool = True,
|
||||
log_console: bool = False,
|
||||
@@ -802,6 +1109,10 @@ class CrawlerRunConfig():
|
||||
user_agent_generator_config: dict = {},
|
||||
# Deep Crawl Parameters
|
||||
deep_crawl_strategy: Optional[DeepCrawlStrategy] = None,
|
||||
# Link Extraction Parameters
|
||||
link_preview_config: Union[LinkPreviewConfig, Dict[str, Any]] = None,
|
||||
# Virtual Scroll Parameters
|
||||
virtual_scroll_config: Union[VirtualScrollConfig, Dict[str, Any]] = None,
|
||||
# Experimental Parameters
|
||||
experimental: Dict[str, Any] = None,
|
||||
):
|
||||
@@ -823,9 +1134,14 @@ class CrawlerRunConfig():
|
||||
self.remove_forms = remove_forms
|
||||
self.prettiify = prettiify
|
||||
self.parser_type = parser_type
|
||||
self.scraping_strategy = scraping_strategy or WebScrapingStrategy()
|
||||
self.scraping_strategy = scraping_strategy or LXMLWebScrapingStrategy()
|
||||
self.proxy_config = proxy_config
|
||||
self.proxy_rotation_strategy = proxy_rotation_strategy
|
||||
|
||||
# Browser Location and Identity Parameters
|
||||
self.locale = locale
|
||||
self.timezone_id = timezone_id
|
||||
self.geolocation = geolocation
|
||||
|
||||
# SSL Parameters
|
||||
self.fetch_ssl_certificate = fetch_ssl_certificate
|
||||
@@ -843,6 +1159,7 @@ class CrawlerRunConfig():
|
||||
self.wait_until = wait_until
|
||||
self.page_timeout = page_timeout
|
||||
self.wait_for = wait_for
|
||||
self.wait_for_timeout = wait_for_timeout
|
||||
self.wait_for_images = wait_for_images
|
||||
self.delay_before_return_html = delay_before_return_html
|
||||
self.mean_delay = mean_delay
|
||||
@@ -851,10 +1168,12 @@ class CrawlerRunConfig():
|
||||
|
||||
# Page Interaction Parameters
|
||||
self.js_code = js_code
|
||||
self.c4a_script = c4a_script
|
||||
self.js_only = js_only
|
||||
self.ignore_body_visibility = ignore_body_visibility
|
||||
self.scan_full_page = scan_full_page
|
||||
self.scroll_delay = scroll_delay
|
||||
self.max_scroll_steps = max_scroll_steps
|
||||
self.process_iframes = process_iframes
|
||||
self.remove_overlay_elements = remove_overlay_elements
|
||||
self.simulate_user = simulate_user
|
||||
@@ -882,6 +1201,7 @@ class CrawlerRunConfig():
|
||||
self.exclude_social_media_links = exclude_social_media_links
|
||||
self.exclude_domains = exclude_domains or []
|
||||
self.exclude_internal_links = exclude_internal_links
|
||||
self.score_links = score_links
|
||||
|
||||
# Debugging and Logging Parameters
|
||||
self.verbose = verbose
|
||||
@@ -924,8 +1244,83 @@ class CrawlerRunConfig():
|
||||
# Deep Crawl Parameters
|
||||
self.deep_crawl_strategy = deep_crawl_strategy
|
||||
|
||||
# Link Extraction Parameters
|
||||
if link_preview_config is None:
|
||||
self.link_preview_config = None
|
||||
elif isinstance(link_preview_config, LinkPreviewConfig):
|
||||
self.link_preview_config = link_preview_config
|
||||
elif isinstance(link_preview_config, dict):
|
||||
# Convert dict to config object for backward compatibility
|
||||
self.link_preview_config = LinkPreviewConfig.from_dict(link_preview_config)
|
||||
else:
|
||||
raise ValueError("link_preview_config must be LinkPreviewConfig object or dict")
|
||||
|
||||
# Virtual Scroll Parameters
|
||||
if virtual_scroll_config is None:
|
||||
self.virtual_scroll_config = None
|
||||
elif isinstance(virtual_scroll_config, VirtualScrollConfig):
|
||||
self.virtual_scroll_config = virtual_scroll_config
|
||||
elif isinstance(virtual_scroll_config, dict):
|
||||
# Convert dict to config object for backward compatibility
|
||||
self.virtual_scroll_config = VirtualScrollConfig.from_dict(virtual_scroll_config)
|
||||
else:
|
||||
raise ValueError("virtual_scroll_config must be VirtualScrollConfig object or dict")
|
||||
|
||||
# Experimental Parameters
|
||||
self.experimental = experimental or {}
|
||||
|
||||
# Compile C4A scripts if provided
|
||||
if self.c4a_script and not self.js_code:
|
||||
self._compile_c4a_script()
|
||||
|
||||
|
||||
def _compile_c4a_script(self):
|
||||
"""Compile C4A script to JavaScript"""
|
||||
try:
|
||||
# Try importing the compiler
|
||||
try:
|
||||
from .script import compile
|
||||
except ImportError:
|
||||
from crawl4ai.script import compile
|
||||
|
||||
# Handle both string and list inputs
|
||||
if isinstance(self.c4a_script, str):
|
||||
scripts = [self.c4a_script]
|
||||
else:
|
||||
scripts = self.c4a_script
|
||||
|
||||
# Compile each script
|
||||
compiled_js = []
|
||||
for i, script in enumerate(scripts):
|
||||
result = compile(script)
|
||||
|
||||
if result.success:
|
||||
compiled_js.extend(result.js_code)
|
||||
else:
|
||||
# Format error message following existing patterns
|
||||
error = result.first_error
|
||||
error_msg = (
|
||||
f"C4A Script compilation error (script {i+1}):\n"
|
||||
f" Line {error.line}, Column {error.column}: {error.message}\n"
|
||||
f" Code: {error.source_line}"
|
||||
)
|
||||
if error.suggestions:
|
||||
error_msg += f"\n Suggestion: {error.suggestions[0].message}"
|
||||
|
||||
raise ValueError(error_msg)
|
||||
|
||||
self.js_code = compiled_js
|
||||
|
||||
except ImportError:
|
||||
raise ValueError(
|
||||
"C4A script compiler not available. "
|
||||
"Please ensure crawl4ai.script module is properly installed."
|
||||
)
|
||||
except Exception as e:
|
||||
# Re-raise with context
|
||||
if "compilation error" not in str(e).lower():
|
||||
raise ValueError(f"Failed to compile C4A script: {str(e)}")
|
||||
raise
|
||||
|
||||
|
||||
def __getattr__(self, name):
|
||||
@@ -966,6 +1361,10 @@ class CrawlerRunConfig():
|
||||
scraping_strategy=kwargs.get("scraping_strategy"),
|
||||
proxy_config=kwargs.get("proxy_config"),
|
||||
proxy_rotation_strategy=kwargs.get("proxy_rotation_strategy"),
|
||||
# Browser Location and Identity Parameters
|
||||
locale=kwargs.get("locale", None),
|
||||
timezone_id=kwargs.get("timezone_id", None),
|
||||
geolocation=kwargs.get("geolocation", None),
|
||||
# SSL Parameters
|
||||
fetch_ssl_certificate=kwargs.get("fetch_ssl_certificate", False),
|
||||
# Caching Parameters
|
||||
@@ -980,6 +1379,7 @@ class CrawlerRunConfig():
|
||||
wait_until=kwargs.get("wait_until", "domcontentloaded"),
|
||||
page_timeout=kwargs.get("page_timeout", 60000),
|
||||
wait_for=kwargs.get("wait_for"),
|
||||
wait_for_timeout=kwargs.get("wait_for_timeout"),
|
||||
wait_for_images=kwargs.get("wait_for_images", False),
|
||||
delay_before_return_html=kwargs.get("delay_before_return_html", 0.1),
|
||||
mean_delay=kwargs.get("mean_delay", 0.1),
|
||||
@@ -991,6 +1391,7 @@ class CrawlerRunConfig():
|
||||
ignore_body_visibility=kwargs.get("ignore_body_visibility", True),
|
||||
scan_full_page=kwargs.get("scan_full_page", False),
|
||||
scroll_delay=kwargs.get("scroll_delay", 0.2),
|
||||
max_scroll_steps=kwargs.get("max_scroll_steps"),
|
||||
process_iframes=kwargs.get("process_iframes", False),
|
||||
remove_overlay_elements=kwargs.get("remove_overlay_elements", False),
|
||||
simulate_user=kwargs.get("simulate_user", False),
|
||||
@@ -1023,6 +1424,7 @@ class CrawlerRunConfig():
|
||||
exclude_social_media_links=kwargs.get("exclude_social_media_links", False),
|
||||
exclude_domains=kwargs.get("exclude_domains", []),
|
||||
exclude_internal_links=kwargs.get("exclude_internal_links", False),
|
||||
score_links=kwargs.get("score_links", False),
|
||||
# Debugging and Logging Parameters
|
||||
verbose=kwargs.get("verbose", True),
|
||||
log_console=kwargs.get("log_console", False),
|
||||
@@ -1038,6 +1440,8 @@ class CrawlerRunConfig():
|
||||
user_agent_generator_config=kwargs.get("user_agent_generator_config", {}),
|
||||
# Deep Crawl Parameters
|
||||
deep_crawl_strategy=kwargs.get("deep_crawl_strategy"),
|
||||
# Link Extraction Parameters
|
||||
link_preview_config=kwargs.get("link_preview_config"),
|
||||
url=kwargs.get("url"),
|
||||
# Experimental Parameters
|
||||
experimental=kwargs.get("experimental"),
|
||||
@@ -1075,6 +1479,9 @@ class CrawlerRunConfig():
|
||||
"scraping_strategy": self.scraping_strategy,
|
||||
"proxy_config": self.proxy_config,
|
||||
"proxy_rotation_strategy": self.proxy_rotation_strategy,
|
||||
"locale": self.locale,
|
||||
"timezone_id": self.timezone_id,
|
||||
"geolocation": self.geolocation,
|
||||
"fetch_ssl_certificate": self.fetch_ssl_certificate,
|
||||
"cache_mode": self.cache_mode,
|
||||
"session_id": self.session_id,
|
||||
@@ -1086,6 +1493,7 @@ class CrawlerRunConfig():
|
||||
"wait_until": self.wait_until,
|
||||
"page_timeout": self.page_timeout,
|
||||
"wait_for": self.wait_for,
|
||||
"wait_for_timeout": self.wait_for_timeout,
|
||||
"wait_for_images": self.wait_for_images,
|
||||
"delay_before_return_html": self.delay_before_return_html,
|
||||
"mean_delay": self.mean_delay,
|
||||
@@ -1096,6 +1504,7 @@ class CrawlerRunConfig():
|
||||
"ignore_body_visibility": self.ignore_body_visibility,
|
||||
"scan_full_page": self.scan_full_page,
|
||||
"scroll_delay": self.scroll_delay,
|
||||
"max_scroll_steps": self.max_scroll_steps,
|
||||
"process_iframes": self.process_iframes,
|
||||
"remove_overlay_elements": self.remove_overlay_elements,
|
||||
"simulate_user": self.simulate_user,
|
||||
@@ -1117,6 +1526,7 @@ class CrawlerRunConfig():
|
||||
"exclude_social_media_links": self.exclude_social_media_links,
|
||||
"exclude_domains": self.exclude_domains,
|
||||
"exclude_internal_links": self.exclude_internal_links,
|
||||
"score_links": self.score_links,
|
||||
"verbose": self.verbose,
|
||||
"log_console": self.log_console,
|
||||
"capture_network_requests": self.capture_network_requests,
|
||||
@@ -1128,6 +1538,7 @@ class CrawlerRunConfig():
|
||||
"user_agent_mode": self.user_agent_mode,
|
||||
"user_agent_generator_config": self.user_agent_generator_config,
|
||||
"deep_crawl_strategy": self.deep_crawl_strategy,
|
||||
"link_preview_config": self.link_preview_config.to_dict() if self.link_preview_config else None,
|
||||
"url": self.url,
|
||||
"experimental": self.experimental,
|
||||
}
|
||||
@@ -1158,14 +1569,13 @@ class CrawlerRunConfig():
|
||||
config_dict.update(kwargs)
|
||||
return CrawlerRunConfig.from_kwargs(config_dict)
|
||||
|
||||
|
||||
class LLMConfig:
|
||||
def __init__(
|
||||
self,
|
||||
provider: str = DEFAULT_PROVIDER,
|
||||
api_token: Optional[str] = None,
|
||||
base_url: Optional[str] = None,
|
||||
temprature: Optional[float] = None,
|
||||
temperature: Optional[float] = None,
|
||||
max_tokens: Optional[int] = None,
|
||||
top_p: Optional[float] = None,
|
||||
frequency_penalty: Optional[float] = None,
|
||||
@@ -1180,11 +1590,20 @@ class LLMConfig:
|
||||
elif api_token and api_token.startswith("env:"):
|
||||
self.api_token = os.getenv(api_token[4:])
|
||||
else:
|
||||
self.api_token = PROVIDER_MODELS.get(provider, "no-token") or os.getenv(
|
||||
DEFAULT_PROVIDER_API_KEY
|
||||
)
|
||||
# Check if given provider starts with any of key in PROVIDER_MODELS_PREFIXES
|
||||
# If not, check if it is in PROVIDER_MODELS
|
||||
prefixes = PROVIDER_MODELS_PREFIXES.keys()
|
||||
if any(provider.startswith(prefix) for prefix in prefixes):
|
||||
selected_prefix = next(
|
||||
(prefix for prefix in prefixes if provider.startswith(prefix)),
|
||||
None,
|
||||
)
|
||||
self.api_token = PROVIDER_MODELS_PREFIXES.get(selected_prefix)
|
||||
else:
|
||||
self.provider = DEFAULT_PROVIDER
|
||||
self.api_token = os.getenv(DEFAULT_PROVIDER_API_KEY)
|
||||
self.base_url = base_url
|
||||
self.temprature = temprature
|
||||
self.temperature = temperature
|
||||
self.max_tokens = max_tokens
|
||||
self.top_p = top_p
|
||||
self.frequency_penalty = frequency_penalty
|
||||
@@ -1198,7 +1617,7 @@ class LLMConfig:
|
||||
provider=kwargs.get("provider", DEFAULT_PROVIDER),
|
||||
api_token=kwargs.get("api_token"),
|
||||
base_url=kwargs.get("base_url"),
|
||||
temprature=kwargs.get("temprature"),
|
||||
temperature=kwargs.get("temperature"),
|
||||
max_tokens=kwargs.get("max_tokens"),
|
||||
top_p=kwargs.get("top_p"),
|
||||
frequency_penalty=kwargs.get("frequency_penalty"),
|
||||
@@ -1212,7 +1631,7 @@ class LLMConfig:
|
||||
"provider": self.provider,
|
||||
"api_token": self.api_token,
|
||||
"base_url": self.base_url,
|
||||
"temprature": self.temprature,
|
||||
"temperature": self.temperature,
|
||||
"max_tokens": self.max_tokens,
|
||||
"top_p": self.top_p,
|
||||
"frequency_penalty": self.frequency_penalty,
|
||||
@@ -1234,4 +1653,88 @@ class LLMConfig:
|
||||
config_dict.update(kwargs)
|
||||
return LLMConfig.from_kwargs(config_dict)
|
||||
|
||||
class SeedingConfig:
|
||||
"""
|
||||
Configuration class for URL discovery and pre-validation via AsyncUrlSeeder.
|
||||
"""
|
||||
def __init__(
|
||||
self,
|
||||
source: str = "sitemap+cc",
|
||||
pattern: Optional[str] = "*",
|
||||
live_check: bool = False,
|
||||
extract_head: bool = False,
|
||||
max_urls: int = -1,
|
||||
concurrency: int = 1000,
|
||||
hits_per_sec: int = 5,
|
||||
force: bool = False,
|
||||
base_directory: Optional[str] = None,
|
||||
llm_config: Optional[LLMConfig] = None,
|
||||
verbose: Optional[bool] = None,
|
||||
query: Optional[str] = None,
|
||||
score_threshold: Optional[float] = None,
|
||||
scoring_method: str = "bm25",
|
||||
filter_nonsense_urls: bool = True,
|
||||
):
|
||||
"""
|
||||
Initialize URL seeding configuration.
|
||||
|
||||
Args:
|
||||
source: Discovery source(s) to use. Options: "sitemap", "cc" (Common Crawl),
|
||||
or "sitemap+cc" (both). Default: "sitemap+cc"
|
||||
pattern: URL pattern to filter discovered URLs (e.g., "*example.com/blog/*").
|
||||
Supports glob-style wildcards. Default: "*" (all URLs)
|
||||
live_check: Whether to perform HEAD requests to verify URL liveness.
|
||||
Default: False
|
||||
extract_head: Whether to fetch and parse <head> section for metadata extraction.
|
||||
Required for BM25 relevance scoring. Default: False
|
||||
max_urls: Maximum number of URLs to discover. Use -1 for no limit.
|
||||
Default: -1
|
||||
concurrency: Maximum concurrent requests for live checks/head extraction.
|
||||
Default: 1000
|
||||
hits_per_sec: Rate limit in requests per second to avoid overwhelming servers.
|
||||
Default: 5
|
||||
force: If True, bypasses the AsyncUrlSeeder's internal .jsonl cache and
|
||||
re-fetches URLs. Default: False
|
||||
base_directory: Base directory for UrlSeeder's cache files (.jsonl).
|
||||
If None, uses default ~/.crawl4ai/. Default: None
|
||||
llm_config: LLM configuration for future features (e.g., semantic scoring).
|
||||
Currently unused. Default: None
|
||||
verbose: Override crawler's general verbose setting for seeding operations.
|
||||
Default: None (inherits from crawler)
|
||||
query: Search query for BM25 relevance scoring (e.g., "python tutorials").
|
||||
Requires extract_head=True. Default: None
|
||||
score_threshold: Minimum relevance score (0.0-1.0) to include URL.
|
||||
Only applies when query is provided. Default: None
|
||||
scoring_method: Scoring algorithm to use. Currently only "bm25" is supported.
|
||||
Future: "semantic". Default: "bm25"
|
||||
filter_nonsense_urls: Filter out utility URLs like robots.txt, sitemap.xml,
|
||||
ads.txt, favicon.ico, etc. Default: True
|
||||
"""
|
||||
self.source = source
|
||||
self.pattern = pattern
|
||||
self.live_check = live_check
|
||||
self.extract_head = extract_head
|
||||
self.max_urls = max_urls
|
||||
self.concurrency = concurrency
|
||||
self.hits_per_sec = hits_per_sec
|
||||
self.force = force
|
||||
self.base_directory = base_directory
|
||||
self.llm_config = llm_config
|
||||
self.verbose = verbose
|
||||
self.query = query
|
||||
self.score_threshold = score_threshold
|
||||
self.scoring_method = scoring_method
|
||||
self.filter_nonsense_urls = filter_nonsense_urls
|
||||
|
||||
# Add to_dict, from_kwargs, and clone methods for consistency
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
return {k: v for k, v in self.__dict__.items() if k != 'llm_config' or v is not None}
|
||||
|
||||
@staticmethod
|
||||
def from_kwargs(kwargs: Dict[str, Any]) -> 'SeedingConfig':
|
||||
return SeedingConfig(**kwargs)
|
||||
|
||||
def clone(self, **kwargs: Any) -> 'SeedingConfig':
|
||||
config_dict = self.to_dict()
|
||||
config_dict.update(kwargs)
|
||||
return SeedingConfig.from_kwargs(config_dict)
|
||||
|
||||
@@ -24,7 +24,7 @@ from .browser_manager import BrowserManager
|
||||
|
||||
import aiofiles
|
||||
import aiohttp
|
||||
import cchardet
|
||||
import chardet
|
||||
from aiohttp.client import ClientTimeout
|
||||
from urllib.parse import urlparse
|
||||
from types import MappingProxyType
|
||||
@@ -130,6 +130,8 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
Close the browser and clean up resources.
|
||||
"""
|
||||
await self.browser_manager.close()
|
||||
# Explicitly reset the static Playwright instance
|
||||
BrowserManager._playwright_instance = None
|
||||
|
||||
async def kill_session(self, session_id: str):
|
||||
"""
|
||||
@@ -439,10 +441,13 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
status_code = 200 # Default for local/raw HTML
|
||||
screenshot_data = None
|
||||
|
||||
if url.startswith(("http://", "https://")):
|
||||
if url.startswith(("http://", "https://", "view-source:")):
|
||||
return await self._crawl_web(url, config)
|
||||
|
||||
elif url.startswith("file://"):
|
||||
# initialize empty lists for console messages
|
||||
captured_console = []
|
||||
|
||||
# Process local file
|
||||
local_file_path = url[7:] # Remove 'file://' prefix
|
||||
if not os.path.exists(local_file_path):
|
||||
@@ -464,9 +469,15 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
console_messages=captured_console,
|
||||
)
|
||||
|
||||
elif url.startswith("raw:") or url.startswith("raw://"):
|
||||
#####
# Bug: both "raw:" and "raw://" start with "raw:", so the original check matched
# "raw://" URLs on the "raw:" branch and sliced them as url[4:], leaving a stray "//".
# Fix: check for "raw://" first, then "raw:".
# Note: the "raw://" prefix is 6 characters long (not 7), so it must be sliced as url[6:].
#####
|
||||
elif url.startswith("raw://") or url.startswith("raw:"):
|
||||
# Process raw HTML content
|
||||
raw_html = url[4:] if url[:4] == "raw:" else url[7:]
|
||||
# raw_html = url[4:] if url[:4] == "raw:" else url[7:]
|
||||
raw_html = url[6:] if url.startswith("raw://") else url[4:]
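# Illustrative check of the corrected prefix handling (not part of this diff):
#   "raw://<h1>Hi</h1>"[6:] -> "<h1>Hi</h1>"   ("raw://" branch)
#   "raw:<h1>Hi</h1>"[4:]   -> "<h1>Hi</h1>"   (plain "raw:" branch)
# With the old ordering, "raw://<h1>Hi</h1>" matched "raw:" first and became "//<h1>Hi</h1>".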
|
||||
html = raw_html
|
||||
if config.screenshot:
|
||||
screenshot_data = await self._generate_screenshot_from_html(html)
|
||||
@@ -569,6 +580,14 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
|
||||
async def handle_response_capture(response):
|
||||
try:
|
||||
try:
|
||||
# body = await response.body()
|
||||
# json_body = await response.json()
|
||||
text_body = await response.text()
|
||||
except Exception as e:
|
||||
body = None
# json_body = None
text_body = None  # ensure text_body is defined so the capture dict below doesn't raise NameError
|
||||
captured_requests.append({
|
||||
"event_type": "response",
|
||||
"url": response.url,
|
||||
@@ -577,7 +596,12 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
"headers": dict(response.headers), # Convert Header dict
|
||||
"from_service_worker": response.from_service_worker,
|
||||
"request_timing": response.request.timing, # Detailed timing info
|
||||
"timestamp": time.time()
|
||||
"timestamp": time.time(),
|
||||
"body" : {
|
||||
# "raw": body,
|
||||
# "json": json_body,
|
||||
"text": text_body
|
||||
}
|
||||
})
|
||||
except Exception as e:
|
||||
if self.logger:
|
||||
@@ -679,14 +703,12 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
if console_log_type == "error":
|
||||
self.logger.error(
|
||||
message=f"Console error: {msg}", # Use f-string for variable interpolation
|
||||
tag="CONSOLE",
|
||||
params={"msg": msg.text},
|
||||
tag="CONSOLE"
|
||||
)
|
||||
elif console_log_type == "debug":
|
||||
self.logger.debug(
|
||||
message=f"Console: {msg}", # Use f-string for variable interpolation
|
||||
tag="CONSOLE",
|
||||
params={"msg": msg.text},
|
||||
tag="CONSOLE"
|
||||
)
|
||||
|
||||
page.on("console", log_consol)
|
||||
@@ -728,18 +750,49 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
)
|
||||
redirected_url = page.url
|
||||
except Error as e:
|
||||
raise RuntimeError(f"Failed on navigating ACS-GOTO:\n{str(e)}")
|
||||
# Allow navigation to be aborted when downloading files
|
||||
# This is expected behavior for downloads in some browser engines
|
||||
if 'net::ERR_ABORTED' in str(e) and self.browser_config.accept_downloads:
|
||||
self.logger.info(
|
||||
message=f"Navigation aborted, likely due to file download: {url}",
|
||||
tag="GOTO",
|
||||
params={"url": url},
|
||||
)
|
||||
response = None
|
||||
else:
|
||||
raise RuntimeError(f"Failed on navigating ACS-GOTO:\n{str(e)}")
|
||||
|
||||
await self.execute_hook(
|
||||
"after_goto", page, context=context, url=url, response=response, config=config
|
||||
)
|
||||
|
||||
# ──────────────────────────────────────────────────────────────
|
||||
# Walk the redirect chain. Playwright returns only the last
# hop, so we trace the `request.redirected_from` links back to
# the earliest response in the chain and surface its status-code.
|
||||
# ──────────────────────────────────────────────────────────────
|
||||
if response is None:
|
||||
status_code = 200
|
||||
response_headers = {}
|
||||
else:
|
||||
status_code = response.status
|
||||
response_headers = response.headers
|
||||
first_resp = response
|
||||
req = response.request
|
||||
while req and req.redirected_from:
|
||||
prev_req = req.redirected_from
|
||||
prev_resp = await prev_req.response()
|
||||
if prev_resp: # keep earliest
|
||||
first_resp = prev_resp
|
||||
req = prev_req
|
||||
|
||||
status_code = first_resp.status
|
||||
response_headers = first_resp.headers
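# Illustrative effect (not part of this diff): for a chain such as
# http://example.com -> 301 -> https://example.com/ -> 200, Playwright's `response`
# reflects the final 200, while walking redirected_from surfaces the initial 301 here.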
|
||||
# if response is None:
|
||||
# status_code = 200
|
||||
# response_headers = {}
|
||||
# else:
|
||||
# status_code = response.status
|
||||
# response_headers = response.headers
|
||||
|
||||
else:
|
||||
status_code = 200
|
||||
@@ -771,7 +824,7 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
except Error:
|
||||
visibility_info = await self.check_visibility(page)
|
||||
|
||||
if self.config.verbose:
|
||||
if self.browser_config.verbose:
|
||||
self.logger.debug(
|
||||
message="Body visibility info: {info}",
|
||||
tag="DEBUG",
|
||||
@@ -883,7 +936,12 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
|
||||
# Handle full page scanning
|
||||
if config.scan_full_page:
|
||||
await self._handle_full_page_scan(page, config.scroll_delay)
|
||||
# await self._handle_full_page_scan(page, config.scroll_delay)
|
||||
await self._handle_full_page_scan(page, config.scroll_delay, config.max_scroll_steps)
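# Illustrative configuration sketch (not part of this diff): cap a full-page scan at
# 20 scroll steps. Assumes CrawlerRunConfig exposes the max_scroll_steps field this
# change threads through.
#
# run_cfg = CrawlerRunConfig(
#     scan_full_page=True,
#     scroll_delay=0.2,
#     max_scroll_steps=20,  # stop after 20 viewport-sized scrolls even if more content remains
# )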
|
||||
|
||||
# Handle virtual scroll if configured
|
||||
if config.virtual_scroll_config:
|
||||
await self._handle_virtual_scroll(page, config.virtual_scroll_config)
|
||||
|
||||
# Execute JavaScript if provided
|
||||
# if config.js_code:
|
||||
@@ -924,8 +982,10 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
|
||||
if config.wait_for:
|
||||
try:
|
||||
# Use wait_for_timeout if specified, otherwise fall back to page_timeout
|
||||
timeout = config.wait_for_timeout if config.wait_for_timeout is not None else config.page_timeout
|
||||
await self.smart_wait(
|
||||
page, config.wait_for, timeout=config.page_timeout
|
||||
page, config.wait_for, timeout=timeout
|
||||
)
|
||||
except Exception as e:
|
||||
raise RuntimeError(f"Wait condition failed: {str(e)}")
|
||||
@@ -967,7 +1027,11 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
|
||||
for selector in selectors:
|
||||
try:
|
||||
content = await page.evaluate(f"document.querySelector('{selector}')?.outerHTML || ''")
|
||||
content = await page.evaluate(
|
||||
f"""Array.from(document.querySelectorAll("{selector}"))
|
||||
.map(el => el.outerHTML)
|
||||
.join('')"""
|
||||
)
|
||||
html_parts.append(content)
|
||||
except Error as e:
|
||||
print(f"Warning: Could not get content for selector '{selector}': {str(e)}")
|
||||
@@ -1046,7 +1110,13 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
|
||||
finally:
|
||||
# If no session_id is given we should close the page
|
||||
if not config.session_id:
|
||||
all_contexts = page.context.browser.contexts
|
||||
total_pages = sum(len(context.pages) for context in all_contexts)
|
||||
if config.session_id:
|
||||
pass
|
||||
elif total_pages <= 1 and (self.browser_config.use_managed_browser or self.browser_config.headless):
|
||||
pass
|
||||
else:
|
||||
# Detach listeners before closing to prevent potential errors during close
|
||||
if config.capture_network_requests:
|
||||
page.remove_listener("request", handle_request_capture)
|
||||
@@ -1056,9 +1126,11 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
page.remove_listener("console", handle_console_capture)
|
||||
page.remove_listener("pageerror", handle_pageerror_capture)
|
||||
|
||||
# Close the page
|
||||
await page.close()
|
||||
|
||||
async def _handle_full_page_scan(self, page: Page, scroll_delay: float = 0.1):
|
||||
# async def _handle_full_page_scan(self, page: Page, scroll_delay: float = 0.1):
|
||||
async def _handle_full_page_scan(self, page: Page, scroll_delay: float = 0.1, max_scroll_steps: Optional[int] = None):
|
||||
"""
|
||||
Helper method to handle full page scanning.
|
||||
|
||||
@@ -1073,6 +1145,7 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
Args:
|
||||
page (Page): The Playwright page object
|
||||
scroll_delay (float): The delay between page scrolls
|
||||
max_scroll_steps (Optional[int]): Maximum number of scroll steps to perform. If None, scrolls until end.
|
||||
|
||||
"""
|
||||
try:
|
||||
@@ -1097,9 +1170,21 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
dimensions = await self.get_page_dimensions(page)
|
||||
total_height = dimensions["height"]
|
||||
|
||||
scroll_step_count = 0
|
||||
while current_position < total_height:
|
||||
####
|
||||
# NEW FEATURE: Check if we've reached the maximum allowed scroll steps
|
||||
# This prevents infinite scrolling on very long pages or infinite scroll scenarios
|
||||
# If max_scroll_steps is None, this check is skipped (unlimited scrolling - original behavior)
|
||||
####
|
||||
if max_scroll_steps is not None and scroll_step_count >= max_scroll_steps:
|
||||
break
|
||||
current_position = min(current_position + viewport_height, total_height)
|
||||
await self.safe_scroll(page, 0, current_position, delay=scroll_delay)
|
||||
|
||||
# Increment the step counter for max_scroll_steps tracking
|
||||
scroll_step_count += 1
|
||||
|
||||
# await page.evaluate(f"window.scrollTo(0, {current_position})")
|
||||
# await asyncio.sleep(scroll_delay)
|
||||
|
||||
@@ -1123,6 +1208,177 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
# await page.evaluate("window.scrollTo(0, document.body.scrollHeight)")
|
||||
await self.safe_scroll(page, 0, total_height)
|
||||
|
||||
async def _handle_virtual_scroll(self, page: Page, config: "VirtualScrollConfig"):
|
||||
"""
|
||||
Handle virtual scroll containers (e.g., Twitter-like feeds) by capturing
|
||||
content at different scroll positions and merging unique elements.
|
||||
|
||||
Following the design:
|
||||
1. Get container HTML
|
||||
2. Scroll by container height
|
||||
3. Wait and check if container HTML changed
|
||||
4. Three cases:
|
||||
- No change: continue scrolling
|
||||
- New items added (appended): continue (items already in page)
|
||||
- Items replaced: capture HTML chunk and add to list
|
||||
5. After N scrolls, merge chunks if any were captured
|
||||
|
||||
Args:
|
||||
page: The Playwright page object
|
||||
config: Virtual scroll configuration
|
||||
"""
|
||||
try:
|
||||
# Import VirtualScrollConfig to avoid circular import
|
||||
from .async_configs import VirtualScrollConfig
|
||||
|
||||
# Ensure config is a VirtualScrollConfig instance
|
||||
if isinstance(config, dict):
|
||||
config = VirtualScrollConfig.from_dict(config)
|
||||
|
||||
self.logger.info(
|
||||
message="Starting virtual scroll capture for container: {selector}",
|
||||
tag="VSCROLL",
|
||||
params={"selector": config.container_selector}
|
||||
)
|
||||
|
||||
# JavaScript function to handle virtual scroll capture
|
||||
virtual_scroll_js = """
|
||||
async (config) => {
|
||||
const container = document.querySelector(config.container_selector);
|
||||
if (!container) {
|
||||
throw new Error(`Container not found: ${config.container_selector}`);
|
||||
}
|
||||
|
||||
// List to store HTML chunks when content is replaced
|
||||
const htmlChunks = [];
|
||||
let previousHTML = container.innerHTML;
|
||||
let scrollCount = 0;
|
||||
|
||||
// Determine scroll amount
|
||||
let scrollAmount;
|
||||
if (typeof config.scroll_by === 'number') {
|
||||
scrollAmount = config.scroll_by;
|
||||
} else if (config.scroll_by === 'page_height') {
|
||||
scrollAmount = window.innerHeight;
|
||||
} else { // container_height
|
||||
scrollAmount = container.offsetHeight;
|
||||
}
|
||||
|
||||
// Perform scrolling
|
||||
while (scrollCount < config.scroll_count) {
|
||||
// Scroll the container
|
||||
container.scrollTop += scrollAmount;
|
||||
|
||||
// Wait for content to potentially load
|
||||
await new Promise(resolve => setTimeout(resolve, config.wait_after_scroll * 1000));
|
||||
|
||||
// Get current HTML
|
||||
const currentHTML = container.innerHTML;
|
||||
|
||||
// Determine what changed
|
||||
if (currentHTML === previousHTML) {
|
||||
// Case 0: No change - continue scrolling
|
||||
console.log(`Scroll ${scrollCount + 1}: No change in content`);
|
||||
} else if (currentHTML.startsWith(previousHTML)) {
|
||||
// Case 1: New items appended - content already in page
|
||||
console.log(`Scroll ${scrollCount + 1}: New items appended`);
|
||||
} else {
|
||||
// Case 2: Items replaced - capture the previous HTML
|
||||
console.log(`Scroll ${scrollCount + 1}: Content replaced, capturing chunk`);
|
||||
htmlChunks.push(previousHTML);
|
||||
}
|
||||
|
||||
// Update previous HTML for next iteration
|
||||
previousHTML = currentHTML;
|
||||
scrollCount++;
|
||||
|
||||
// Check if we've reached the end
|
||||
if (container.scrollTop + container.clientHeight >= container.scrollHeight - 10) {
|
||||
console.log(`Reached end of scrollable content at scroll ${scrollCount}`);
|
||||
// Capture final chunk if content was replaced
|
||||
if (htmlChunks.length > 0) {
|
||||
htmlChunks.push(currentHTML);
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// If we have chunks (case 2 occurred), merge them
|
||||
if (htmlChunks.length > 0) {
|
||||
console.log(`Merging ${htmlChunks.length} HTML chunks`);
|
||||
|
||||
// Parse all chunks to extract unique elements
|
||||
const tempDiv = document.createElement('div');
|
||||
const seenTexts = new Set();
|
||||
const uniqueElements = [];
|
||||
|
||||
// Process each chunk
|
||||
for (const chunk of htmlChunks) {
|
||||
tempDiv.innerHTML = chunk;
|
||||
const elements = tempDiv.children;
|
||||
|
||||
for (let i = 0; i < elements.length; i++) {
|
||||
const element = elements[i];
|
||||
// Normalize text for deduplication
|
||||
const normalizedText = element.innerText
|
||||
.toLowerCase()
|
||||
.replace(/[\\s\\W]/g, ''); // Remove spaces and symbols
|
||||
|
||||
if (!seenTexts.has(normalizedText)) {
|
||||
seenTexts.add(normalizedText);
|
||||
uniqueElements.push(element.outerHTML);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Replace container content with merged unique elements
|
||||
container.innerHTML = uniqueElements.join('\\n');
|
||||
console.log(`Merged ${uniqueElements.length} unique elements from ${htmlChunks.length} chunks`);
|
||||
|
||||
return {
|
||||
success: true,
|
||||
chunksCount: htmlChunks.length,
|
||||
uniqueCount: uniqueElements.length,
|
||||
replaced: true
|
||||
};
|
||||
} else {
|
||||
console.log('No content replacement detected, all content remains in page');
|
||||
return {
|
||||
success: true,
|
||||
chunksCount: 0,
|
||||
uniqueCount: 0,
|
||||
replaced: false
|
||||
};
|
||||
}
|
||||
}
|
||||
"""
|
||||
|
||||
# Execute virtual scroll capture
|
||||
result = await page.evaluate(virtual_scroll_js, config.to_dict())
|
||||
|
||||
if result.get("replaced", False):
|
||||
self.logger.success(
|
||||
message="Virtual scroll completed. Merged {unique} unique elements from {chunks} chunks",
|
||||
tag="VSCROLL",
|
||||
params={
|
||||
"unique": result.get("uniqueCount", 0),
|
||||
"chunks": result.get("chunksCount", 0)
|
||||
}
|
||||
)
|
||||
else:
|
||||
self.logger.info(
|
||||
message="Virtual scroll completed. Content was appended, no merging needed",
|
||||
tag="VSCROLL"
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(
|
||||
message="Virtual scroll capture failed: {error}",
|
||||
tag="VSCROLL",
|
||||
params={"error": str(e)}
|
||||
)
|
||||
# Continue with normal flow even if virtual scroll fails
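# Illustrative usage sketch (not part of this diff): enabling virtual-scroll capture for a
# feed-style page. Field names mirror those read by the JS above (container_selector,
# scroll_count, scroll_by, wait_after_scroll); the exact VirtualScrollConfig constructor
# signature is assumed.
#
# vs_cfg = VirtualScrollConfig(
#     container_selector="#timeline",
#     scroll_count=30,               # maximum scroll iterations
#     scroll_by="container_height",  # or "page_height", or a pixel count
#     wait_after_scroll=0.5,         # seconds to let new items render
# )
# run_cfg = CrawlerRunConfig(virtual_scroll_config=vs_cfg)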
|
||||
|
||||
async def _handle_download(self, download):
|
||||
"""
|
||||
Handle file downloads.
|
||||
@@ -1415,12 +1671,32 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
num_segments = (page_height // viewport_height) + 1
|
||||
for i in range(num_segments):
|
||||
y_offset = i * viewport_height
|
||||
# Special handling for the last segment
|
||||
if i == num_segments - 1:
|
||||
last_part_height = page_height % viewport_height
|
||||
|
||||
# If page_height is an exact multiple of viewport_height,
|
||||
# we don't need an extra segment
|
||||
if last_part_height == 0:
|
||||
# Skip last segment if page height is exact multiple of viewport
|
||||
break
|
||||
|
||||
# Adjust viewport to exactly match the remaining content height
|
||||
await page.set_viewport_size({"width": page_width, "height": last_part_height})
|
||||
|
||||
await page.evaluate(f"window.scrollTo(0, {y_offset})")
|
||||
await asyncio.sleep(0.01) # wait for render
|
||||
seg_shot = await page.screenshot(full_page=False)
|
||||
|
||||
# Capture the current segment
|
||||
# Use JPEG with quality=85 here to keep each segment's size down
|
||||
seg_shot = await page.screenshot(full_page=False, type="jpeg", quality=85)
|
||||
# seg_shot = await page.screenshot(full_page=False)
|
||||
img = Image.open(BytesIO(seg_shot)).convert("RGB")
|
||||
segments.append(img)
|
||||
|
||||
# Reset viewport to original size after capturing segments
|
||||
await page.set_viewport_size({"width": page_width, "height": viewport_height})
|
||||
|
||||
total_height = sum(img.height for img in segments)
|
||||
stitched = Image.new("RGB", (segments[0].width, total_height))
|
||||
offset = 0
|
||||
@@ -1450,8 +1726,8 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
buffered = BytesIO()
|
||||
img.save(buffered, format="JPEG")
|
||||
return base64.b64encode(buffered.getvalue()).decode("utf-8")
|
||||
finally:
|
||||
await page.close()
|
||||
# finally:
|
||||
# await page.close()
|
||||
|
||||
async def take_screenshot_naive(self, page: Page) -> str:
|
||||
"""
|
||||
@@ -1484,8 +1760,8 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
buffered = BytesIO()
|
||||
img.save(buffered, format="JPEG")
|
||||
return base64.b64encode(buffered.getvalue()).decode("utf-8")
|
||||
finally:
|
||||
await page.close()
|
||||
# finally:
|
||||
# await page.close()
|
||||
|
||||
async def export_storage_state(self, path: str = None) -> dict:
|
||||
"""
|
||||
@@ -1549,12 +1825,31 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
# then wait for the new page to load before continuing
|
||||
result = None
|
||||
try:
|
||||
# OLD VERSION:
|
||||
# result = await page.evaluate(
|
||||
# f"""
|
||||
# (async () => {{
|
||||
# try {{
|
||||
# const script_result = {script};
|
||||
# return {{ success: true, result: script_result }};
|
||||
# }} catch (err) {{
|
||||
# return {{ success: false, error: err.toString(), stack: err.stack }};
|
||||
# }}
|
||||
# }})();
|
||||
# """
|
||||
# )
|
||||
|
||||
# """ NEW VERSION:
|
||||
# When {script} contains statements (e.g., const link = …; link.click();),
|
||||
# this forms invalid JavaScript, causing Playwright execution error: SyntaxError: Unexpected token 'const'.
|
||||
# """
|
||||
result = await page.evaluate(
|
||||
f"""
|
||||
(async () => {{
|
||||
try {{
|
||||
const script_result = {script};
|
||||
return {{ success: true, result: script_result }};
|
||||
return await (async () => {{
|
||||
{script}
|
||||
}})();
|
||||
}} catch (err) {{
|
||||
return {{ success: false, error: err.toString(), stack: err.stack }};
|
||||
}}
|
||||
@@ -1975,7 +2270,7 @@ class AsyncHTTPCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
await self.start()
|
||||
yield self._session
|
||||
finally:
|
||||
await self.close()
|
||||
pass
|
||||
|
||||
def set_hook(self, hook_type: str, hook_func: Callable) -> None:
|
||||
if hook_type in self.hooks:
|
||||
@@ -2091,7 +2386,7 @@ class AsyncHTTPCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
|
||||
encoding = response.charset
|
||||
if not encoding:
|
||||
encoding = cchardet.detect(content.tobytes())['encoding'] or 'utf-8'
|
||||
encoding = chardet.detect(content.tobytes())['encoding'] or 'utf-8'
|
||||
|
||||
result = AsyncCrawlResponse(
|
||||
html=content.tobytes().decode(encoding, errors='replace'),
|
||||
|
||||
@@ -171,7 +171,10 @@ class AsyncDatabaseManager:
|
||||
f"Code context:\n{error_context['code_context']}"
|
||||
)
|
||||
self.logger.error(
|
||||
message=create_box_message(error_message, type="error"),
|
||||
message="{error}",
|
||||
tag="ERROR",
|
||||
params={"error": str(error_message)},
|
||||
boxes=["error"],
|
||||
)
|
||||
|
||||
raise
|
||||
@@ -189,7 +192,10 @@ class AsyncDatabaseManager:
|
||||
f"Code context:\n{error_context['code_context']}"
|
||||
)
|
||||
self.logger.error(
|
||||
message=create_box_message(error_message, type="error"),
|
||||
message="{error}",
|
||||
tag="ERROR",
|
||||
params={"error": str(error_message)},
|
||||
boxes=["error"],
|
||||
)
|
||||
raise
|
||||
finally:
|
||||
|
||||
@@ -126,6 +126,7 @@ class MemoryAdaptiveDispatcher(BaseDispatcher):
|
||||
check_interval: float = 1.0,
|
||||
max_session_permit: int = 20,
|
||||
fairness_timeout: float = 600.0, # 10 minutes before prioritizing long-waiting URLs
|
||||
memory_wait_timeout: Optional[float] = 600.0,
|
||||
rate_limiter: Optional[RateLimiter] = None,
|
||||
monitor: Optional[CrawlerMonitor] = None,
|
||||
):
|
||||
@@ -136,27 +137,46 @@ class MemoryAdaptiveDispatcher(BaseDispatcher):
|
||||
self.check_interval = check_interval
|
||||
self.max_session_permit = max_session_permit
|
||||
self.fairness_timeout = fairness_timeout
|
||||
self.memory_wait_timeout = memory_wait_timeout
|
||||
self.result_queue = asyncio.Queue()
|
||||
self.task_queue = asyncio.PriorityQueue() # Priority queue for better management
|
||||
self.memory_pressure_mode = False # Flag to indicate when we're in memory pressure mode
|
||||
self.current_memory_percent = 0.0 # Track current memory usage
|
||||
self._high_memory_start_time: Optional[float] = None
|
||||
|
||||
async def _memory_monitor_task(self):
|
||||
"""Background task to continuously monitor memory usage and update state"""
|
||||
while True:
|
||||
self.current_memory_percent = psutil.virtual_memory().percent
|
||||
|
||||
|
||||
# Enter memory pressure mode if we cross the threshold
|
||||
if not self.memory_pressure_mode and self.current_memory_percent >= self.memory_threshold_percent:
|
||||
self.memory_pressure_mode = True
|
||||
if self.monitor:
|
||||
self.monitor.update_memory_status("PRESSURE")
|
||||
|
||||
if self.current_memory_percent >= self.memory_threshold_percent:
|
||||
if not self.memory_pressure_mode:
|
||||
self.memory_pressure_mode = True
|
||||
self._high_memory_start_time = time.time()
|
||||
if self.monitor:
|
||||
self.monitor.update_memory_status("PRESSURE")
|
||||
else:
|
||||
if self._high_memory_start_time is None:
|
||||
self._high_memory_start_time = time.time()
|
||||
if (
|
||||
self.memory_wait_timeout is not None
|
||||
and self._high_memory_start_time is not None
|
||||
and time.time() - self._high_memory_start_time >= self.memory_wait_timeout
|
||||
):
|
||||
raise MemoryError(
|
||||
"Memory usage exceeded threshold for"
|
||||
f" {self.memory_wait_timeout} seconds"
|
||||
)
|
||||
|
||||
# Exit memory pressure mode if we go below recovery threshold
|
||||
elif self.memory_pressure_mode and self.current_memory_percent <= self.recovery_threshold_percent:
|
||||
self.memory_pressure_mode = False
|
||||
self._high_memory_start_time = None
|
||||
if self.monitor:
|
||||
self.monitor.update_memory_status("NORMAL")
|
||||
elif self.current_memory_percent < self.memory_threshold_percent:
|
||||
self._high_memory_start_time = None
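# Illustrative dispatcher setup (not part of this diff): fail fast if memory stays above
# the threshold for 5 minutes instead of waiting indefinitely. Argument names follow the
# signature above; other parameters keep their defaults.
#
# dispatcher = MemoryAdaptiveDispatcher(
#     max_session_permit=20,
#     memory_wait_timeout=300.0,  # seconds above memory_threshold_percent before MemoryError
# )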
|
||||
|
||||
# In critical mode, we might need to take more drastic action
|
||||
if self.current_memory_percent >= self.critical_threshold_percent:
|
||||
@@ -307,7 +327,7 @@ class MemoryAdaptiveDispatcher(BaseDispatcher):
|
||||
self.monitor.start()
|
||||
|
||||
results = []
|
||||
|
||||
|
||||
try:
|
||||
# Initialize task queue
|
||||
for url in urls:
|
||||
@@ -316,11 +336,18 @@ class MemoryAdaptiveDispatcher(BaseDispatcher):
|
||||
self.monitor.add_task(task_id, url)
|
||||
# Add to queue with initial priority 0, retry count 0, and current time
|
||||
await self.task_queue.put((0, (url, task_id, 0, time.time())))
|
||||
|
||||
|
||||
active_tasks = []
|
||||
|
||||
|
||||
# Process until both queues are empty
|
||||
while not self.task_queue.empty() or active_tasks:
|
||||
if memory_monitor.done():
|
||||
exc = memory_monitor.exception()
|
||||
if exc:
|
||||
for t in active_tasks:
|
||||
t.cancel()
|
||||
raise exc
|
||||
|
||||
# If memory pressure is low, start new tasks
|
||||
if not self.memory_pressure_mode and len(active_tasks) < self.max_session_permit:
|
||||
try:
|
||||
@@ -465,8 +492,14 @@ class MemoryAdaptiveDispatcher(BaseDispatcher):
|
||||
active_tasks = []
|
||||
completed_count = 0
|
||||
total_urls = len(urls)
|
||||
|
||||
|
||||
while completed_count < total_urls:
|
||||
if memory_monitor.done():
|
||||
exc = memory_monitor.exception()
|
||||
if exc:
|
||||
for t in active_tasks:
|
||||
t.cancel()
|
||||
raise exc
|
||||
# If memory pressure is low, start new tasks
|
||||
if not self.memory_pressure_mode and len(active_tasks) < self.max_session_permit:
|
||||
try:
|
||||
|
||||
@@ -1,18 +1,49 @@
|
||||
from abc import ABC, abstractmethod
|
||||
from enum import Enum
|
||||
from typing import Optional, Dict, Any
|
||||
from colorama import Fore, Style, init
|
||||
from typing import Optional, Dict, Any, List
|
||||
import os
|
||||
from datetime import datetime
|
||||
from urllib.parse import unquote
|
||||
from rich.console import Console
|
||||
from rich.text import Text
|
||||
from .utils import create_box_message
|
||||
|
||||
|
||||
class LogLevel(Enum):
|
||||
DEFAULT = 0
|
||||
DEBUG = 1
|
||||
INFO = 2
|
||||
SUCCESS = 3
|
||||
WARNING = 4
|
||||
ERROR = 5
|
||||
CRITICAL = 6
|
||||
ALERT = 7
|
||||
NOTICE = 8
|
||||
EXCEPTION = 9
|
||||
FATAL = 10
|
||||
|
||||
|
||||
def __str__(self):
|
||||
return self.name.lower()
|
||||
|
||||
class LogColor(str, Enum):
|
||||
"""Enum for log colors."""
|
||||
|
||||
DEBUG = "bright_black"
|
||||
INFO = "cyan"
|
||||
SUCCESS = "green"
|
||||
WARNING = "yellow"
|
||||
ERROR = "red"
|
||||
CYAN = "cyan"
|
||||
GREEN = "green"
|
||||
YELLOW = "yellow"
|
||||
MAGENTA = "magenta"
|
||||
DIM_MAGENTA = "dim magenta"
|
||||
RED = "red"
|
||||
|
||||
def __str__(self):
|
||||
"""Automatically convert rich color to string."""
|
||||
return self.value
|
||||
|
||||
|
||||
class AsyncLoggerBase(ABC):
|
||||
@@ -37,13 +68,14 @@ class AsyncLoggerBase(ABC):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def url_status(self, url: str, success: bool, timing: float, tag: str = "FETCH", url_length: int = 50):
|
||||
def url_status(self, url: str, success: bool, timing: float, tag: str = "FETCH", url_length: int = 100):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def error_status(self, url: str, error: str, tag: str = "ERROR", url_length: int = 50):
|
||||
def error_status(self, url: str, error: str, tag: str = "ERROR", url_length: int = 100):
|
||||
pass
|
||||
|
||||
|
||||
class AsyncLogger(AsyncLoggerBase):
|
||||
"""
|
||||
Asynchronous logger with support for colored console output and file logging.
|
||||
@@ -61,14 +93,21 @@ class AsyncLogger(AsyncLoggerBase):
|
||||
"DEBUG": "⋯",
|
||||
"INFO": "ℹ",
|
||||
"WARNING": "⚠",
|
||||
"SUCCESS": "✔",
|
||||
"CRITICAL": "‼",
|
||||
"ALERT": "⚡",
|
||||
"NOTICE": "ℹ",
|
||||
"EXCEPTION": "❗",
|
||||
"FATAL": "☠",
|
||||
"DEFAULT": "•",
|
||||
}
|
||||
|
||||
DEFAULT_COLORS = {
|
||||
LogLevel.DEBUG: Fore.LIGHTBLACK_EX,
|
||||
LogLevel.INFO: Fore.CYAN,
|
||||
LogLevel.SUCCESS: Fore.GREEN,
|
||||
LogLevel.WARNING: Fore.YELLOW,
|
||||
LogLevel.ERROR: Fore.RED,
|
||||
LogLevel.DEBUG: LogColor.DEBUG,
|
||||
LogLevel.INFO: LogColor.INFO,
|
||||
LogLevel.SUCCESS: LogColor.SUCCESS,
|
||||
LogLevel.WARNING: LogColor.WARNING,
|
||||
LogLevel.ERROR: LogColor.ERROR,
|
||||
}
|
||||
|
||||
def __init__(
|
||||
@@ -77,7 +116,7 @@ class AsyncLogger(AsyncLoggerBase):
|
||||
log_level: LogLevel = LogLevel.DEBUG,
|
||||
tag_width: int = 10,
|
||||
icons: Optional[Dict[str, str]] = None,
|
||||
colors: Optional[Dict[LogLevel, str]] = None,
|
||||
colors: Optional[Dict[LogLevel, LogColor]] = None,
|
||||
verbose: bool = True,
|
||||
):
|
||||
"""
|
||||
@@ -91,13 +130,13 @@ class AsyncLogger(AsyncLoggerBase):
|
||||
colors: Custom colors for different log levels
|
||||
verbose: Whether to output to console
|
||||
"""
|
||||
init() # Initialize colorama
|
||||
self.log_file = log_file
|
||||
self.log_level = log_level
|
||||
self.tag_width = tag_width
|
||||
self.icons = icons or self.DEFAULT_ICONS
|
||||
self.colors = colors or self.DEFAULT_COLORS
|
||||
self.verbose = verbose
|
||||
self.console = Console()
|
||||
|
||||
# Create log file directory if needed
|
||||
if log_file:
|
||||
@@ -110,20 +149,23 @@ class AsyncLogger(AsyncLoggerBase):
|
||||
def _get_icon(self, tag: str) -> str:
|
||||
"""Get the icon for a tag, defaulting to info icon if not found."""
|
||||
return self.icons.get(tag, self.icons["INFO"])
|
||||
|
||||
def _shorten(self, text, length, placeholder="..."):
|
||||
"""Truncate text in the middle if longer than length, or pad if shorter."""
|
||||
if len(text) <= length:
|
||||
return text.ljust(length) # Pad with spaces to reach desired length
|
||||
half = (length - len(placeholder)) // 2
|
||||
shortened = text[:half] + placeholder + text[-half:]
|
||||
return shortened.ljust(length) # Also pad shortened text to consistent length
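# Illustrative behaviour (not part of this diff):
#   _shorten("https://example.com/very/long/path/page.html", 24)
#   -> "https://ex.../page.html" padded with one trailing space to exactly 24 characters.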
|
||||
|
||||
def _write_to_file(self, message: str):
|
||||
"""Write a message to the log file if configured."""
|
||||
if self.log_file:
|
||||
text = Text.from_markup(message)
|
||||
plain_text = text.plain
|
||||
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
|
||||
with open(self.log_file, "a", encoding="utf-8") as f:
|
||||
# Strip ANSI color codes for file output
|
||||
clean_message = message.replace(Fore.RESET, "").replace(
|
||||
Style.RESET_ALL, ""
|
||||
)
|
||||
for color in vars(Fore).values():
|
||||
if isinstance(color, str):
|
||||
clean_message = clean_message.replace(color, "")
|
||||
f.write(f"[{timestamp}] {clean_message}\n")
|
||||
f.write(f"[{timestamp}] {plain_text}\n")
|
||||
|
||||
def _log(
|
||||
self,
|
||||
@@ -131,8 +173,9 @@ class AsyncLogger(AsyncLoggerBase):
|
||||
message: str,
|
||||
tag: str,
|
||||
params: Optional[Dict[str, Any]] = None,
|
||||
colors: Optional[Dict[str, str]] = None,
|
||||
base_color: Optional[str] = None,
|
||||
colors: Optional[Dict[str, LogColor]] = None,
|
||||
boxes: Optional[List[str]] = None,
|
||||
base_color: Optional[LogColor] = None,
|
||||
**kwargs,
|
||||
):
|
||||
"""
|
||||
@@ -144,55 +187,44 @@ class AsyncLogger(AsyncLoggerBase):
|
||||
tag: Tag for the message
|
||||
params: Parameters to format into the message
|
||||
colors: Color overrides for specific parameters
|
||||
boxes: Box overrides for specific parameters
|
||||
base_color: Base color for the entire message
|
||||
"""
|
||||
if level.value < self.log_level.value:
|
||||
return
|
||||
|
||||
# Format the message with parameters if provided
|
||||
# avoid conflict with rich formatting
|
||||
parsed_message = message.replace("[", "[[").replace("]", "]]")
|
||||
if params:
|
||||
try:
|
||||
# First format the message with raw parameters
|
||||
formatted_message = message.format(**params)
|
||||
# FIXME: If the message uses a format spec such as {value:.2f}, colors and boxes may
# not be applied properly: a value of 0.23333 is rendered in the message as "0.23",
# but the code below searches for the full "0.23333" when wrapping it, e.g.
# replace("0.23333", "[color]0.23333[/color]"), so the replacement never matches.
|
||||
formatted_message = parsed_message.format(**params)
|
||||
for key, value in params.items():
|
||||
# value_str may contain "[" and "]", which Rich would treat as markup, so escape them.
|
||||
value_str = str(value).replace("[", "[[").replace("]", "]]")
|
||||
# check whether we need to apply a color
|
||||
if colors and key in colors:
|
||||
color_str = f"[{colors[key]}]{value_str}[/{colors[key]}]"
|
||||
formatted_message = formatted_message.replace(value_str, color_str)
|
||||
value_str = color_str
|
||||
|
||||
# Then apply colors if specified
|
||||
color_map = {
|
||||
"green": Fore.GREEN,
|
||||
"red": Fore.RED,
|
||||
"yellow": Fore.YELLOW,
|
||||
"blue": Fore.BLUE,
|
||||
"cyan": Fore.CYAN,
|
||||
"magenta": Fore.MAGENTA,
|
||||
"white": Fore.WHITE,
|
||||
"black": Fore.BLACK,
|
||||
"reset": Style.RESET_ALL,
|
||||
}
|
||||
if colors:
|
||||
for key, color in colors.items():
|
||||
# Find the formatted value in the message and wrap it with color
|
||||
if color in color_map:
|
||||
color = color_map[color]
|
||||
if key in params:
|
||||
value_str = str(params[key])
|
||||
formatted_message = formatted_message.replace(
|
||||
value_str, f"{color}{value_str}{Style.RESET_ALL}"
|
||||
)
|
||||
# check whether we need to apply a box
|
||||
if boxes and key in boxes:
|
||||
formatted_message = formatted_message.replace(value_str,
|
||||
create_box_message(value_str, type=str(level)))
|
||||
|
||||
except KeyError as e:
|
||||
formatted_message = (
|
||||
f"LOGGING ERROR: Missing parameter {e} in message template"
|
||||
)
|
||||
level = LogLevel.ERROR
|
||||
else:
|
||||
formatted_message = message
|
||||
formatted_message = parsed_message
|
||||
|
||||
# Construct the full log line
|
||||
color = base_color or self.colors[level]
|
||||
log_line = f"{color}{self._format_tag(tag)} {self._get_icon(tag)} {formatted_message}{Style.RESET_ALL}"
|
||||
color: LogColor = base_color or self.colors[level]
|
||||
log_line = f"[{color}]{self._format_tag(tag)} {self._get_icon(tag)} {formatted_message} [/{color}]"
|
||||
|
||||
# Output to console if verbose
|
||||
if self.verbose or kwargs.get("force_verbose", False):
|
||||
print(log_line)
|
||||
self.console.print(log_line)
|
||||
|
||||
# Write to file if configured
|
||||
self._write_to_file(log_line)
|
||||
@@ -212,6 +244,22 @@ class AsyncLogger(AsyncLoggerBase):
|
||||
def warning(self, message: str, tag: str = "WARNING", **kwargs):
|
||||
"""Log a warning message."""
|
||||
self._log(LogLevel.WARNING, message, tag, **kwargs)
|
||||
|
||||
def critical(self, message: str, tag: str = "CRITICAL", **kwargs):
|
||||
"""Log a critical message."""
|
||||
self._log(LogLevel.ERROR, message, tag, **kwargs)
|
||||
def exception(self, message: str, tag: str = "EXCEPTION", **kwargs):
|
||||
"""Log an exception message."""
|
||||
self._log(LogLevel.ERROR, message, tag, **kwargs)
|
||||
def fatal(self, message: str, tag: str = "FATAL", **kwargs):
|
||||
"""Log a fatal message."""
|
||||
self._log(LogLevel.ERROR, message, tag, **kwargs)
|
||||
def alert(self, message: str, tag: str = "ALERT", **kwargs):
|
||||
"""Log an alert message."""
|
||||
self._log(LogLevel.ERROR, message, tag, **kwargs)
|
||||
def notice(self, message: str, tag: str = "NOTICE", **kwargs):
|
||||
"""Log a notice message."""
|
||||
self._log(LogLevel.INFO, message, tag, **kwargs)
|
||||
|
||||
def error(self, message: str, tag: str = "ERROR", **kwargs):
|
||||
"""Log an error message."""
|
||||
@@ -223,7 +271,7 @@ class AsyncLogger(AsyncLoggerBase):
|
||||
success: bool,
|
||||
timing: float,
|
||||
tag: str = "FETCH",
|
||||
url_length: int = 50,
|
||||
url_length: int = 100,
|
||||
):
|
||||
"""
|
||||
Convenience method for logging URL fetch status.
|
||||
@@ -235,19 +283,20 @@ class AsyncLogger(AsyncLoggerBase):
|
||||
tag: Tag for the message
|
||||
url_length: Maximum length for URL in log
|
||||
"""
|
||||
decoded_url = unquote(url)
|
||||
readable_url = self._shorten(decoded_url, url_length)
|
||||
self._log(
|
||||
level=LogLevel.SUCCESS if success else LogLevel.ERROR,
|
||||
message="{url:.{url_length}}... | Status: {status} | Time: {timing:.2f}s",
|
||||
message="{url} | {status} | ⏱: {timing:.2f}s",
|
||||
tag=tag,
|
||||
params={
|
||||
"url": url,
|
||||
"url_length": url_length,
|
||||
"status": success,
|
||||
"url": readable_url,
|
||||
"status": "✓" if success else "✗",
|
||||
"timing": timing,
|
||||
},
|
||||
colors={
|
||||
"status": Fore.GREEN if success else Fore.RED,
|
||||
"timing": Fore.YELLOW,
|
||||
"status": LogColor.SUCCESS if success else LogColor.ERROR,
|
||||
"timing": LogColor.WARNING,
|
||||
},
|
||||
)
|
||||
|
||||
@@ -263,11 +312,13 @@ class AsyncLogger(AsyncLoggerBase):
|
||||
tag: Tag for the message
|
||||
url_length: Maximum length for URL in log
|
||||
"""
|
||||
decoded_url = unquote(url)
|
||||
readable_url = self._shorten(decoded_url, url_length)
|
||||
self._log(
|
||||
level=LogLevel.ERROR,
|
||||
message="{url:.{url_length}}... | Error: {error}",
|
||||
message="{url} | Error: {error}",
|
||||
tag=tag,
|
||||
params={"url": url, "url_length": url_length, "error": error},
|
||||
params={"url": readable_url, "error": error},
|
||||
)
|
||||
|
||||
class AsyncFileLogger(AsyncLoggerBase):
|
||||
@@ -311,13 +362,13 @@ class AsyncFileLogger(AsyncLoggerBase):
|
||||
"""Log an error message to file."""
|
||||
self._write_to_file("ERROR", message, tag)
|
||||
|
||||
def url_status(self, url: str, success: bool, timing: float, tag: str = "FETCH", url_length: int = 50):
|
||||
def url_status(self, url: str, success: bool, timing: float, tag: str = "FETCH", url_length: int = 100):
|
||||
"""Log URL fetch status to file."""
|
||||
status = "SUCCESS" if success else "FAILED"
|
||||
message = f"{url[:url_length]}... | Status: {status} | Time: {timing:.2f}s"
|
||||
self._write_to_file("URL_STATUS", message, tag)
|
||||
|
||||
def error_status(self, url: str, error: str, tag: str = "ERROR", url_length: int = 50):
|
||||
def error_status(self, url: str, error: str, tag: str = "ERROR", url_length: int = 100):
|
||||
"""Log error status to file."""
|
||||
message = f"{url[:url_length]}... | Error: {error}"
|
||||
self._write_to_file("ERROR", message, tag)
|
||||
|
||||
1471
crawl4ai/async_url_seeder.py
Normal file
File diff suppressed because it is too large
@@ -2,7 +2,6 @@ from .__version__ import __version__ as crawl4ai_version
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
from colorama import Fore
|
||||
from pathlib import Path
|
||||
from typing import Optional, List
|
||||
import json
|
||||
@@ -36,17 +35,18 @@ from .markdown_generation_strategy import (
|
||||
)
|
||||
from .deep_crawling import DeepCrawlDecorator
|
||||
from .async_logger import AsyncLogger, AsyncLoggerBase
|
||||
from .async_configs import BrowserConfig, CrawlerRunConfig
|
||||
from .async_configs import BrowserConfig, CrawlerRunConfig, ProxyConfig, SeedingConfig
|
||||
from .async_dispatcher import * # noqa: F403
|
||||
from .async_dispatcher import BaseDispatcher, MemoryAdaptiveDispatcher, RateLimiter
|
||||
from .async_url_seeder import AsyncUrlSeeder
|
||||
|
||||
from .utils import (
|
||||
sanitize_input_encode,
|
||||
InvalidCSSSelectorError,
|
||||
fast_format_html,
|
||||
create_box_message,
|
||||
get_error_context,
|
||||
RobotsParser,
|
||||
preprocess_html_for_schema,
|
||||
)
|
||||
|
||||
|
||||
@@ -111,7 +111,8 @@ class AsyncWebCrawler:
|
||||
self,
|
||||
crawler_strategy: AsyncCrawlerStrategy = None,
|
||||
config: BrowserConfig = None,
|
||||
base_directory: str = str(os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home())),
|
||||
base_directory: str = str(
|
||||
os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home())),
|
||||
thread_safe: bool = False,
|
||||
logger: AsyncLoggerBase = None,
|
||||
**kwargs,
|
||||
@@ -139,7 +140,8 @@ class AsyncWebCrawler:
|
||||
)
|
||||
|
||||
# Initialize crawler strategy
|
||||
params = {k: v for k, v in kwargs.items() if k in ["browser_config", "logger"]}
|
||||
params = {k: v for k, v in kwargs.items() if k in [
|
||||
"browser_config", "logger"]}
|
||||
self.crawler_strategy = crawler_strategy or AsyncPlaywrightCrawlerStrategy(
|
||||
browser_config=browser_config,
|
||||
logger=self.logger,
|
||||
@@ -162,6 +164,8 @@ class AsyncWebCrawler:
|
||||
# Decorate arun method with deep crawling capabilities
|
||||
self._deep_handler = DeepCrawlDecorator(self)
|
||||
self.arun = self._deep_handler(self.arun)
|
||||
|
||||
self.url_seeder: Optional[AsyncUrlSeeder] = None
|
||||
|
||||
async def start(self):
|
||||
"""
|
||||
@@ -237,7 +241,8 @@ class AsyncWebCrawler:
|
||||
|
||||
config = config or CrawlerRunConfig()
|
||||
if not isinstance(url, str) or not url:
|
||||
raise ValueError("Invalid URL, make sure the URL is a non-empty string")
|
||||
raise ValueError(
|
||||
"Invalid URL, make sure the URL is a non-empty string")
|
||||
|
||||
async with self._lock or self.nullcontext():
|
||||
try:
|
||||
@@ -291,12 +296,12 @@ class AsyncWebCrawler:
|
||||
|
||||
# Update proxy configuration from rotation strategy if available
|
||||
if config and config.proxy_rotation_strategy:
|
||||
next_proxy = await config.proxy_rotation_strategy.get_next_proxy()
|
||||
next_proxy: ProxyConfig = await config.proxy_rotation_strategy.get_next_proxy()
|
||||
if next_proxy:
|
||||
self.logger.info(
|
||||
message="Switch proxy: {proxy}",
|
||||
tag="PROXY",
|
||||
params={"proxy": next_proxy.server},
|
||||
params={"proxy": next_proxy.server}
|
||||
)
|
||||
config.proxy_config = next_proxy
|
||||
# config = config.clone(proxy_config=next_proxy)
|
||||
@@ -306,7 +311,8 @@ class AsyncWebCrawler:
|
||||
t1 = time.perf_counter()
|
||||
|
||||
if config.user_agent:
|
||||
self.crawler_strategy.update_user_agent(config.user_agent)
|
||||
self.crawler_strategy.update_user_agent(
|
||||
config.user_agent)
|
||||
|
||||
# Check robots.txt if enabled
|
||||
if config and config.check_robots_txt:
|
||||
@@ -353,10 +359,11 @@ class AsyncWebCrawler:
|
||||
html=html,
|
||||
extracted_content=extracted_content,
|
||||
config=config, # Pass the config object instead of individual parameters
|
||||
screenshot=screenshot_data,
|
||||
screenshot_data=screenshot_data,
|
||||
pdf_data=pdf_data,
|
||||
verbose=config.verbose,
|
||||
is_raw_html=True if url.startswith("raw:") else False,
|
||||
redirected_url=async_response.redirected_url,
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
@@ -372,20 +379,14 @@ class AsyncWebCrawler:
|
||||
crawl_result.console_messages = async_response.console_messages
|
||||
|
||||
crawl_result.success = bool(html)
|
||||
crawl_result.session_id = getattr(config, "session_id", None)
|
||||
crawl_result.session_id = getattr(
|
||||
config, "session_id", None)
|
||||
|
||||
self.logger.success(
|
||||
message="{url:.50}... | Status: {status} | Total: {timing}",
|
||||
self.logger.url_status(
|
||||
url=cache_context.display_url,
|
||||
success=crawl_result.success,
|
||||
timing=time.perf_counter() - start_time,
|
||||
tag="COMPLETE",
|
||||
params={
|
||||
"url": cache_context.display_url,
|
||||
"status": crawl_result.success,
|
||||
"timing": f"{time.perf_counter() - start_time:.2f}s",
|
||||
},
|
||||
colors={
|
||||
"status": Fore.GREEN if crawl_result.success else Fore.RED,
|
||||
"timing": Fore.YELLOW,
|
||||
},
|
||||
)
|
||||
|
||||
# Update cache if appropriate
|
||||
@@ -395,19 +396,15 @@ class AsyncWebCrawler:
|
||||
return CrawlResultContainer(crawl_result)
|
||||
|
||||
else:
|
||||
self.logger.success(
|
||||
message="{url:.50}... | Status: {status} | Total: {timing}",
|
||||
tag="COMPLETE",
|
||||
params={
|
||||
"url": cache_context.display_url,
|
||||
"status": True,
|
||||
"timing": f"{time.perf_counter() - start_time:.2f}s",
|
||||
},
|
||||
colors={"status": Fore.GREEN, "timing": Fore.YELLOW},
|
||||
self.logger.url_status(
|
||||
url=cache_context.display_url,
|
||||
success=True,
|
||||
timing=time.perf_counter() - start_time,
|
||||
tag="COMPLETE"
|
||||
)
|
||||
|
||||
cached_result.success = bool(html)
|
||||
cached_result.session_id = getattr(config, "session_id", None)
|
||||
cached_result.session_id = getattr(
|
||||
config, "session_id", None)
|
||||
cached_result.redirected_url = cached_result.redirected_url or url
|
||||
return CrawlResultContainer(cached_result)
|
||||
|
||||
@@ -423,7 +420,7 @@ class AsyncWebCrawler:
|
||||
|
||||
self.logger.error_status(
|
||||
url=url,
|
||||
error=create_box_message(error_message, type="error"),
|
||||
error=error_message,
|
||||
tag="ERROR",
|
||||
)
|
||||
|
||||
@@ -439,7 +436,7 @@ class AsyncWebCrawler:
|
||||
html: str,
|
||||
extracted_content: str,
|
||||
config: CrawlerRunConfig,
|
||||
screenshot: str,
|
||||
screenshot_data: str,
|
||||
pdf_data: str,
|
||||
verbose: bool,
|
||||
**kwargs,
|
||||
@@ -452,7 +449,7 @@ class AsyncWebCrawler:
|
||||
html: Raw HTML content
|
||||
extracted_content: Previously extracted content (if any)
|
||||
config: Configuration object controlling processing behavior
|
||||
screenshot: Screenshot data (if any)
|
||||
screenshot_data: Screenshot data (if any)
|
||||
pdf_data: PDF data (if any)
|
||||
verbose: Whether to enable verbose logging
|
||||
**kwargs: Additional parameters for backwards compatibility
|
||||
@@ -474,12 +471,14 @@ class AsyncWebCrawler:
|
||||
params = config.__dict__.copy()
|
||||
params.pop("url", None)
|
||||
# add keys from kwargs to params that don't already exist in params
|
||||
params.update({k: v for k, v in kwargs.items() if k not in params.keys()})
|
||||
params.update({k: v for k, v in kwargs.items()
|
||||
if k not in params.keys()})
|
||||
|
||||
################################
|
||||
# Scraping Strategy Execution #
|
||||
################################
|
||||
result: ScrapingResult = scraping_strategy.scrap(url, html, **params)
|
||||
result: ScrapingResult = scraping_strategy.scrap(
|
||||
url, html, **params)
|
||||
|
||||
if result is None:
|
||||
raise ValueError(
|
||||
@@ -495,16 +494,24 @@ class AsyncWebCrawler:
|
||||
|
||||
# Extract results - handle both dict and ScrapingResult
|
||||
if isinstance(result, dict):
|
||||
cleaned_html = sanitize_input_encode(result.get("cleaned_html", ""))
|
||||
cleaned_html = sanitize_input_encode(
|
||||
result.get("cleaned_html", ""))
|
||||
media = result.get("media", {})
|
||||
tables = media.pop("tables", []) if isinstance(media, dict) else []
|
||||
links = result.get("links", {})
|
||||
metadata = result.get("metadata", {})
|
||||
else:
|
||||
cleaned_html = sanitize_input_encode(result.cleaned_html)
|
||||
media = result.media.model_dump()
|
||||
links = result.links.model_dump()
|
||||
# media = result.media.model_dump()
|
||||
# tables = media.pop("tables", [])
|
||||
# links = result.links.model_dump()
|
||||
media = result.media.model_dump() if hasattr(result.media, 'model_dump') else result.media
|
||||
tables = media.pop("tables", []) if isinstance(media, dict) else []
|
||||
links = result.links.model_dump() if hasattr(result.links, 'model_dump') else result.links
|
||||
metadata = result.metadata
|
||||
|
||||
fit_html = preprocess_html_for_schema(html_content=html, text_threshold=500, max_size=300_000)
|
||||
|
||||
################################
|
||||
# Generate Markdown #
|
||||
################################
|
||||
@@ -512,27 +519,65 @@ class AsyncWebCrawler:
|
||||
config.markdown_generator or DefaultMarkdownGenerator()
|
||||
)
|
||||
|
||||
# --- SELECT HTML SOURCE BASED ON CONTENT_SOURCE ---
|
||||
# Get the desired source from the generator config, default to 'cleaned_html'
|
||||
selected_html_source = getattr(markdown_generator, 'content_source', 'cleaned_html')
|
||||
|
||||
# Define the source selection logic using dict dispatch
|
||||
html_source_selector = {
|
||||
"raw_html": lambda: html, # The original raw HTML
|
||||
"cleaned_html": lambda: cleaned_html, # The HTML after scraping strategy
|
||||
"fit_html": lambda: fit_html, # The HTML after preprocessing for schema
|
||||
}
|
||||
|
||||
markdown_input_html = cleaned_html # Default to cleaned_html
|
||||
|
||||
try:
|
||||
# Get the appropriate lambda function, default to returning cleaned_html if key not found
|
||||
source_lambda = html_source_selector.get(selected_html_source, lambda: cleaned_html)
|
||||
# Execute the lambda to get the selected HTML
|
||||
markdown_input_html = source_lambda()
|
||||
|
||||
# Log which source is being used (optional, but helpful for debugging)
|
||||
# if self.logger and verbose:
|
||||
# actual_source_used = selected_html_source if selected_html_source in html_source_selector else 'cleaned_html (default)'
|
||||
# self.logger.debug(f"Using '{actual_source_used}' as source for Markdown generation for {url}", tag="MARKDOWN_SRC")
|
||||
|
||||
except Exception as e:
|
||||
# Handle potential errors, especially from preprocess_html_for_schema
|
||||
if self.logger:
|
||||
self.logger.warning(
|
||||
f"Error getting/processing '{selected_html_source}' for markdown source: {e}. Falling back to cleaned_html.",
|
||||
tag="MARKDOWN_SRC"
|
||||
)
|
||||
# Ensure markdown_input_html is still the default cleaned_html in case of error
|
||||
markdown_input_html = cleaned_html
|
||||
# --- END: HTML SOURCE SELECTION ---
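# Illustrative sketch (not part of this diff): selecting the raw page HTML as the markdown
# source. Assumes DefaultMarkdownGenerator accepts a content_source argument matching the
# attribute read via getattr() above.
#
# md_gen = DefaultMarkdownGenerator(content_source="raw_html")  # or "cleaned_html", "fit_html"
# run_cfg = CrawlerRunConfig(markdown_generator=md_gen)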
|
||||
|
||||
# Uncomment if by default we want to use PruningContentFilter
|
||||
# if not config.content_filter and not markdown_generator.content_filter:
|
||||
# markdown_generator.content_filter = PruningContentFilter()
|
||||
|
||||
markdown_result: MarkdownGenerationResult = (
|
||||
markdown_generator.generate_markdown(
|
||||
cleaned_html=cleaned_html,
|
||||
base_url=url,
|
||||
input_html=markdown_input_html,
|
||||
base_url=params.get("redirected_url", url)
|
||||
# html2text_options=kwargs.get('html2text', {})
|
||||
)
|
||||
)
|
||||
|
||||
# Log processing completion
|
||||
self.logger.info(
|
||||
message="{url:.50}... | Time: {timing}s",
|
||||
tag="SCRAPE",
|
||||
params={
|
||||
"url": _url,
|
||||
"timing": int((time.perf_counter() - t1) * 1000) / 1000,
|
||||
},
|
||||
self.logger.url_status(
|
||||
url=_url,
|
||||
success=True,
|
||||
timing=int((time.perf_counter() - t1) * 1000) / 1000,
|
||||
tag="SCRAPE"
|
||||
)
|
||||
# self.logger.info(
|
||||
# message="{url:.50}... | Time: {timing}s",
|
||||
# tag="SCRAPE",
|
||||
# params={"url": _url, "timing": int((time.perf_counter() - t1) * 1000) / 1000},
|
||||
# )
|
||||
|
||||
################################
|
||||
# Structured Content Extraction #
|
||||
@@ -546,16 +591,19 @@ class AsyncWebCrawler:
|
||||
# Choose content based on input_format
|
||||
content_format = config.extraction_strategy.input_format
|
||||
if content_format == "fit_markdown" and not markdown_result.fit_markdown:
|
||||
self.logger.warning(
|
||||
message="Fit markdown requested but not available. Falling back to raw markdown.",
|
||||
tag="EXTRACT",
|
||||
params={"url": _url},
|
||||
)
|
||||
|
||||
self.logger.url_status(
|
||||
url=_url,
|
||||
success=bool(html),
|
||||
timing=time.perf_counter() - t1,
|
||||
tag="EXTRACT",
|
||||
)
|
||||
content_format = "markdown"
|
||||
|
||||
content = {
|
||||
"markdown": markdown_result.raw_markdown,
|
||||
"html": html,
|
||||
"fit_html": fit_html,
|
||||
"cleaned_html": cleaned_html,
|
||||
"fit_markdown": markdown_result.fit_markdown,
|
||||
}.get(content_format, markdown_result.raw_markdown)
|
||||
@@ -563,7 +611,7 @@ class AsyncWebCrawler:
|
||||
# Use IdentityChunking for HTML input, otherwise use provided chunking strategy
|
||||
chunking = (
|
||||
IdentityChunking()
|
||||
if content_format in ["html", "cleaned_html"]
|
||||
if content_format in ["html", "cleaned_html", "fit_html"]
|
||||
else config.chunking_strategy
|
||||
)
|
||||
sections = chunking.chunk(content)
|
||||
@@ -573,15 +621,12 @@ class AsyncWebCrawler:
|
||||
)
|
||||
|
||||
# Log extraction completion
|
||||
self.logger.info(
|
||||
message="Completed for {url:.50}... | Time: {timing}s",
|
||||
tag="EXTRACT",
|
||||
params={"url": _url, "timing": time.perf_counter() - t1},
|
||||
)
|
||||
|
||||
# Handle screenshot and PDF data
|
||||
screenshot_data = None if not screenshot else screenshot
|
||||
pdf_data = None if not pdf_data else pdf_data
|
||||
self.logger.url_status(
|
||||
url=_url,
|
||||
success=bool(html),
|
||||
timing=time.perf_counter() - t1,
|
||||
tag="EXTRACT",
|
||||
)
|
||||
|
||||
# Apply HTML formatting if requested
|
||||
if config.prettiify:
|
||||
@@ -591,9 +636,11 @@ class AsyncWebCrawler:
|
||||
return CrawlResult(
|
||||
url=url,
|
||||
html=html,
|
||||
fit_html=fit_html,
|
||||
cleaned_html=cleaned_html,
|
||||
markdown=markdown_result,
|
||||
media=media,
|
||||
tables=tables, # NEW
|
||||
links=links,
|
||||
metadata=metadata,
|
||||
screenshot=screenshot_data,
|
||||
@@ -706,3 +753,94 @@ class AsyncWebCrawler:
|
||||
else:
|
||||
_results = await dispatcher.run_urls(crawler=self, urls=urls, config=config)
|
||||
return [transform_result(res) for res in _results]

    async def aseed_urls(
        self,
        domain_or_domains: Union[str, List[str]],
        config: Optional[SeedingConfig] = None,
        **kwargs
    ) -> Union[List[str], Dict[str, List[Union[str, Dict[str, Any]]]]]:
        """
        Discovers, filters, and optionally validates URLs for a given domain(s)
        using sitemaps and Common Crawl archives.

        Args:
            domain_or_domains: A single domain string (e.g., "iana.org") or a list of domains.
            config: A SeedingConfig object to control the seeding process.
                Parameters passed directly via kwargs will override those in 'config'.
            **kwargs: Additional parameters (e.g., `source`, `live_check`, `extract_head`,
                `pattern`, `concurrency`, `hits_per_sec`, `force_refresh`, `verbose`)
                that will be used to construct or update the SeedingConfig.

        Returns:
            If `extract_head` is False:
                - For a single domain: `List[str]` of discovered URLs.
                - For multiple domains: `Dict[str, List[str]]` mapping each domain to its URLs.
            If `extract_head` is True:
                - For a single domain: `List[Dict[str, Any]]` where each dict contains 'url'
                  and 'head_data' (parsed <head> metadata).
                - For multiple domains: `Dict[str, List[Dict[str, Any]]]` mapping each domain
                  to a list of URL data dictionaries.

        Raises:
            ValueError: If `domain_or_domains` is not a string or a list of strings.
            Exception: Any underlying exceptions from AsyncUrlSeeder or network operations.

        Example:
            >>> # Discover URLs from sitemap with live check for 'example.com'
            >>> result = await crawler.aseed_urls("example.com", source="sitemap", live_check=True, hits_per_sec=10)

            >>> # Discover URLs from Common Crawl, extract head data for 'example.com' and 'python.org'
            >>> multi_domain_result = await crawler.aseed_urls(
            >>>     ["example.com", "python.org"],
            >>>     source="cc", extract_head=True, concurrency=200, hits_per_sec=50
            >>> )
        """
        # Initialize AsyncUrlSeeder here if it hasn't been already
        if not self.url_seeder:
            # Pass the crawler's base_directory for seeder's cache management
            # Pass the crawler's logger for consistent logging
            self.url_seeder = AsyncUrlSeeder(
                base_directory=self.crawl4ai_folder,
                logger=self.logger
            )

        # Merge config object with direct kwargs, giving kwargs precedence
        seeding_config = config.clone(**kwargs) if config else SeedingConfig.from_kwargs(kwargs)

        # Ensure base_directory is set for the seeder's cache
        seeding_config.base_directory = seeding_config.base_directory or self.crawl4ai_folder
        # Ensure the seeder uses the crawler's logger (if not already set)
        if not self.url_seeder.logger:
            self.url_seeder.logger = self.logger

        # Pass verbose setting if explicitly provided in SeedingConfig or kwargs
        if seeding_config.verbose is not None:
            self.url_seeder.logger.verbose = seeding_config.verbose
        else:  # Default to crawler's verbose setting
            self.url_seeder.logger.verbose = self.logger.verbose

        if isinstance(domain_or_domains, str):
            self.logger.info(
                message="Starting URL seeding for domain: {domain}",
                tag="SEED",
                params={"domain": domain_or_domains}
            )
            return await self.url_seeder.urls(
                domain_or_domains,
                seeding_config
            )
        elif isinstance(domain_or_domains, (list, tuple)):
            self.logger.info(
                message="Starting URL seeding for {count} domains",
                tag="SEED",
                params={"count": len(domain_or_domains)}
            )
            # AsyncUrlSeeder.many_urls directly accepts a list of domains and individual params.
            return await self.url_seeder.many_urls(
                domain_or_domains,
                seeding_config
            )
        else:
            raise ValueError("`domain_or_domains` must be a string or a list of strings.")
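A minimal usage sketch (assuming an AsyncWebCrawler instance named `crawler`; the SeedingConfig fields are the ones listed in the docstring above). Keyword arguments passed alongside a config override the matching config fields via `config.clone(**kwargs)`:

    cfg = SeedingConfig(source="sitemap", extract_head=False)
    urls = await crawler.aseed_urls("example.com", config=cfg, live_check=True)  # live_check overrides cfg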
@@ -5,29 +5,17 @@ import os
import sys
import shutil
import tempfile
import psutil
import signal
import subprocess
import shlex
from playwright.async_api import BrowserContext
import hashlib
from .js_snippet import load_js_script
from .config import DOWNLOAD_PAGE_TIMEOUT
from .async_configs import BrowserConfig, CrawlerRunConfig
from playwright_stealth import StealthConfig
from .utils import get_chromium_path

stealth_config = StealthConfig(
    webdriver=True,
    chrome_app=True,
    chrome_csi=True,
    chrome_load_times=True,
    chrome_runtime=True,
    navigator_languages=True,
    navigator_plugins=True,
    navigator_permissions=True,
    webgl_vendor=True,
    outerdimensions=True,
    navigator_hardware_concurrency=True,
    media_codecs=True,
)

BROWSER_DISABLE_OPTIONS = [
    "--disable-background-networking",
@@ -76,6 +64,51 @@ class ManagedBrowser:
        _cleanup(): Terminates the browser process and removes the temporary directory.
        create_profile(): Static method to create a user profile by launching a browser for user interaction.
    """

    @staticmethod
    def build_browser_flags(config: BrowserConfig) -> List[str]:
        """Common CLI flags for launching Chromium"""
        flags = [
            "--disable-gpu",
            "--disable-gpu-compositing",
            "--disable-software-rasterizer",
            "--no-sandbox",
            "--disable-dev-shm-usage",
            "--no-first-run",
            "--no-default-browser-check",
            "--disable-infobars",
            "--window-position=0,0",
            "--ignore-certificate-errors",
            "--ignore-certificate-errors-spki-list",
            "--disable-blink-features=AutomationControlled",
            "--window-position=400,0",
            "--disable-renderer-backgrounding",
            "--disable-ipc-flooding-protection",
            "--force-color-profile=srgb",
            "--mute-audio",
            "--disable-background-timer-throttling",
        ]
        if config.light_mode:
            flags.extend(BROWSER_DISABLE_OPTIONS)
        if config.text_mode:
            flags.extend([
                "--blink-settings=imagesEnabled=false",
                "--disable-remote-fonts",
                "--disable-images",
                "--disable-javascript",
                "--disable-software-rasterizer",
                "--disable-dev-shm-usage",
            ])
        # proxy support
        if config.proxy:
            flags.append(f"--proxy-server={config.proxy}")
        elif config.proxy_config:
            creds = ""
            if config.proxy_config.username and config.proxy_config.password:
                creds = f"{config.proxy_config.username}:{config.proxy_config.password}@"
            flags.append(f"--proxy-server={creds}{config.proxy_config.server}")
        # dedupe
        return list(dict.fromkeys(flags))
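Illustrative sketch of composing the shared flags (the BrowserConfig values are hypothetical); `dict.fromkeys` preserves insertion order, so duplicates such as "--disable-dev-shm-usage" survive only once:

    cfg = BrowserConfig(light_mode=True, text_mode=True, proxy="http://127.0.0.1:8080")
    flags = ManagedBrowser.build_browser_flags(cfg)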

    browser_type: str
    user_data_dir: str
@@ -94,6 +127,7 @@ class ManagedBrowser:
        host: str = "localhost",
        debugging_port: int = 9222,
        cdp_url: Optional[str] = None,
        browser_config: Optional[BrowserConfig] = None,
    ):
        """
        Initialize the ManagedBrowser instance.
@@ -109,17 +143,19 @@ class ManagedBrowser:
            host (str): Host for debugging the browser. Default: "localhost".
            debugging_port (int): Port for debugging the browser. Default: 9222.
            cdp_url (str or None): CDP URL to connect to the browser. Default: None.
            browser_config (BrowserConfig): Configuration object containing all browser settings. Default: None.
        """
        self.browser_type = browser_type
        self.user_data_dir = user_data_dir
        self.headless = headless
        self.browser_type = browser_config.browser_type
        self.user_data_dir = browser_config.user_data_dir
        self.headless = browser_config.headless
        self.browser_process = None
        self.temp_dir = None
        self.debugging_port = debugging_port
        self.host = host
        self.debugging_port = browser_config.debugging_port
        self.host = browser_config.host
        self.logger = logger
        self.shutting_down = False
        self.cdp_url = cdp_url
        self.cdp_url = browser_config.cdp_url
        self.browser_config = browser_config

    async def start(self) -> str:
        """
@@ -142,6 +178,48 @@ class ManagedBrowser:
        # Get browser path and args based on OS and browser type
        # browser_path = self._get_browser_path()
        args = await self._get_browser_args()

        if self.browser_config.extra_args:
            args.extend(self.browser_config.extra_args)

        # ── make sure no old Chromium instance is owning the same port/profile ──
        try:
            if sys.platform == "win32":
                if psutil is None:
                    raise RuntimeError("psutil not available, cannot clean old browser")
                for p in psutil.process_iter(["pid", "name", "cmdline"]):
                    cl = " ".join(p.info.get("cmdline") or [])
                    if (
                        f"--remote-debugging-port={self.debugging_port}" in cl
                        and f"--user-data-dir={self.user_data_dir}" in cl
                    ):
                        p.kill()
                        p.wait(timeout=5)
            else:  # macOS / Linux
                # kill any process listening on the same debugging port
                pids = (
                    subprocess.check_output(shlex.split(f"lsof -t -i:{self.debugging_port}"))
                    .decode()
                    .strip()
                    .splitlines()
                )
                for pid in pids:
                    try:
                        os.kill(int(pid), signal.SIGTERM)
                    except ProcessLookupError:
                        pass

            # remove Chromium singleton locks, or new launch exits with
            # “Opening in existing browser session.”
            for f in ("SingletonLock", "SingletonSocket", "SingletonCookie"):
                fp = os.path.join(self.user_data_dir, f)
                if os.path.exists(fp):
                    os.remove(fp)
        except Exception as _e:
            # non-fatal — we'll try to start anyway, but log what happened
            self.logger.warning(f"pre-launch cleanup failed: {_e}", tag="BROWSER")

        # Start browser process
        try:
@@ -162,6 +240,13 @@ class ManagedBrowser:
                preexec_fn=os.setpgrp  # Start in a new process group
            )

            # If verbose is True print args used to run the process
            if self.logger and self.browser_config.verbose:
                self.logger.debug(
                    f"Starting browser with args: {' '.join(args)}",
                    tag="BROWSER"
                )

            # We'll monitor for a short time to make sure it starts properly, but won't keep monitoring
            await asyncio.sleep(0.5)  # Give browser time to start
            await self._initial_startup_check()
@@ -274,29 +359,29 @@ class ManagedBrowser:
        return browser_path

    async def _get_browser_args(self) -> List[str]:
        """Returns browser-specific command line arguments"""
        base_args = [await self._get_browser_path()]

        """Returns full CLI args for launching the browser"""
        base = [await self._get_browser_path()]
        if self.browser_type == "chromium":
            args = [
            flags = [
                f"--remote-debugging-port={self.debugging_port}",
                f"--user-data-dir={self.user_data_dir}",
            ]
            if self.headless:
                args.append("--headless=new")
                flags.append("--headless=new")
            # merge common launch flags
            flags.extend(self.build_browser_flags(self.browser_config))
        elif self.browser_type == "firefox":
            args = [
            flags = [
                "--remote-debugging-port",
                str(self.debugging_port),
                "--profile",
                self.user_data_dir,
            ]
            if self.headless:
                args.append("--headless")
                flags.append("--headless")
        else:
            raise NotImplementedError(f"Browser type {self.browser_type} not supported")

        return base_args + args
        return base + flags

    async def cleanup(self):
        """Cleanup browser process and temporary directory"""
@@ -418,6 +503,56 @@ class ManagedBrowser:
        return profiler.delete_profile(profile_name_or_path)


async def clone_runtime_state(
    src: BrowserContext,
    dst: BrowserContext,
    crawlerRunConfig: CrawlerRunConfig | None = None,
    browserConfig: BrowserConfig | None = None,
) -> None:
    """
    Bring everything that *can* be changed at runtime from `src` → `dst`.

    1. Cookies
    2. localStorage (and sessionStorage, same API)
    3. Extra headers, permissions, geolocation if supplied in configs
    """

    # ── 1. cookies ────────────────────────────────────────────────────────────
    cookies = await src.cookies()
    if cookies:
        await dst.add_cookies(cookies)

    # ── 2. localStorage / sessionStorage ──────────────────────────────────────
    state = await src.storage_state()
    for origin in state.get("origins", []):
        url = origin["origin"]
        kvs = origin.get("localStorage", [])
        if not kvs:
            continue

        page = dst.pages[0] if dst.pages else await dst.new_page()
        await page.goto(url, wait_until="domcontentloaded")
        for k, v in kvs:
            await page.evaluate("(k,v)=>localStorage.setItem(k,v)", k, v)

    # ── 3. runtime-mutable extras from configs ────────────────────────────────
    # headers
    if browserConfig and browserConfig.headers:
        await dst.set_extra_http_headers(browserConfig.headers)

    # geolocation
    if crawlerRunConfig and crawlerRunConfig.geolocation:
        await dst.grant_permissions(["geolocation"])
        await dst.set_geolocation(
            {
                "latitude": crawlerRunConfig.geolocation.latitude,
                "longitude": crawlerRunConfig.geolocation.longitude,
                "accuracy": crawlerRunConfig.geolocation.accuracy,
            }
        )

    return dst

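Illustrative call, assuming two already created Playwright contexts; only runtime-mutable state (cookies, localStorage, headers, geolocation) is copied from `src_ctx` to `dst_ctx`:

    dst_ctx = await clone_runtime_state(src_ctx, dst_ctx, crawlerRunConfig=run_cfg, browserConfig=browser_cfg)
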
class BrowserManager:
@@ -477,6 +612,7 @@ class BrowserManager:
                logger=self.logger,
                debugging_port=self.config.debugging_port,
                cdp_url=self.config.cdp_url,
                browser_config=self.config,
            )

    async def start(self):
@@ -565,6 +701,9 @@ class BrowserManager:
        if self.config.extra_args:
            args.extend(self.config.extra_args)

        # Deduplicate args
        args = list(dict.fromkeys(args))

        browser_args = {"headless": self.config.headless, "args": args}

        if self.config.chrome_channel:
@@ -779,6 +918,23 @@ class BrowserManager:
            # Update context settings with text mode settings
            context_settings.update(text_mode_settings)

        # inject locale / tz / geo if user provided them
        if crawlerRunConfig:
            if crawlerRunConfig.locale:
                context_settings["locale"] = crawlerRunConfig.locale
            if crawlerRunConfig.timezone_id:
                context_settings["timezone_id"] = crawlerRunConfig.timezone_id
            if crawlerRunConfig.geolocation:
                context_settings["geolocation"] = {
                    "latitude": crawlerRunConfig.geolocation.latitude,
                    "longitude": crawlerRunConfig.geolocation.longitude,
                    "accuracy": crawlerRunConfig.geolocation.accuracy,
                }
                # ensure geolocation permission
                perms = context_settings.get("permissions", [])
                perms.append("geolocation")
                context_settings["permissions"] = perms

        # Create and return the context with all settings
        context = await self.browser.new_context(**context_settings)

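For illustration, a run config that would exercise this path (field names follow the attributes referenced above; the geolocation object is assumed to expose latitude, longitude and accuracy):

    run_cfg = CrawlerRunConfig(
        locale="de-DE",
        timezone_id="Europe/Berlin",
        geolocation=GeolocationConfig(latitude=52.52, longitude=13.405, accuracy=10.0),
    )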
@@ -811,6 +967,10 @@ class BrowserManager:
            "semaphore_count",
            "url"
        ]

        # Do NOT exclude locale, timezone_id, or geolocation as these DO affect browser context
        # and should cause a new context to be created if they change

        for key in ephemeral_keys:
            if key in config_dict:
                del config_dict[key]
@@ -842,11 +1002,17 @@ class BrowserManager:

        # If using a managed browser, just grab the shared default_context
        if self.config.use_managed_browser:
            context = self.default_context
            pages = context.pages
            page = next((p for p in pages if p.url == crawlerRunConfig.url), None)
            if not page:
                page = await context.new_page()
            if self.config.storage_state:
                context = await self.create_browser_context(crawlerRunConfig)
                ctx = self.default_context  # default context, one window only
                ctx = await clone_runtime_state(context, ctx, crawlerRunConfig, self.config)
                page = await ctx.new_page()
            else:
                context = self.default_context
                pages = context.pages
                page = next((p for p in pages if p.url == crawlerRunConfig.url), None)
                if not page:
                    page = context.pages[0]  # await context.new_page()
        else:
            # Otherwise, check if we have an existing context for this config
            config_signature = self._make_config_signature(crawlerRunConfig)

@@ -15,12 +15,12 @@ import shutil
import json
import subprocess
import time
from typing import List, Dict, Optional, Any, Tuple
from colorama import Fore, Style, init
from typing import List, Dict, Optional, Any
from rich.console import Console

from .async_configs import BrowserConfig
from .browser_manager import ManagedBrowser
from .async_logger import AsyncLogger, AsyncLoggerBase
from .async_logger import AsyncLogger, AsyncLoggerBase, LogColor
from .utils import get_home_folder

@@ -45,8 +45,8 @@ class BrowserProfiler:
            logger (AsyncLoggerBase, optional): Logger for outputting messages.
                If None, a default AsyncLogger will be created.
        """
        # Initialize colorama for colorful terminal output
        init()
        # Initialize rich console for colorful input prompts
        self.console = Console()

        # Create a logger if not provided
        if logger is None:
@@ -127,26 +127,30 @@ class BrowserProfiler:
        profile_path = os.path.join(self.profiles_dir, profile_name)
        os.makedirs(profile_path, exist_ok=True)

        # Print instructions for the user with colorama formatting
        border = f"{Fore.CYAN}{'='*80}{Style.RESET_ALL}"
        self.logger.info(f"\n{border}", tag="PROFILE")
        self.logger.info(f"Creating browser profile: {Fore.GREEN}{profile_name}{Style.RESET_ALL}", tag="PROFILE")
        self.logger.info(f"Profile directory: {Fore.YELLOW}{profile_path}{Style.RESET_ALL}", tag="PROFILE")
        # Print instructions for the user with rich formatting
        border = f"{'='*80}"
        self.logger.info("{border}", tag="PROFILE", params={"border": f"\n{border}"}, colors={"border": LogColor.CYAN})
        self.logger.info("Creating browser profile: {profile_name}", tag="PROFILE", params={"profile_name": profile_name}, colors={"profile_name": LogColor.GREEN})
        self.logger.info("Profile directory: {profile_path}", tag="PROFILE", params={"profile_path": profile_path}, colors={"profile_path": LogColor.YELLOW})

        self.logger.info("\nInstructions:", tag="PROFILE")
        self.logger.info("1. A browser window will open for you to set up your profile.", tag="PROFILE")
        self.logger.info(f"2. {Fore.CYAN}Log in to websites{Style.RESET_ALL}, configure settings, etc. as needed.", tag="PROFILE")
        self.logger.info(f"3. When you're done, {Fore.YELLOW}press 'q' in this terminal{Style.RESET_ALL} to close the browser.", tag="PROFILE")
        self.logger.info("{segment}, configure settings, etc. as needed.", tag="PROFILE", params={"segment": "2. Log in to websites"}, colors={"segment": LogColor.CYAN})
        self.logger.info("3. When you're done, {segment} to close the browser.", tag="PROFILE", params={"segment": "press 'q' in this terminal"}, colors={"segment": LogColor.YELLOW})
        self.logger.info("4. The profile will be saved and ready to use with Crawl4AI.", tag="PROFILE")
        self.logger.info(f"{border}\n", tag="PROFILE")
        self.logger.info("{border}", tag="PROFILE", params={"border": f"{border}\n"}, colors={"border": LogColor.CYAN})

        browser_config.headless = False
        browser_config.user_data_dir = profile_path

        # Create managed browser instance
        managed_browser = ManagedBrowser(
            browser_type=browser_config.browser_type,
            user_data_dir=profile_path,
            headless=False,  # Must be visible
            browser_config=browser_config,
            # user_data_dir=profile_path,
            # headless=False,  # Must be visible
            logger=self.logger,
            debugging_port=browser_config.debugging_port
            # debugging_port=browser_config.debugging_port
        )

        # Set up signal handlers to ensure cleanup on interrupt
@@ -181,7 +185,7 @@ class BrowserProfiler:
            import select

            # First output the prompt
            self.logger.info(f"{Fore.CYAN}Press '{Fore.WHITE}q{Fore.CYAN}' when you've finished using the browser...{Style.RESET_ALL}", tag="PROFILE")
            self.logger.info("Press 'q' when you've finished using the browser...", tag="PROFILE")

            # Save original terminal settings
            fd = sys.stdin.fileno()
@@ -197,7 +201,7 @@ class BrowserProfiler:
                    if readable:
                        key = sys.stdin.read(1)
                        if key.lower() == 'q':
                            self.logger.info(f"{Fore.GREEN}Closing browser and saving profile...{Style.RESET_ALL}", tag="PROFILE")
                            self.logger.info("Closing browser and saving profile...", tag="PROFILE", base_color=LogColor.GREEN)
                            user_done_event.set()
                            return

@@ -214,8 +218,18 @@ class BrowserProfiler:
                termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)

        try:
            from playwright.async_api import async_playwright

            # Start the browser
            await managed_browser.start()
            # await managed_browser.start()
            # 1. ── Start the browser ─────────────────────────────────────────
            cdp_url = await managed_browser.start()

            # 2. ── Attach Playwright to that running Chrome ──────────────────
            pw = await async_playwright().start()
            browser = await pw.chromium.connect_over_cdp(cdp_url)
            # Grab the existing default context (there is always one)
            context = browser.contexts[0]

            # Check if browser started successfully
            browser_process = managed_browser.browser_process
@@ -223,7 +237,7 @@ class BrowserProfiler:
                self.logger.error("Failed to start browser process.", tag="PROFILE")
                return None

            self.logger.info(f"Browser launched. {Fore.CYAN}Waiting for you to finish...{Style.RESET_ALL}", tag="PROFILE")
            self.logger.info("Browser launched. Waiting for you to finish...", tag="PROFILE")

            # Start listening for keyboard input
            listener_task = asyncio.create_task(listen_for_quit_command())
@@ -240,15 +254,27 @@ class BrowserProfiler:
            except asyncio.CancelledError:
                pass

            # 3. ── Persist storage state *before* we kill Chrome ─────────────
            state_file = os.path.join(profile_path, "storage_state.json")
            try:
                await context.storage_state(path=state_file)
                self.logger.info(f"[PROFILE].i storage_state saved → {state_file}", tag="PROFILE")
            except Exception as e:
                self.logger.warning(f"[PROFILE].w failed to save storage_state: {e}", tag="PROFILE")

            # 4. ── Close everything cleanly ──────────────────────────────────
            await browser.close()
            await pw.stop()

            # If the browser is still running and the user pressed 'q', terminate it
            if browser_process.poll() is None and user_done_event.is_set():
                self.logger.info("Terminating browser process...", tag="PROFILE")
                await managed_browser.cleanup()

            self.logger.success(f"Browser closed. Profile saved at: {Fore.GREEN}{profile_path}{Style.RESET_ALL}", tag="PROFILE")
            self.logger.success(f"Browser closed. Profile saved at: {profile_path}", tag="PROFILE")

        except Exception as e:
            self.logger.error(f"Error creating profile: {str(e)}", tag="PROFILE")
            self.logger.error(f"Error creating profile: {e!s}", tag="PROFILE")
            await managed_browser.cleanup()
            return None
        finally:
@@ -440,25 +466,27 @@ class BrowserProfiler:
        ```
        """
        while True:
            self.logger.info(f"\n{Fore.CYAN}Profile Management Options:{Style.RESET_ALL}", tag="MENU")
            self.logger.info(f"1. {Fore.GREEN}Create a new profile{Style.RESET_ALL}", tag="MENU")
            self.logger.info(f"2. {Fore.YELLOW}List available profiles{Style.RESET_ALL}", tag="MENU")
            self.logger.info(f"3. {Fore.RED}Delete a profile{Style.RESET_ALL}", tag="MENU")
            self.logger.info("\nProfile Management Options:", tag="MENU")
            self.logger.info("1. Create a new profile", tag="MENU", base_color=LogColor.GREEN)
            self.logger.info("2. List available profiles", tag="MENU", base_color=LogColor.YELLOW)
            self.logger.info("3. Delete a profile", tag="MENU", base_color=LogColor.RED)

            # Only show crawl option if callback provided
            if crawl_callback:
                self.logger.info(f"4. {Fore.CYAN}Use a profile to crawl a website{Style.RESET_ALL}", tag="MENU")
                self.logger.info(f"5. {Fore.MAGENTA}Exit{Style.RESET_ALL}", tag="MENU")
                self.logger.info("4. Use a profile to crawl a website", tag="MENU", base_color=LogColor.CYAN)
                self.logger.info("5. Exit", tag="MENU", base_color=LogColor.MAGENTA)
                exit_option = "5"
            else:
                self.logger.info(f"4. {Fore.MAGENTA}Exit{Style.RESET_ALL}", tag="MENU")
                self.logger.info("4. Exit", tag="MENU", base_color=LogColor.MAGENTA)
                exit_option = "4"

            choice = input(f"\n{Fore.CYAN}Enter your choice (1-{exit_option}): {Style.RESET_ALL}")
            self.logger.info(f"\n[cyan]Enter your choice (1-{exit_option}): [/cyan]", end="")
            choice = input()

            if choice == "1":
                # Create new profile
                name = input(f"{Fore.GREEN}Enter a name for the new profile (or press Enter for auto-generated name): {Style.RESET_ALL}")
                self.console.print("[green]Enter a name for the new profile (or press Enter for auto-generated name): [/green]", end="")
                name = input()
                await self.create_profile(name or None)

            elif choice == "2":
@@ -469,11 +497,11 @@ class BrowserProfiler:
                    self.logger.warning(" No profiles found. Create one first with option 1.", tag="PROFILES")
                    continue

                # Print profile information with colorama formatting
                # Print profile information
                self.logger.info("\nAvailable profiles:", tag="PROFILES")
                for i, profile in enumerate(profiles):
                    self.logger.info(f"[{i+1}] {Fore.CYAN}{profile['name']}{Style.RESET_ALL}", tag="PROFILES")
                    self.logger.info(f" Path: {Fore.YELLOW}{profile['path']}{Style.RESET_ALL}", tag="PROFILES")
                    self.logger.info(f"[{i+1}] {profile['name']}", tag="PROFILES")
                    self.logger.info(f" Path: {profile['path']}", tag="PROFILES", base_color=LogColor.YELLOW)
                    self.logger.info(f" Created: {profile['created'].strftime('%Y-%m-%d %H:%M:%S')}", tag="PROFILES")
                    self.logger.info(f" Browser type: {profile['type']}", tag="PROFILES")
                    self.logger.info("", tag="PROFILES")  # Empty line for spacing
@@ -486,12 +514,13 @@ class BrowserProfiler:
                    continue

                # Display numbered list
                self.logger.info(f"\n{Fore.YELLOW}Available profiles:{Style.RESET_ALL}", tag="PROFILES")
                self.logger.info("\nAvailable profiles:", tag="PROFILES", base_color=LogColor.YELLOW)
                for i, profile in enumerate(profiles):
                    self.logger.info(f"[{i+1}] {profile['name']}", tag="PROFILES")

                # Get profile to delete
                profile_idx = input(f"{Fore.RED}Enter the number of the profile to delete (or 'c' to cancel): {Style.RESET_ALL}")
                self.console.print("[red]Enter the number of the profile to delete (or 'c' to cancel): [/red]", end="")
                profile_idx = input()
                if profile_idx.lower() == 'c':
                    continue

@@ -499,17 +528,18 @@ class BrowserProfiler:
                    idx = int(profile_idx) - 1
                    if 0 <= idx < len(profiles):
                        profile_name = profiles[idx]["name"]
                        self.logger.info(f"Deleting profile: {Fore.YELLOW}{profile_name}{Style.RESET_ALL}", tag="PROFILES")
                        self.logger.info(f"Deleting profile: [yellow]{profile_name}[/yellow]", tag="PROFILES")

                        # Confirm deletion
                        confirm = input(f"{Fore.RED}Are you sure you want to delete this profile? (y/n): {Style.RESET_ALL}")
                        self.console.print("[red]Are you sure you want to delete this profile? (y/n): [/red]", end="")
                        confirm = input()
                        if confirm.lower() == 'y':
                            success = self.delete_profile(profiles[idx]["path"])

                            if success:
                                self.logger.success(f"Profile {Fore.GREEN}{profile_name}{Style.RESET_ALL} deleted successfully", tag="PROFILES")
                                self.logger.success(f"Profile {profile_name} deleted successfully", tag="PROFILES")
                            else:
                                self.logger.error(f"Failed to delete profile {Fore.RED}{profile_name}{Style.RESET_ALL}", tag="PROFILES")
                                self.logger.error(f"Failed to delete profile {profile_name}", tag="PROFILES")
                    else:
                        self.logger.error("Invalid profile number", tag="PROFILES")
                except ValueError:
@@ -523,12 +553,13 @@ class BrowserProfiler:
                    continue

                # Display numbered list
                self.logger.info(f"\n{Fore.YELLOW}Available profiles:{Style.RESET_ALL}", tag="PROFILES")
                self.logger.info("\nAvailable profiles:", tag="PROFILES", base_color=LogColor.YELLOW)
                for i, profile in enumerate(profiles):
                    self.logger.info(f"[{i+1}] {profile['name']}", tag="PROFILES")

                # Get profile to use
                profile_idx = input(f"{Fore.CYAN}Enter the number of the profile to use (or 'c' to cancel): {Style.RESET_ALL}")
                self.console.print("[cyan]Enter the number of the profile to use (or 'c' to cancel): [/cyan]", end="")
                profile_idx = input()
                if profile_idx.lower() == 'c':
                    continue

@@ -536,7 +567,8 @@ class BrowserProfiler:
                    idx = int(profile_idx) - 1
                    if 0 <= idx < len(profiles):
                        profile_path = profiles[idx]["path"]
                        url = input(f"{Fore.CYAN}Enter the URL to crawl: {Style.RESET_ALL}")
                        self.console.print("[cyan]Enter the URL to crawl: [/cyan]", end="")
                        url = input()
                        if url:
                            # Call the provided crawl callback
                            await crawl_callback(profile_path, url)
@@ -597,17 +629,26 @@ class BrowserProfiler:
        os.makedirs(profile_path, exist_ok=True)

        # Print initial information
        border = f"{Fore.CYAN}{'='*80}{Style.RESET_ALL}"
        self.logger.info(f"\n{border}", tag="CDP")
        self.logger.info(f"Launching standalone browser with CDP debugging", tag="CDP")
        self.logger.info(f"Browser type: {Fore.GREEN}{browser_type}{Style.RESET_ALL}", tag="CDP")
        self.logger.info(f"Profile path: {Fore.YELLOW}{profile_path}{Style.RESET_ALL}", tag="CDP")
        self.logger.info(f"Debugging port: {Fore.CYAN}{debugging_port}{Style.RESET_ALL}", tag="CDP")
        self.logger.info(f"Headless mode: {Fore.CYAN}{headless}{Style.RESET_ALL}", tag="CDP")
        border = f"{'='*80}"
        self.logger.info("{border}", tag="CDP", params={"border": border}, colors={"border": LogColor.CYAN})
        self.logger.info("Launching standalone browser with CDP debugging", tag="CDP")
        self.logger.info("Browser type: {browser_type}", tag="CDP", params={"browser_type": browser_type}, colors={"browser_type": LogColor.CYAN})
        self.logger.info("Profile path: {profile_path}", tag="CDP", params={"profile_path": profile_path}, colors={"profile_path": LogColor.YELLOW})
        self.logger.info(f"Debugging port: {debugging_port}", tag="CDP")
        self.logger.info(f"Headless mode: {headless}", tag="CDP")

        # create browser config
        browser_config = BrowserConfig(
            browser_type=browser_type,
            headless=headless,
            user_data_dir=profile_path,
            debugging_port=debugging_port,
            verbose=True
        )

        # Create managed browser instance
        managed_browser = ManagedBrowser(
            browser_type=browser_type,
            browser_config=browser_config,
            user_data_dir=profile_path,
            headless=headless,
            logger=self.logger,
@@ -646,7 +687,7 @@ class BrowserProfiler:
            import select

            # First output the prompt
            self.logger.info(f"{Fore.CYAN}Press '{Fore.WHITE}q{Fore.CYAN}' to stop the browser and exit...{Style.RESET_ALL}", tag="CDP")
            self.logger.info("Press 'q' to stop the browser and exit...", tag="CDP")

            # Save original terminal settings
            fd = sys.stdin.fileno()
@@ -662,7 +703,7 @@ class BrowserProfiler:
                    if readable:
                        key = sys.stdin.read(1)
                        if key.lower() == 'q':
                            self.logger.info(f"{Fore.GREEN}Closing browser...{Style.RESET_ALL}", tag="CDP")
                            self.logger.info("Closing browser...", tag="CDP")
                            user_done_event.set()
                            return

@@ -716,20 +757,20 @@ class BrowserProfiler:
                self.logger.error("Failed to start browser process.", tag="CDP")
                return None

            self.logger.info(f"Browser launched successfully. Retrieving CDP information...", tag="CDP")
            self.logger.info("Browser launched successfully. Retrieving CDP information...", tag="CDP")

            # Get CDP URL and JSON config
            cdp_url, config_json = await get_cdp_json(debugging_port)

            if cdp_url:
                self.logger.success(f"CDP URL: {Fore.GREEN}{cdp_url}{Style.RESET_ALL}", tag="CDP")
                self.logger.success(f"CDP URL: {cdp_url}", tag="CDP")

                if config_json:
                    # Display relevant CDP information
                    self.logger.info(f"Browser: {Fore.CYAN}{config_json.get('Browser', 'Unknown')}{Style.RESET_ALL}", tag="CDP")
                    self.logger.info(f"Protocol Version: {config_json.get('Protocol-Version', 'Unknown')}", tag="CDP")
                    self.logger.info(f"Browser: {config_json.get('Browser', 'Unknown')}", tag="CDP", colors={"Browser": LogColor.CYAN})
                    self.logger.info(f"Protocol Version: {config_json.get('Protocol-Version', 'Unknown')}", tag="CDP", colors={"Protocol-Version": LogColor.CYAN})
                    if 'webSocketDebuggerUrl' in config_json:
                        self.logger.info(f"WebSocket URL: {Fore.GREEN}{config_json['webSocketDebuggerUrl']}{Style.RESET_ALL}", tag="CDP")
                        self.logger.info("WebSocket URL: {webSocketDebuggerUrl}", tag="CDP", params={"webSocketDebuggerUrl": config_json['webSocketDebuggerUrl']}, colors={"webSocketDebuggerUrl": LogColor.GREEN})
                else:
                    self.logger.warning("Could not retrieve CDP configuration JSON", tag="CDP")
            else:
@@ -757,7 +798,7 @@ class BrowserProfiler:
                self.logger.info("Terminating browser process...", tag="CDP")
                await managed_browser.cleanup()

            self.logger.success(f"Browser closed.", tag="CDP")
            self.logger.success("Browser closed.", tag="CDP")

        except Exception as e:
            self.logger.error(f"Error launching standalone browser: {str(e)}", tag="CDP")
@@ -972,3 +1013,30 @@ class BrowserProfiler:
            'info': browser_info
        }


if __name__ == "__main__":
    # Example usage
    profiler = BrowserProfiler()

    # Create a new profile
    import os
    from pathlib import Path
    home_dir = Path.home()
    profile_path = asyncio.run(profiler.create_profile(str(home_dir / ".crawl4ai/profiles/test-profile")))

    # Launch a standalone browser
    asyncio.run(profiler.launch_standalone_browser())

    # List profiles
    profiles = profiler.list_profiles()
    for profile in profiles:
        print(f"Profile: {profile['name']}, Path: {profile['path']}")

    # Delete a profile
    success = profiler.delete_profile("my-profile")
    if success:
        print("Profile deleted successfully")
    else:
        print("Failed to delete profile")
@@ -27,7 +27,10 @@ from crawl4ai import (
    PruningContentFilter,
    BrowserProfiler,
    DefaultMarkdownGenerator,
    LLMConfig
    LLMConfig,
    BFSDeepCrawlStrategy,
    DFSDeepCrawlStrategy,
    BestFirstCrawlingStrategy,
)
from crawl4ai.config import USER_SETTINGS
from litellm import completion
@@ -1010,13 +1013,15 @@ def cdp_cmd(user_data_dir: Optional[str], port: int, browser_type: str, headless
@click.option("--crawler", "-c", type=str, callback=parse_key_values, help="Crawler parameters as key1=value1,key2=value2")
@click.option("--output", "-o", type=click.Choice(["all", "json", "markdown", "md", "markdown-fit", "md-fit"]), default="all")
@click.option("--output-file", "-O", type=click.Path(), help="Output file path (default: stdout)")
@click.option("--bypass-cache", "-b", is_flag=True, default=True, help="Bypass cache when crawling")
@click.option("--bypass-cache", "-bc", is_flag=True, default=True, help="Bypass cache when crawling")
@click.option("--question", "-q", help="Ask a question about the crawled content")
@click.option("--verbose", "-v", is_flag=True)
@click.option("--profile", "-p", help="Use a specific browser profile (by name)")
@click.option("--deep-crawl", type=click.Choice(["bfs", "dfs", "best-first"]), help="Enable deep crawling with specified strategy (bfs, dfs, or best-first)")
@click.option("--max-pages", type=int, default=10, help="Maximum number of pages to crawl in deep crawl mode")
def crawl_cmd(url: str, browser_config: str, crawler_config: str, filter_config: str,
              extraction_config: str, json_extract: str, schema: str, browser: Dict, crawler: Dict,
              output: str, output_file: str, bypass_cache: bool, question: str, verbose: bool, profile: str):
              output: str, output_file: str, bypass_cache: bool, question: str, verbose: bool, profile: str, deep_crawl: str, max_pages: int):
    """Crawl a website and extract content

    Simple Usage:
@@ -1073,7 +1078,8 @@ def crawl_cmd(url: str, browser_config: str, crawler_config: str, filter_config:
            crawler_cfg.markdown_generator = DefaultMarkdownGenerator(
                content_filter = BM25ContentFilter(
                    user_query=filter_conf.get("query"),
                    bm25_threshold=filter_conf.get("threshold", 1.0)
                    bm25_threshold=filter_conf.get("threshold", 1.0),
                    use_stemming=filter_conf.get("use_stemming", True),
                )
            )
        elif filter_conf["type"] == "pruning":
@@ -1155,6 +1161,27 @@ Always return valid, properly formatted JSON."""

        crawler_cfg.scraping_strategy = LXMLWebScrapingStrategy()

    # Handle deep crawling configuration
    if deep_crawl:
        if deep_crawl == "bfs":
            crawler_cfg.deep_crawl_strategy = BFSDeepCrawlStrategy(
                max_depth=3,
                max_pages=max_pages
            )
        elif deep_crawl == "dfs":
            crawler_cfg.deep_crawl_strategy = DFSDeepCrawlStrategy(
                max_depth=3,
                max_pages=max_pages
            )
        elif deep_crawl == "best-first":
            crawler_cfg.deep_crawl_strategy = BestFirstCrawlingStrategy(
                max_depth=3,
                max_pages=max_pages
            )

        if verbose:
            console.print(f"[green]Deep crawling enabled:[/green] {deep_crawl} strategy, max {max_pages} pages")

    config = get_global_config()

    browser_cfg.verbose = config.get("VERBOSE", False)
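Example invocation of the new options (assuming the package's `crwl` entry point):

    crwl https://example.com --deep-crawl bfs --max-pages 25 -o markdown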
@@ -1169,39 +1196,60 @@ Always return valid, properly formatted JSON."""
            verbose
        )

        # Handle deep crawl results (list) vs single result
        if isinstance(result, list):
            if len(result) == 0:
                click.echo("No results found during deep crawling")
                return
            # Use the first result for question answering and output
            main_result = result[0]
            all_results = result
        else:
            # Single result from regular crawling
            main_result = result
            all_results = [result]

        # Handle question
        if question:
            provider, token = setup_llm_config()
            markdown = result.markdown.raw_markdown
            markdown = main_result.markdown.raw_markdown
            anyio.run(stream_llm_response, url, markdown, question, provider, token)
            return

        # Handle output
        if not output_file:
            if output == "all":
                click.echo(json.dumps(result.model_dump(), indent=2))
                if isinstance(result, list):
                    output_data = [r.model_dump() for r in all_results]
                    click.echo(json.dumps(output_data, indent=2))
                else:
                    click.echo(json.dumps(main_result.model_dump(), indent=2))
            elif output == "json":
                print(result.extracted_content)
                extracted_items = json.loads(result.extracted_content)
                print(main_result.extracted_content)
                extracted_items = json.loads(main_result.extracted_content)
                click.echo(json.dumps(extracted_items, indent=2))

            elif output in ["markdown", "md"]:
                click.echo(result.markdown.raw_markdown)
                click.echo(main_result.markdown.raw_markdown)
            elif output in ["markdown-fit", "md-fit"]:
                click.echo(result.markdown.fit_markdown)
                click.echo(main_result.markdown.fit_markdown)
        else:
            if output == "all":
                with open(output_file, "w") as f:
                    f.write(json.dumps(result.model_dump(), indent=2))
                    if isinstance(result, list):
                        output_data = [r.model_dump() for r in all_results]
                        f.write(json.dumps(output_data, indent=2))
                    else:
                        f.write(json.dumps(main_result.model_dump(), indent=2))
            elif output == "json":
                with open(output_file, "w") as f:
                    f.write(result.extracted_content)
                    f.write(main_result.extracted_content)
            elif output in ["markdown", "md"]:
                with open(output_file, "w") as f:
                    f.write(result.markdown.raw_markdown)
                    f.write(main_result.markdown.raw_markdown)
            elif output in ["markdown-fit", "md-fit"]:
                with open(output_file, "w") as f:
                    f.write(result.markdown.fit_markdown)
                    f.write(main_result.markdown.fit_markdown)

    except Exception as e:
        raise click.ClickException(str(e))
@@ -1353,9 +1401,11 @@ def profiles_cmd():
@click.option("--question", "-q", help="Ask a question about the crawled content")
@click.option("--verbose", "-v", is_flag=True)
@click.option("--profile", "-p", help="Use a specific browser profile (by name)")
@click.option("--deep-crawl", type=click.Choice(["bfs", "dfs", "best-first"]), help="Enable deep crawling with specified strategy")
@click.option("--max-pages", type=int, default=10, help="Maximum number of pages to crawl in deep crawl mode")
def default(url: str, example: bool, browser_config: str, crawler_config: str, filter_config: str,
            extraction_config: str, json_extract: str, schema: str, browser: Dict, crawler: Dict,
            output: str, bypass_cache: bool, question: str, verbose: bool, profile: str):
            output: str, bypass_cache: bool, question: str, verbose: bool, profile: str, deep_crawl: str, max_pages: int):
    """Crawl4AI CLI - Web content extraction tool

    Simple Usage:
@@ -1405,7 +1455,9 @@ def default(url: str, example: bool, browser_config: str, crawler_config: str, f
        bypass_cache=bypass_cache,
        question=question,
        verbose=verbose,
        profile=profile
        profile=profile,
        deep_crawl=deep_crawl,
        max_pages=max_pages
    )

def main():

@@ -29,6 +29,14 @@ PROVIDER_MODELS = {
    'gemini/gemini-2.0-flash-lite-preview-02-05': os.getenv("GEMINI_API_KEY"),
    "deepseek/deepseek-chat": os.getenv("DEEPSEEK_API_KEY"),
}
PROVIDER_MODELS_PREFIXES = {
    "ollama": "no-token-needed",  # Any model from Ollama no need for API token
    "groq": os.getenv("GROQ_API_KEY"),
    "openai": os.getenv("OPENAI_API_KEY"),
    "anthropic": os.getenv("ANTHROPIC_API_KEY"),
    "gemini": os.getenv("GEMINI_API_KEY"),
    "deepseek": os.getenv("DEEPSEEK_API_KEY"),
}

# Chunk token threshold
CHUNK_TOKEN_THRESHOLD = 2**11  # 2048 tokens
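A sketch of how the prefix table might be consulted when an exact model key is missing (the helper name is illustrative, not part of the codebase):

    def token_for(provider: str):
        # exact match first, then fall back to the provider prefix, e.g. "openai/gpt-4o" -> "openai"
        return PROVIDER_MODELS.get(provider) or PROVIDER_MODELS_PREFIXES.get(provider.split("/")[0])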
@@ -27,8 +27,7 @@ import json
import hashlib
from pathlib import Path
from concurrent.futures import ThreadPoolExecutor
from .async_logger import AsyncLogger, LogLevel
from colorama import Fore, Style
from .async_logger import AsyncLogger, LogLevel, LogColor


class RelevantContentFilter(ABC):
@@ -406,6 +405,7 @@ class BM25ContentFilter(RelevantContentFilter):
        user_query: str = None,
        bm25_threshold: float = 1.0,
        language: str = "english",
        use_stemming: bool = True,
    ):
        """
        Initializes the BM25ContentFilter class, if not provided, falls back to page metadata.
@@ -417,9 +417,11 @@ class BM25ContentFilter(RelevantContentFilter):
            user_query (str): User query for filtering (optional).
            bm25_threshold (float): BM25 threshold for filtering (default: 1.0).
            language (str): Language for stemming (default: 'english').
            use_stemming (bool): Whether to apply stemming (default: True).
        """
        super().__init__(user_query=user_query)
        self.bm25_threshold = bm25_threshold
        self.use_stemming = use_stemming
        self.priority_tags = {
            "h1": 5.0,
            "h2": 4.0,
@@ -433,7 +435,7 @@ class BM25ContentFilter(RelevantContentFilter):
            "pre": 1.5,
            "th": 1.5,  # Table headers
        }
        self.stemmer = stemmer(language)
        self.stemmer = stemmer(language) if use_stemming else None

    def filter_content(self, html: str, min_word_threshold: int = None) -> List[str]:
        """
@@ -480,13 +482,19 @@ class BM25ContentFilter(RelevantContentFilter):
        # for _, chunk, _, _ in candidates]
        # tokenized_query = [ps.stem(word) for word in query.lower().split()]

        tokenized_corpus = [
            [self.stemmer.stemWord(word) for word in chunk.lower().split()]
            for _, chunk, _, _ in candidates
        ]
        tokenized_query = [
            self.stemmer.stemWord(word) for word in query.lower().split()
        ]
        if self.use_stemming:
            tokenized_corpus = [
                [self.stemmer.stemWord(word) for word in chunk.lower().split()]
                for _, chunk, _, _ in candidates
            ]
            tokenized_query = [
                self.stemmer.stemWord(word) for word in query.lower().split()
            ]
        else:
            tokenized_corpus = [
                chunk.lower().split() for _, chunk, _, _ in candidates
            ]
            tokenized_query = query.lower().split()

        # tokenized_corpus = [[self.stemmer.stemWord(word) for word in tokenize_text(chunk.lower())]
        # for _, chunk, _, _ in candidates]
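Disabling stemming keeps tokens verbatim, which can help with exact-term queries; a small sketch using the parameters shown above:

    bm25 = BM25ContentFilter(user_query="playwright stealth", bm25_threshold=1.2, use_stemming=False)
    chunks = bm25.filter_content(html)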
@@ -846,8 +854,7 @@ class LLMContentFilter(RelevantContentFilter):
                },
                colors={
                    **AsyncLogger.DEFAULT_COLORS,
                    LogLevel.INFO: Fore.MAGENTA
                    + Style.DIM,  # Dimmed purple for LLM ops
                    LogLevel.INFO: LogColor.DIM_MAGENTA  # Dimmed purple for LLM ops
                },
            )
        else:
@@ -892,7 +899,7 @@ class LLMContentFilter(RelevantContentFilter):
                "Starting LLM markdown content filtering process",
                tag="LLM",
                params={"provider": self.llm_config.provider},
                colors={"provider": Fore.CYAN},
                colors={"provider": LogColor.CYAN},
            )

        # Cache handling
@@ -929,7 +936,7 @@ class LLMContentFilter(RelevantContentFilter):
                "LLM markdown: Split content into {chunk_count} chunks",
                tag="CHUNK",
                params={"chunk_count": len(html_chunks)},
                colors={"chunk_count": Fore.YELLOW},
                colors={"chunk_count": LogColor.YELLOW},
            )

        start_time = time.time()
@@ -1038,7 +1045,7 @@ class LLMContentFilter(RelevantContentFilter):
                "LLM markdown: Completed processing in {time:.2f}s",
                tag="LLM",
                params={"time": end_time - start_time},
                colors={"time": Fore.YELLOW},
                colors={"time": LogColor.YELLOW},
            )

        result = ordered_results if ordered_results else []

@@ -23,11 +23,14 @@ from .utils import (
    is_external_url,
    get_base_domain,
    extract_metadata_using_lxml,
    extract_page_context,
    calculate_link_intrinsic_score,
)
from lxml import etree
from lxml import html as lhtml
from typing import List
from .models import ScrapingResult, MediaItem, Link, Media, Links
import copy

# Pre-compile regular expressions for Open Graph and Twitter metadata
OG_REGEX = re.compile(r"^og:")
@@ -48,7 +51,7 @@ def parse_srcset(s: str) -> List[Dict]:
        if len(parts) >= 1:
            url = parts[0]
            width = (
                parts[1].rstrip("w")
                parts[1].rstrip("w").split('.')[0]
                if len(parts) > 1 and parts[1].endswith("w")
                else None
            )
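With the added `.split('.')[0]`, a fractional width descriptor such as "640.5w" is truncated to "640" instead of propagating a non-integer width; roughly:

    parse_srcset("img/a.jpg 640.5w, img/b.jpg 1280w")
    # width values become "640" and "1280" (the exact dict keys depend on the rest of the function)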
@@ -128,7 +131,8 @@ class WebScrapingStrategy(ContentScrapingStrategy):
        Returns:
            ScrapingResult: A structured result containing the scraped content.
        """
        raw_result = self._scrap(url, html, is_async=False, **kwargs)
        actual_url = kwargs.get("redirected_url", url)
        raw_result = self._scrap(actual_url, html, is_async=False, **kwargs)
        if raw_result is None:
            return ScrapingResult(
                cleaned_html="",
@@ -619,6 +623,9 @@ class WebScrapingStrategy(ContentScrapingStrategy):
            return False

        keep_element = False
        # Special case for table elements - always preserve structure
        if element.name in ["tr", "td", "th"]:
            keep_element = True

        exclude_domains = kwargs.get("exclude_domains", [])
        # exclude_social_media_domains = kwargs.get('exclude_social_media_domains', set(SOCIAL_MEDIA_DOMAINS))
@@ -713,13 +720,18 @@ class WebScrapingStrategy(ContentScrapingStrategy):

            # Check flag if we should remove external images
            if kwargs.get("exclude_external_images", False):
                element.decompose()
                return False
                # src_url_base = src.split('/')[2]
                # url_base = url.split('/')[2]
                # if url_base not in src_url_base:
                #     element.decompose()
                #     return False
                # Handle relative URLs (which are always from the same domain)
                if not src.startswith('http') and not src.startswith('//'):
                    return True  # Keep relative URLs

                # For absolute URLs, compare the base domains using the existing function
                src_base_domain = get_base_domain(src)
                url_base_domain = get_base_domain(url)

                # If the domains don't match and both are valid, the image is external
                if src_base_domain and url_base_domain and src_base_domain != url_base_domain:
                    element.decompose()
                    return False

            # if kwargs.get('exclude_social_media_links', False):
            #     if image_src_base_domain in exclude_social_media_domains:
@@ -859,6 +871,8 @@ class WebScrapingStrategy(ContentScrapingStrategy):
        parser_type = kwargs.get("parser", "lxml")
        soup = BeautifulSoup(html, parser_type)
        body = soup.body
        if body is None:
            raise Exception("'<body>' tag is not found in fetched html. Consider adding wait_for=\"css:body\" to wait for body tag to be loaded into DOM.")
        base_domain = get_base_domain(url)

        # Early removal of all images if exclude_all_images is set
@@ -897,23 +911,6 @@ class WebScrapingStrategy(ContentScrapingStrategy):
            for element in body.select(excluded_selector):
                element.extract()

        # if False and css_selector:
        #     selected_elements = body.select(css_selector)
        #     if not selected_elements:
        #         return {
        #             "markdown": "",
        #             "cleaned_html": "",
        #             "success": True,
        #             "media": {"images": [], "videos": [], "audios": []},
        #             "links": {"internal": [], "external": []},
        #             "metadata": {},
        #             "message": f"No elements found for CSS selector: {css_selector}",
        #         }
        #         # raise InvalidCSSSelectorError(f"Invalid CSS selector, No elements found for CSS selector: {css_selector}")
        #     body = soup.new_tag("div")
        #     for el in selected_elements:
        #         body.append(el)

        content_element = None
        if target_elements:
            try:
@@ -922,12 +919,12 @@ class WebScrapingStrategy(ContentScrapingStrategy):
                for_content_targeted_element.extend(body.select(target_element))
                content_element = soup.new_tag("div")
                for el in for_content_targeted_element:
                    content_element.append(el)
                    content_element.append(copy.deepcopy(el))
            except Exception as e:
                self._log("error", f"Error with target element detection: {str(e)}", "SCRAPE")
                return None
        else:
            content_element = body
            content_element = body

        kwargs["exclude_social_media_domains"] = set(
            kwargs.get("exclude_social_media_domains", []) + SOCIAL_MEDIA_DOMAINS
@@ -954,6 +951,72 @@ class WebScrapingStrategy(ContentScrapingStrategy):
        # Update the links dictionary with unique links
        links["internal"] = list(internal_links_dict.values())
        links["external"] = list(external_links_dict.values())

        # Extract head content for links if configured
        link_preview_config = kwargs.get("link_preview_config")
        if link_preview_config is not None:
            try:
                import asyncio
                from .link_preview import LinkPreview
                from .models import Links, Link

                verbose = link_preview_config.verbose

                if verbose:
                    self._log("info", "Starting link head extraction for {internal} internal and {external} external links",
                              params={"internal": len(links["internal"]), "external": len(links["external"])}, tag="LINK_EXTRACT")

                # Convert dict links to Link objects
                internal_links = [Link(**link_data) for link_data in links["internal"]]
                external_links = [Link(**link_data) for link_data in links["external"]]
                links_obj = Links(internal=internal_links, external=external_links)

                # Create a config object for LinkPreview
                class TempCrawlerRunConfig:
                    def __init__(self, link_config, score_links):
                        self.link_preview_config = link_config
                        self.score_links = score_links

                config = TempCrawlerRunConfig(link_preview_config, kwargs.get("score_links", False))

                # Extract head content (run async operation in sync context)
                async def extract_links():
                    async with LinkPreview(self.logger) as extractor:
                        return await extractor.extract_link_heads(links_obj, config)

                # Run the async operation
                try:
                    # Check if we're already in an async context
                    loop = asyncio.get_running_loop()
                    # If we're in an async context, we need to run in a thread
                    import concurrent.futures
                    with concurrent.futures.ThreadPoolExecutor() as executor:
                        future = executor.submit(asyncio.run, extract_links())
                        updated_links = future.result()
                except RuntimeError:
                    # No running loop, we can use asyncio.run directly
                    updated_links = asyncio.run(extract_links())

                # Convert back to dict format
                links["internal"] = [link.dict() for link in updated_links.internal]
                links["external"] = [link.dict() for link in updated_links.external]

                if verbose:
                    successful_internal = len([l for l in updated_links.internal if l.head_extraction_status == "valid"])
                    successful_external = len([l for l in updated_links.external if l.head_extraction_status == "valid"])
                    self._log("info", "Link head extraction completed: {internal_success}/{internal_total} internal, {external_success}/{external_total} external",
                              params={
                                  "internal_success": successful_internal,
                                  "internal_total": len(updated_links.internal),
                                  "external_success": successful_external,
                                  "external_total": len(updated_links.external)
                              }, tag="LINK_EXTRACT")
                else:
                    self._log("info", "Link head extraction completed successfully", tag="LINK_EXTRACT")

            except Exception as e:
                self._log("error", f"Link head extraction failed: {str(e)}", tag="LINK_EXTRACT")
                # Continue with original links if extraction fails

        # # Process images using ThreadPoolExecutor
        imgs = body.find_all("img")
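The pattern above (offloading `asyncio.run` to a worker thread when an event loop is already running) can be reused on its own; a minimal self-contained sketch:

    import asyncio
    import concurrent.futures

    def run_coro_blocking(coro):
        try:
            asyncio.get_running_loop()
        except RuntimeError:
            return asyncio.run(coro)  # no loop in this thread: run directly
        with concurrent.futures.ThreadPoolExecutor() as pool:
            return pool.submit(asyncio.run, coro).result()  # loop active: run in a worker thread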
@@ -1047,6 +1110,7 @@ class LXMLWebScrapingStrategy(WebScrapingStrategy):
        media: Dict[str, List],
        internal_links_dict: Dict[str, Any],
        external_links_dict: Dict[str, Any],
        page_context: dict = None,
        **kwargs,
    ) -> bool:
        base_domain = kwargs.get("base_domain", get_base_domain(url))
|
                "title": link.get("title", "").strip(),
                "base_domain": base_domain,
            }

            # Add intrinsic scoring if enabled
            if kwargs.get("score_links", False) and page_context is not None:
                try:
                    intrinsic_score = calculate_link_intrinsic_score(
                        link_text=link_data["text"],
                        url=normalized_href,
                        title_attr=link_data["title"],
                        class_attr=link.get("class", ""),
                        rel_attr=link.get("rel", ""),
                        page_context=page_context
                    )
                    link_data["intrinsic_score"] = intrinsic_score
                except Exception:
                    # Fail gracefully - assign default score
                    link_data["intrinsic_score"] = 0
            else:
                # No scoring enabled - assign infinity (all links equal priority)
                link_data["intrinsic_score"] = 0

            is_external = is_external_url(normalized_href, base_domain)
            if is_external:
@@ -1308,6 +1391,9 @@ class LXMLWebScrapingStrategy(WebScrapingStrategy):
            "source",
            "track",
            "wbr",
            "tr",
            "td",
            "th",
        }

        for el in reversed(list(root.iterdescendants())):
@@ -1498,6 +1584,33 @@ class LXMLWebScrapingStrategy(WebScrapingStrategy):

        base_domain = get_base_domain(url)

        # Extract page context for link scoring (if enabled) - do this BEFORE any removals
        page_context = None
        if kwargs.get("score_links", False):
            try:
                # Extract title
                title_elements = doc.xpath('//title')
                page_title = title_elements[0].text_content() if title_elements else ""

                # Extract headlines
                headlines = []
                for tag in ['h1', 'h2', 'h3']:
                    elements = doc.xpath(f'//{tag}')
                    for el in elements:
                        text = el.text_content().strip()
                        if text:
                            headlines.append(text)
                headlines_text = ' '.join(headlines)

                # Extract meta description
                meta_desc_elements = doc.xpath('//meta[@name="description"]/@content')
                meta_description = meta_desc_elements[0] if meta_desc_elements else ""

                # Create page context
                page_context = extract_page_context(page_title, headlines_text, meta_description, url)
            except Exception:
                page_context = {}  # Fail gracefully

        # Early removal of all images if exclude_all_images is set
        # This is more efficient in lxml as we remove elements before any processing
        if kwargs.get("exclude_all_images", False):
@@ -1540,26 +1653,6 @@ class LXMLWebScrapingStrategy(WebScrapingStrategy):
            self._log("error", f"Error extracting metadata: {str(e)}", "SCRAPE")
            meta = {}

        # Handle CSS selector targeting
        # if css_selector:
        #     try:
        #         selected_elements = body.cssselect(css_selector)
        #         if not selected_elements:
        #             return {
        #                 "markdown": "",
        #                 "cleaned_html": "",
        #                 "success": True,
        #                 "media": {"images": [], "videos": [], "audios": []},
        #                 "links": {"internal": [], "external": []},
        #                 "metadata": meta,
        #                 "message": f"No elements found for CSS selector: {css_selector}",
        #             }
        #         body = lhtml.Element("div")
        #         body.extend(selected_elements)
        #     except Exception as e:
        #         self._log("error", f"Error with CSS selector: {str(e)}", "SCRAPE")
        #         return None

        content_element = None
        if target_elements:
            try:
@@ -1567,7 +1660,7 @@ class LXMLWebScrapingStrategy(WebScrapingStrategy):
|
||||
for target_element in target_elements:
|
||||
for_content_targeted_element.extend(body.cssselect(target_element))
|
||||
content_element = lhtml.Element("div")
|
||||
content_element.extend(for_content_targeted_element)
|
||||
content_element.extend(copy.deepcopy(for_content_targeted_element))
|
||||
except Exception as e:
|
||||
self._log("error", f"Error with target element detection: {str(e)}", "SCRAPE")
|
||||
return None
|
||||
@@ -1606,6 +1699,7 @@ class LXMLWebScrapingStrategy(WebScrapingStrategy):
|
||||
media,
|
||||
internal_links_dict,
|
||||
external_links_dict,
|
||||
page_context=page_context,
|
||||
base_domain=base_domain,
|
||||
**kwargs,
|
||||
)
|
||||
@@ -1636,7 +1730,7 @@ class LXMLWebScrapingStrategy(WebScrapingStrategy):
|
||||
# Remove empty elements
|
||||
self.remove_empty_elements_fast(body, 1)
|
||||
|
||||
# Remvoe unneeded attributes
|
||||
# Remove unneeded attributes
|
||||
self.remove_unwanted_attributes_fast(
|
||||
body, keep_data_attributes=kwargs.get("keep_data_attributes", False)
|
||||
)
|
||||
@@ -1650,14 +1744,84 @@ class LXMLWebScrapingStrategy(WebScrapingStrategy):
|
||||
method="html",
|
||||
with_tail=False,
|
||||
).strip()
|
||||
|
||||
# Create links dictionary in the format expected by LinkPreview
|
||||
links = {
|
||||
"internal": list(internal_links_dict.values()),
|
||||
"external": list(external_links_dict.values()),
|
||||
}
|
||||
|
||||
# Extract head content for links if configured
|
||||
link_preview_config = kwargs.get("link_preview_config")
|
||||
if link_preview_config is not None:
|
||||
try:
|
||||
import asyncio
|
||||
from .link_preview import LinkPreview
|
||||
from .models import Links, Link
|
||||
|
||||
verbose = link_preview_config.verbose
|
||||
|
||||
if verbose:
|
||||
self._log("info", "Starting link head extraction for {internal} internal and {external} external links",
|
||||
params={"internal": len(links["internal"]), "external": len(links["external"])}, tag="LINK_EXTRACT")
|
||||
|
||||
# Convert dict links to Link objects
|
||||
internal_links = [Link(**link_data) for link_data in links["internal"]]
|
||||
external_links = [Link(**link_data) for link_data in links["external"]]
|
||||
links_obj = Links(internal=internal_links, external=external_links)
|
||||
|
||||
# Create a config object for LinkPreview
|
||||
class TempCrawlerRunConfig:
|
||||
def __init__(self, link_config, score_links):
|
||||
self.link_preview_config = link_config
|
||||
self.score_links = score_links
|
||||
|
||||
config = TempCrawlerRunConfig(link_preview_config, kwargs.get("score_links", False))
|
||||
|
||||
# Extract head content (run async operation in sync context)
|
||||
async def extract_links():
|
||||
async with LinkPreview(self.logger) as extractor:
|
||||
return await extractor.extract_link_heads(links_obj, config)
|
||||
|
||||
# Run the async operation
|
||||
try:
|
||||
# Check if we're already in an async context
|
||||
loop = asyncio.get_running_loop()
|
||||
# If we're in an async context, we need to run in a thread
|
||||
import concurrent.futures
|
||||
with concurrent.futures.ThreadPoolExecutor() as executor:
|
||||
future = executor.submit(asyncio.run, extract_links())
|
||||
updated_links = future.result()
|
||||
except RuntimeError:
|
||||
# No running loop, we can use asyncio.run directly
|
||||
updated_links = asyncio.run(extract_links())
|
||||
|
||||
# Convert back to dict format
|
||||
links["internal"] = [link.dict() for link in updated_links.internal]
|
||||
links["external"] = [link.dict() for link in updated_links.external]
|
||||
|
||||
if verbose:
|
||||
successful_internal = len([l for l in updated_links.internal if l.head_extraction_status == "valid"])
|
||||
successful_external = len([l for l in updated_links.external if l.head_extraction_status == "valid"])
|
||||
self._log("info", "Link head extraction completed: {internal_success}/{internal_total} internal, {external_success}/{external_total} external",
|
||||
params={
|
||||
"internal_success": successful_internal,
|
||||
"internal_total": len(updated_links.internal),
|
||||
"external_success": successful_external,
|
||||
"external_total": len(updated_links.external)
|
||||
}, tag="LINK_EXTRACT")
|
||||
else:
|
||||
self._log("info", "Link head extraction completed successfully", tag="LINK_EXTRACT")
|
||||
|
||||
except Exception as e:
|
||||
self._log("error", f"Error during link head extraction: {str(e)}", tag="LINK_EXTRACT")
|
||||
# Continue with original links if head extraction fails
|
||||
|
||||
return {
|
||||
"cleaned_html": cleaned_html,
|
||||
"success": success,
|
||||
"media": media,
|
||||
"links": {
|
||||
"internal": list(internal_links_dict.values()),
|
||||
"external": list(external_links_dict.values()),
|
||||
},
|
||||
"links": links,
|
||||
"metadata": meta,
|
||||
}
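The changes above wire page-context extraction, intrinsic link scoring, and optional link head extraction into the scraping pipeline. A minimal usage sketch, assuming CrawlerRunConfig exposes the score_links flag and a link_preview_config object with the attributes referenced above (include_internal, query, max_links, ...); names not shown in this diff are illustrative:

    import asyncio
    from crawl4ai import AsyncWebCrawler, CrawlerRunConfig

    async def main():
        config = CrawlerRunConfig(score_links=True)  # adds intrinsic_score to every discovered link
        async with AsyncWebCrawler() as crawler:
            result = await crawler.arun("https://example.com", config=config)
            for link in result.links["internal"][:10]:
                print(link.get("intrinsic_score"), link["href"])

    asyncio.run(main())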
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
from crawl4ai import BrowserConfig, AsyncWebCrawler, CrawlerRunConfig, CacheMode
|
||||
from crawl4ai.hub import BaseCrawler
|
||||
from crawl4ai.utils import optimize_html, get_home_folder, preprocess_html_for_schema
|
||||
from crawl4ai.extraction_strategy import JsonCssExtractionStrategy
|
||||
from crawl4ai import JsonCssExtractionStrategy
|
||||
from pathlib import Path
|
||||
import json
|
||||
import os
|
||||
|
||||
@@ -11,6 +11,7 @@ from .scorers import URLScorer
|
||||
from . import DeepCrawlStrategy
|
||||
|
||||
from ..types import AsyncWebCrawler, CrawlerRunConfig, CrawlResult, RunManyReturn
|
||||
from ..utils import normalize_url_for_deep_crawl
|
||||
|
||||
from math import inf as infinity
|
||||
|
||||
@@ -106,13 +107,14 @@ class BestFirstCrawlingStrategy(DeepCrawlStrategy):
|
||||
valid_links = []
|
||||
for link in links:
|
||||
url = link.get("href")
|
||||
if url in visited:
|
||||
base_url = normalize_url_for_deep_crawl(url, source_url)
|
||||
if base_url in visited:
|
||||
continue
|
||||
if not await self.can_process_url(url, new_depth):
|
||||
self.stats.urls_skipped += 1
|
||||
continue
|
||||
|
||||
valid_links.append(url)
|
||||
valid_links.append(base_url)
|
||||
|
||||
# If we have more valid links than capacity, limit them
|
||||
if len(valid_links) > remaining_capacity:
|
||||
@@ -148,6 +150,14 @@ class BestFirstCrawlingStrategy(DeepCrawlStrategy):
|
||||
self.logger.info(f"Max pages limit ({self.max_pages}) reached, stopping crawl")
|
||||
break
|
||||
|
||||
# Calculate how many more URLs we can process in this batch
|
||||
remaining = self.max_pages - self._pages_crawled
|
||||
batch_size = min(BATCH_SIZE, remaining)
|
||||
if batch_size <= 0:
|
||||
# No more pages to crawl
|
||||
self.logger.info(f"Max pages limit ({self.max_pages}) reached, stopping crawl")
|
||||
break
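# Illustration only (not part of the change): with max_pages=25, _pages_crawled=20
# and BATCH_SIZE=10, remaining is 5, so batch_size = min(10, 5) = 5 and the crawl
# stops once those five remaining slots have been filled by successful results.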
|
||||
|
||||
batch: List[Tuple[float, int, str, Optional[str]]] = []
|
||||
# Retrieve up to BATCH_SIZE items from the priority queue.
|
||||
for _ in range(BATCH_SIZE):
|
||||
@@ -182,6 +192,10 @@ class BestFirstCrawlingStrategy(DeepCrawlStrategy):
|
||||
# Count only successful crawls toward max_pages limit
|
||||
if result.success:
|
||||
self._pages_crawled += 1
|
||||
# Check if we've reached the limit during batch processing
|
||||
if self._pages_crawled >= self.max_pages:
|
||||
self.logger.info(f"Max pages limit ({self.max_pages}) reached during batch, stopping crawl")
|
||||
break # Exit the generator
|
||||
|
||||
yield result
|
||||
|
||||
|
||||
@@ -117,7 +117,8 @@ class BFSDeepCrawlStrategy(DeepCrawlStrategy):
|
||||
self.logger.debug(f"URL {url} skipped: score {score} below threshold {self.score_threshold}")
|
||||
self.stats.urls_skipped += 1
|
||||
continue
|
||||
|
||||
|
||||
visited.add(base_url)
|
||||
valid_links.append((base_url, score))
|
||||
|
||||
# If we have more valid links than capacity, sort by score and take the top ones
|
||||
@@ -156,9 +157,13 @@ class BFSDeepCrawlStrategy(DeepCrawlStrategy):
|
||||
results: List[CrawlResult] = []
|
||||
|
||||
while current_level and not self._cancel_event.is_set():
|
||||
# Check if we've already reached max_pages before starting a new level
|
||||
if self._pages_crawled >= self.max_pages:
|
||||
self.logger.info(f"Max pages limit ({self.max_pages}) reached, stopping crawl")
|
||||
break
|
||||
|
||||
next_level: List[Tuple[str, Optional[str]]] = []
|
||||
urls = [url for url, _ in current_level]
|
||||
visited.update(urls)
|
||||
|
||||
# Clone the config to disable deep crawling recursion and enforce batch mode.
|
||||
batch_config = config.clone(deep_crawl_strategy=None, stream=False)
|
||||
@@ -221,6 +226,10 @@ class BFSDeepCrawlStrategy(DeepCrawlStrategy):
|
||||
# Count only successful crawls
|
||||
if result.success:
|
||||
self._pages_crawled += 1
|
||||
# Check if we've reached the limit during batch processing
|
||||
if self._pages_crawled >= self.max_pages:
|
||||
self.logger.info(f"Max pages limit ({self.max_pages}) reached during batch, stopping crawl")
|
||||
break # Exit the generator
|
||||
|
||||
results_count += 1
|
||||
yield result
|
||||
|
||||
@@ -49,6 +49,10 @@ class DFSDeepCrawlStrategy(BFSDeepCrawlStrategy):
|
||||
# Count only successful crawls toward max_pages limit
|
||||
if result.success:
|
||||
self._pages_crawled += 1
|
||||
# Check if we've reached the limit during batch processing
|
||||
if self._pages_crawled >= self.max_pages:
|
||||
self.logger.info(f"Max pages limit ({self.max_pages}) reached during batch, stopping crawl")
|
||||
break # Exit the generator
|
||||
|
||||
# Only discover links from successful crawls
|
||||
new_links: List[Tuple[str, Optional[str]]] = []
|
||||
@@ -94,6 +98,10 @@ class DFSDeepCrawlStrategy(BFSDeepCrawlStrategy):
|
||||
# and only discover links from successful crawls
|
||||
if result.success:
|
||||
self._pages_crawled += 1
|
||||
# Check if we've reached the limit during batch processing
|
||||
if self._pages_crawled >= self.max_pages:
|
||||
self.logger.info(f"Max pages limit ({self.max_pages}) reached during batch, stopping crawl")
|
||||
break # Exit the generator
|
||||
|
||||
new_links: List[Tuple[str, Optional[str]]] = []
|
||||
await self.link_discovery(result, url, depth, visited, new_links, depths)
|
||||
|
||||
@@ -227,10 +227,21 @@ class URLPatternFilter(URLFilter):
|
||||
# Prefix check (/foo/*)
|
||||
if self._simple_prefixes:
|
||||
path = url.split("?")[0]
|
||||
if any(path.startswith(p) for p in self._simple_prefixes):
|
||||
result = True
|
||||
self._update_stats(result)
|
||||
return not result if self._reverse else result
|
||||
# if any(path.startswith(p) for p in self._simple_prefixes):
|
||||
# result = True
|
||||
# self._update_stats(result)
|
||||
# return not result if self._reverse else result
|
||||
####
|
||||
# Modified the prefix matching logic to ensure path boundary checking:
|
||||
# - Check if the matched prefix is followed by a path separator (`/`), query parameter (`?`), fragment (`#`), or is at the end of the path
|
||||
# - This ensures `/api/` only matches complete path segments, not substrings like `/apiv2/`
|
||||
####
|
||||
for prefix in self._simple_prefixes:
|
||||
if path.startswith(prefix):
|
||||
if len(path) == len(prefix) or path[len(prefix)] in ['/', '?', '#']:
|
||||
result = True
|
||||
self._update_stats(result)
|
||||
return not result if self._reverse else result
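# Illustration only (hypothetical prefix, not part of the change): with a stored
# prefix "/api", the path "/api/users" matches because "/" follows the prefix and
# "/api" itself matches because the lengths are equal, while "/apiv2/users" is
# rejected because the next character "v" is not one of "/", "?", "#".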
|
||||
|
||||
# Complex patterns
|
||||
if self._path_patterns:
|
||||
@@ -337,6 +348,15 @@ class ContentTypeFilter(URLFilter):
|
||||
"sqlite": "application/vnd.sqlite3",
|
||||
# Placeholder
|
||||
"unknown": "application/octet-stream", # Fallback for unknown file types
|
||||
# php
|
||||
"php": "application/x-httpd-php",
|
||||
"php3": "application/x-httpd-php",
|
||||
"php4": "application/x-httpd-php",
|
||||
"php5": "application/x-httpd-php",
|
||||
"php7": "application/x-httpd-php",
|
||||
"phtml": "application/x-httpd-php",
|
||||
"phps": "application/x-httpd-php-source",
|
||||
|
||||
}
|
||||
|
||||
@staticmethod
|
||||
|
||||
@@ -73,6 +73,8 @@ class Crawl4aiDockerClient:
|
||||
def _prepare_request(self, urls: List[str], browser_config: Optional[BrowserConfig] = None,
|
||||
crawler_config: Optional[CrawlerRunConfig] = None) -> Dict[str, Any]:
|
||||
"""Prepare request data from configs."""
|
||||
if self._token:
|
||||
self._http_client.headers["Authorization"] = f"Bearer {self._token}"
|
||||
return {
|
||||
"urls": urls,
|
||||
"browser_config": browser_config.dump() if browser_config else {},
|
||||
@@ -103,8 +105,6 @@ class Crawl4aiDockerClient:
|
||||
crawler_config: Optional[CrawlerRunConfig] = None
|
||||
) -> Union[CrawlResult, List[CrawlResult], AsyncGenerator[CrawlResult, None]]:
|
||||
"""Execute a crawl operation."""
|
||||
if not self._token:
|
||||
raise Crawl4aiClientError("Authentication required. Call authenticate() first.")
|
||||
await self._check_server()
|
||||
|
||||
data = self._prepare_request(urls, browser_config, crawler_config)
|
||||
@@ -140,8 +140,6 @@ class Crawl4aiDockerClient:
|
||||
|
||||
async def get_schema(self) -> Dict[str, Any]:
|
||||
"""Retrieve configuration schemas."""
|
||||
if not self._token:
|
||||
raise Crawl4aiClientError("Authentication required. Call authenticate() first.")
|
||||
response = await self._request("GET", "/schema")
|
||||
return response.json()
|
||||
|
||||
@@ -167,4 +165,4 @@ async def main():
|
||||
print(schema)
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
asyncio.run(main())
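With the token check now handled inside _prepare_request (which attaches the Authorization header whenever a token is present), a typical session still authenticates before crawling. A rough sketch; the constructor arguments and authenticate() signature are assumed, not taken from this diff:

    async def run_client():
        client = Crawl4aiDockerClient(base_url="http://localhost:11235")  # assumed constructor
        await client.authenticate("user@example.com")                     # assumed signature; stores the bearer token
        results = await client.crawl(
            ["https://example.com"],
            browser_config=BrowserConfig(headless=True),
            crawler_config=CrawlerRunConfig(),
        )
        schema = await client.get_schema()
        return results, schema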
|
||||
|
||||
@@ -1,9 +1,10 @@
|
||||
from abc import ABC, abstractmethod
|
||||
import inspect
|
||||
from typing import Any, List, Dict, Optional
|
||||
from typing import Any, List, Dict, Optional, Tuple, Pattern, Union
|
||||
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
import json
|
||||
import time
|
||||
from enum import IntFlag, auto
|
||||
|
||||
from .prompts import PROMPT_EXTRACT_BLOCKS, PROMPT_EXTRACT_BLOCKS_WITH_INSTRUCTION, PROMPT_EXTRACT_SCHEMA_WITH_INSTRUCTION, JSON_SCHEMA_BUILDER_XPATH, PROMPT_EXTRACT_INFERRED_SCHEMA
|
||||
from .config import (
|
||||
@@ -540,7 +541,7 @@ class LLMExtractionStrategy(ExtractionStrategy):
|
||||
api_token: The API token for the provider.
|
||||
base_url: The base URL for the API request.
|
||||
api_base: The base URL for the API request.
|
||||
extra_args: Additional arguments for the API request, such as temprature, max_tokens, etc.
|
||||
extra_args: Additional arguments for the API request, such as temperature, max_tokens, etc.
|
||||
"""
|
||||
super().__init__( input_format=input_format, **kwargs)
|
||||
self.llm_config = llm_config
|
||||
@@ -655,11 +656,11 @@ class LLMExtractionStrategy(ExtractionStrategy):
|
||||
self.total_usage.total_tokens += usage.total_tokens
|
||||
|
||||
try:
|
||||
response = response.choices[0].message.content
|
||||
content = response.choices[0].message.content
|
||||
blocks = None
|
||||
|
||||
if self.force_json_response:
|
||||
blocks = json.loads(response)
|
||||
blocks = json.loads(content)
|
||||
if isinstance(blocks, dict):
|
||||
# If it has only one key whose value is a list, assign that list to blocks, example: {"news": [..]}
|
||||
if len(blocks) == 1 and isinstance(list(blocks.values())[0], list):
|
||||
@@ -672,7 +673,7 @@ class LLMExtractionStrategy(ExtractionStrategy):
|
||||
blocks = blocks
|
||||
else:
|
||||
# blocks = extract_xml_data(["blocks"], response.choices[0].message.content)["blocks"]
|
||||
blocks = extract_xml_data(["blocks"], response)["blocks"]
|
||||
blocks = extract_xml_data(["blocks"], content)["blocks"]
|
||||
blocks = json.loads(blocks)
|
||||
|
||||
for block in blocks:
|
||||
@@ -1167,7 +1168,11 @@ In this scenario, use your best judgment to generate the schema. You need to exa
|
||||
elif not query and not target_json_example:
|
||||
user_message["content"] += """IMPORTANT: Since we neither have a query nor an example, it is crucial to rely solely on the HTML content provided. Leverage your expertise to determine the schema based on the repetitive patterns observed in the content."""
|
||||
|
||||
user_message["content"] += """IMPORTANT: Ensure your schema remains reliable by avoiding selectors that appear to generate dynamically and are not dependable. You want a reliable schema, as it consistently returns the same data even after many page reloads.
|
||||
user_message["content"] += """IMPORTANT:
|
||||
0/ Ensure your schema remains reliable by avoiding selectors that appear to generate dynamically and are not dependable. You want a reliable schema, as it consistently returns the same data even after many page reloads.
|
||||
1/ DO NOT USE use base64 kind of classes, they are temporary and not reliable.
|
||||
2/ Every selector must refer to only one unique element. You should ensure your selector points to a single element and is unique to the place that contains the information. You have to use available techniques based on CSS or XPATH requested schema to make sure your selector is unique and also not fragile, meaning if we reload the page now or in the future, the selector should remain reliable.
|
||||
3/ Do not use Regex as much as possible.
|
||||
|
||||
Analyze the HTML and generate a JSON schema that follows the specified format. Only output valid JSON schema, nothing else.
|
||||
"""
|
||||
@@ -1668,3 +1673,303 @@ class JsonXPathExtractionStrategy(JsonElementExtractionStrategy):
|
||||
def _get_element_attribute(self, element, attribute: str):
|
||||
return element.get(attribute)
|
||||
|
||||
"""
|
||||
RegexExtractionStrategy
|
||||
Fast, zero-LLM extraction of common entities via regular expressions.
|
||||
"""
|
||||
|
||||
_CTRL = {c: rf"\x{ord(c):02x}" for c in map(chr, range(32)) if c not in "\t\n\r"}
|
||||
|
||||
_WB_FIX = re.compile(r"\x08") # stray back-space → word-boundary
|
||||
_NEEDS_ESCAPE = re.compile(r"(?<!\\)\\(?![\\u])") # lone backslash
|
||||
|
||||
def _sanitize_schema(schema: Dict[str, str]) -> Dict[str, str]:
|
||||
"""Fix common JSON-escape goofs coming from LLMs or manual edits."""
|
||||
safe = {}
|
||||
for label, pat in schema.items():
|
||||
# 1️⃣ replace accidental control chars (inc. the infamous back-space)
|
||||
pat = _WB_FIX.sub(r"\\b", pat).translate(_CTRL)
|
||||
|
||||
# 2️⃣ double any single backslash that JSON kept single
|
||||
pat = _NEEDS_ESCAPE.sub(r"\\\\", pat)
|
||||
|
||||
# 3️⃣ quick sanity compile
|
||||
try:
|
||||
re.compile(pat)
|
||||
except re.error as e:
|
||||
raise ValueError(f"Regex for '{label}' won’t compile after fix: {e}") from None
|
||||
|
||||
safe[label] = pat
|
||||
return safe
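A quick sketch of the kind of input _sanitize_schema is meant to repair (values are illustrative):

    # A pattern whose "\b" word boundaries were mangled into literal back-space
    # characters (0x08) by a bad JSON escape; the sanitizer restores them and
    # re-compiles the pattern as a sanity check before returning it.
    broken = {"price": "\x08\\d+\x08"}
    fixed = _sanitize_schema(broken)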
|
||||
|
||||
|
||||
class RegexExtractionStrategy(ExtractionStrategy):
|
||||
"""
|
||||
A lean strategy that finds e-mails, phones, URLs, dates, money, etc.,
|
||||
using nothing but pre-compiled regular expressions.
|
||||
|
||||
Extraction returns::
|
||||
|
||||
{
|
||||
"url": "<page-url>",
|
||||
"label": "<pattern-label>",
|
||||
"value": "<matched-string>",
|
||||
"span": [start, end]
|
||||
}
|
||||
|
||||
Only `generate_pattern()` touches an LLM; extraction itself is pure Python.
|
||||
"""
|
||||
|
||||
# -------------------------------------------------------------- #
|
||||
# Built-in patterns exposed as IntFlag so callers can bit-OR them
|
||||
# -------------------------------------------------------------- #
|
||||
class _B(IntFlag):
|
||||
EMAIL = auto()
|
||||
PHONE_INTL = auto()
|
||||
PHONE_US = auto()
|
||||
URL = auto()
|
||||
IPV4 = auto()
|
||||
IPV6 = auto()
|
||||
UUID = auto()
|
||||
CURRENCY = auto()
|
||||
PERCENTAGE = auto()
|
||||
NUMBER = auto()
|
||||
DATE_ISO = auto()
|
||||
DATE_US = auto()
|
||||
TIME_24H = auto()
|
||||
POSTAL_US = auto()
|
||||
POSTAL_UK = auto()
|
||||
HTML_COLOR_HEX = auto()
|
||||
TWITTER_HANDLE = auto()
|
||||
HASHTAG = auto()
|
||||
MAC_ADDR = auto()
|
||||
IBAN = auto()
|
||||
CREDIT_CARD = auto()
|
||||
NOTHING = auto()
|
||||
ALL = (
|
||||
EMAIL | PHONE_INTL | PHONE_US | URL | IPV4 | IPV6 | UUID
|
||||
| CURRENCY | PERCENTAGE | NUMBER | DATE_ISO | DATE_US | TIME_24H
|
||||
| POSTAL_US | POSTAL_UK | HTML_COLOR_HEX | TWITTER_HANDLE
|
||||
| HASHTAG | MAC_ADDR | IBAN | CREDIT_CARD
|
||||
)
|
||||
|
||||
# user-friendly aliases (RegexExtractionStrategy.Email, .IPv4, …)
|
||||
Email = _B.EMAIL
|
||||
PhoneIntl = _B.PHONE_INTL
|
||||
PhoneUS = _B.PHONE_US
|
||||
Url = _B.URL
|
||||
IPv4 = _B.IPV4
|
||||
IPv6 = _B.IPV6
|
||||
Uuid = _B.UUID
|
||||
Currency = _B.CURRENCY
|
||||
Percentage = _B.PERCENTAGE
|
||||
Number = _B.NUMBER
|
||||
DateIso = _B.DATE_ISO
|
||||
DateUS = _B.DATE_US
|
||||
Time24h = _B.TIME_24H
|
||||
PostalUS = _B.POSTAL_US
|
||||
PostalUK = _B.POSTAL_UK
|
||||
HexColor = _B.HTML_COLOR_HEX
|
||||
TwitterHandle = _B.TWITTER_HANDLE
|
||||
Hashtag = _B.HASHTAG
|
||||
MacAddr = _B.MAC_ADDR
|
||||
Iban = _B.IBAN
|
||||
CreditCard = _B.CREDIT_CARD
|
||||
All = _B.ALL
|
||||
Nothing = _B(0) # no patterns
|
||||
|
||||
# ------------------------------------------------------------------ #
|
||||
# Built-in pattern catalog
|
||||
# ------------------------------------------------------------------ #
|
||||
DEFAULT_PATTERNS: Dict[str, str] = {
|
||||
# Communication
|
||||
"email": r"[\w.+-]+@[\w-]+\.[\w.-]+",
|
||||
"phone_intl": r"\+?\d[\d .()-]{7,}\d",
|
||||
"phone_us": r"\(?\d{3}\)?[ -. ]?\d{3}[ -. ]?\d{4}",
|
||||
# Web
|
||||
"url": r"https?://[^\s\"'<>]+",
|
||||
"ipv4": r"(?:\d{1,3}\.){3}\d{1,3}",
|
||||
"ipv6": r"[A-F0-9]{1,4}(?::[A-F0-9]{1,4}){7}",
|
||||
# IDs
|
||||
"uuid": r"[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}",
|
||||
# Money / numbers
|
||||
"currency": r"(?:USD|EUR|RM|\$|€|£)\s?\d+(?:[.,]\d{2})?",
|
||||
"percentage": r"\d+(?:\.\d+)?%",
|
||||
"number": r"\b\d{1,3}(?:[,.\s]\d{3})*(?:\.\d+)?\b",
|
||||
# Dates / Times
|
||||
"date_iso": r"\d{4}-\d{2}-\d{2}",
|
||||
"date_us": r"\d{1,2}/\d{1,2}/\d{2,4}",
|
||||
"time_24h": r"\b(?:[01]?\d|2[0-3]):[0-5]\d(?:[:.][0-5]\d)?\b",
|
||||
# Misc
|
||||
"postal_us": r"\b\d{5}(?:-\d{4})?\b",
|
||||
"postal_uk": r"\b[A-Z]{1,2}\d[A-Z\d]? ?\d[A-Z]{2}\b",
|
||||
"html_color_hex": r"#[0-9A-Fa-f]{6}\b",
|
||||
"twitter_handle": r"@[\w]{1,15}",
|
||||
"hashtag": r"#[\w-]+",
|
||||
"mac_addr": r"(?:[0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}",
|
||||
"iban": r"[A-Z]{2}\d{2}[A-Z0-9]{11,30}",
|
||||
"credit_card": r"\b(?:4\d{12}(?:\d{3})?|5[1-5]\d{14}|3[47]\d{13}|6(?:011|5\d{2})\d{12})\b",
|
||||
}
|
||||
|
||||
_FLAGS = re.IGNORECASE | re.MULTILINE
|
||||
_UNWANTED_PROPS = {
|
||||
"provider": "Use llm_config instead",
|
||||
"api_token": "Use llm_config instead",
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------ #
|
||||
# Construction
|
||||
# ------------------------------------------------------------------ #
|
||||
def __init__(
|
||||
self,
|
||||
pattern: "_B" = _B.NOTHING,
|
||||
*,
|
||||
custom: Optional[Union[Dict[str, str], List[Tuple[str, str]]]] = None,
|
||||
input_format: str = "fit_html",
|
||||
**kwargs,
|
||||
) -> None:
|
||||
"""
|
||||
Args:
|
||||
pattern: Built-in patterns to enable, combined with bit-OR (e.g. Email | Url).
custom: Custom patterns overriding or extending defaults.
|
||||
Dict[label, regex] or list[tuple(label, regex)].
|
||||
input_format: "html", "markdown" or "text".
|
||||
**kwargs: Forwarded to ExtractionStrategy.
|
||||
"""
|
||||
super().__init__(input_format=input_format, **kwargs)
|
||||
|
||||
# 1️⃣ take only the requested built-ins
|
||||
merged: Dict[str, str] = {
|
||||
key: rx
|
||||
for key, rx in self.DEFAULT_PATTERNS.items()
|
||||
if getattr(self._B, key.upper()).value & pattern
|
||||
}
|
||||
|
||||
# 2️⃣ apply user overrides / additions
|
||||
if custom:
|
||||
if isinstance(custom, dict):
|
||||
merged.update(custom)
|
||||
else: # iterable of (label, regex)
|
||||
merged.update({lbl: rx for lbl, rx in custom})
|
||||
|
||||
self._compiled: Dict[str, Pattern] = {
|
||||
lbl: re.compile(rx, self._FLAGS) for lbl, rx in merged.items()
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------ #
|
||||
# Extraction
|
||||
# ------------------------------------------------------------------ #
|
||||
def extract(self, url: str, content: str, *q, **kw) -> List[Dict[str, Any]]:
|
||||
# text = self._plain_text(html)
|
||||
out: List[Dict[str, Any]] = []
|
||||
|
||||
for label, cre in self._compiled.items():
|
||||
for m in cre.finditer(content):
|
||||
out.append(
|
||||
{
|
||||
"url": url,
|
||||
"label": label,
|
||||
"value": m.group(0),
|
||||
"span": [m.start(), m.end()],
|
||||
}
|
||||
)
|
||||
return out
|
||||
|
||||
# ------------------------------------------------------------------ #
|
||||
# Helpers
|
||||
# ------------------------------------------------------------------ #
|
||||
def _plain_text(self, content: str) -> str:
|
||||
if self.input_format == "text":
|
||||
return content
|
||||
return BeautifulSoup(content, "lxml").get_text(" ", strip=True)
|
||||
|
||||
# ------------------------------------------------------------------ #
|
||||
# LLM-assisted pattern generator
|
||||
# ------------------------------------------------------------------ #
|
||||
# ------------------------------------------------------------------ #
|
||||
# LLM-assisted one-off pattern builder
|
||||
# ------------------------------------------------------------------ #
|
||||
@staticmethod
|
||||
def generate_pattern(
|
||||
label: str,
|
||||
html: str,
|
||||
*,
|
||||
query: Optional[str] = None,
|
||||
examples: Optional[List[str]] = None,
|
||||
llm_config: Optional[LLMConfig] = None,
|
||||
**kwargs,
|
||||
) -> Dict[str, str]:
|
||||
"""
|
||||
Ask an LLM for a single page-specific regex and return
|
||||
{label: pattern} ── ready for RegexExtractionStrategy(custom=…)
|
||||
"""
|
||||
|
||||
# ── guard deprecated kwargs
|
||||
for k in RegexExtractionStrategy._UNWANTED_PROPS:
|
||||
if k in kwargs:
|
||||
raise AttributeError(
|
||||
f"{k} is deprecated, {RegexExtractionStrategy._UNWANTED_PROPS[k]}"
|
||||
)
|
||||
|
||||
# ── default LLM config
|
||||
if llm_config is None:
|
||||
llm_config = create_llm_config()
|
||||
|
||||
# ── system prompt – hardened
|
||||
system_msg = (
|
||||
"You are an expert Python-regex engineer.\n"
|
||||
f"Return **one** JSON object whose single key is exactly \"{label}\", "
|
||||
"and whose value is a raw-string regex pattern that works with "
|
||||
"the standard `re` module in Python.\n\n"
|
||||
"Strict rules (obey every bullet):\n"
|
||||
"• If a *user query* is supplied, treat it as the precise semantic target and optimise the "
|
||||
" pattern to capture ONLY text that answers that query. If the query conflicts with the "
|
||||
" sample HTML, the HTML wins.\n"
|
||||
"• Tailor the pattern to the *sample HTML* – reproduce its exact punctuation, spacing, "
|
||||
" symbols, capitalisation, etc. Do **NOT** invent a generic form.\n"
|
||||
"• Keep it minimal and fast: avoid unnecessary capturing, prefer non-capturing `(?: … )`, "
|
||||
" and guard against catastrophic backtracking.\n"
|
||||
"• Anchor with `^`, `$`, or `\\b` only when it genuinely improves precision.\n"
|
||||
"• Use inline flags like `(?i)` when needed; no verbose flag comments.\n"
|
||||
"• Output must be valid JSON – no markdown, code fences, comments, or extra keys.\n"
|
||||
"• The regex value must be a Python string literal: **double every backslash** "
|
||||
"(e.g. `\\\\b`, `\\\\d`, `\\\\\\\\`).\n\n"
|
||||
"Example valid output:\n"
|
||||
f"{{\"{label}\": \"(?:RM|rm)\\\\s?\\\\d{{1,3}}(?:,\\\\d{{3}})*(?:\\\\.\\\\d{{2}})?\"}}"
|
||||
)
|
||||
|
||||
# ── user message: cropped HTML + optional hints
|
||||
user_parts = ["```html", html[:5000], "```"] # protect token budget
|
||||
if query:
|
||||
user_parts.append(f"\n\n## Query\n{query.strip()}")
|
||||
if examples:
|
||||
user_parts.append("## Examples\n" + "\n".join(examples[:20]))
|
||||
user_msg = "\n\n".join(user_parts)
|
||||
|
||||
# ── LLM call (with retry/backoff)
|
||||
resp = perform_completion_with_backoff(
|
||||
provider=llm_config.provider,
|
||||
prompt_with_variables="\n\n".join([system_msg, user_msg]),
|
||||
json_response=True,
|
||||
api_token=llm_config.api_token,
|
||||
base_url=llm_config.base_url,
|
||||
extra_args=kwargs,
|
||||
)
|
||||
|
||||
# ── clean & load JSON (fix common escape mistakes *before* json.loads)
|
||||
raw = resp.choices[0].message.content
|
||||
raw = raw.replace("\x08", "\\b") # stray back-space → \b
|
||||
raw = re.sub(r'(?<!\\)\\(?![\\u"])', r"\\\\", raw) # lone \ → \\
|
||||
|
||||
try:
|
||||
pattern_dict = json.loads(raw)
|
||||
except Exception as exc:
|
||||
raise ValueError(f"LLM did not return valid JSON: {raw}") from exc
|
||||
|
||||
# quick sanity-compile
|
||||
for lbl, pat in pattern_dict.items():
|
||||
try:
|
||||
re.compile(pat)
|
||||
except re.error as e:
|
||||
raise ValueError(f"Invalid regex for '{lbl}': {e}") from None
|
||||
|
||||
return pattern_dict
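A short usage sketch for the strategy (the URL, sample text, and the custom pattern are placeholders):

    # Built-in patterns are combined with bit-OR; `custom` extends or overrides them.
    strategy = RegexExtractionStrategy(
        pattern=RegexExtractionStrategy.Email | RegexExtractionStrategy.Url,
        custom={"order_id": r"ORD-\d{6}"},  # hypothetical extra pattern
    )
    matches = strategy.extract("https://example.com", "Contact us at sales@example.com")
    # -> [{"url": "https://example.com", "label": "email", "value": "sales@example.com", "span": [14, 31]}]

    # The LLM-assisted helper returns {label: regex}, ready to be passed back in as `custom`:
    # patterns = RegexExtractionStrategy.generate_pattern("price", html, query="product price")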
|
||||
|
||||
@@ -115,5 +115,6 @@ async () => {
|
||||
document.body.style.overflow = "auto";
|
||||
|
||||
// Wait a bit for any animations to complete
|
||||
await new Promise((resolve) => setTimeout(resolve, 100));
|
||||
document.body.scrollIntoView(false);
|
||||
await new Promise((resolve) => setTimeout(resolve, 50));
|
||||
};
|
||||
|
||||
crawl4ai/link_preview.py (new file, 395 lines)
@@ -0,0 +1,395 @@
|
||||
"""
|
||||
Link Extractor for Crawl4AI
|
||||
|
||||
Extracts head content from links discovered during crawling using URLSeeder's
|
||||
efficient parallel processing and caching infrastructure.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import fnmatch
|
||||
from typing import Dict, List, Optional, Any
|
||||
from .async_logger import AsyncLogger
|
||||
from .async_url_seeder import AsyncUrlSeeder
|
||||
from .async_configs import SeedingConfig, CrawlerRunConfig
|
||||
from .models import Links, Link
|
||||
from .utils import calculate_total_score
|
||||
|
||||
|
||||
class LinkPreview:
|
||||
"""
|
||||
Extracts head content from links using URLSeeder's parallel processing infrastructure.
|
||||
|
||||
This class provides intelligent link filtering and head content extraction with:
|
||||
- Pattern-based inclusion/exclusion filtering
|
||||
- Parallel processing with configurable concurrency
|
||||
- Caching for performance
|
||||
- BM25 relevance scoring
|
||||
- Memory-safe processing for large link sets
|
||||
"""
|
||||
|
||||
def __init__(self, logger: Optional[AsyncLogger] = None):
|
||||
"""
|
||||
Initialize the LinkPreview.
|
||||
|
||||
Args:
|
||||
logger: Optional logger instance for recording events
|
||||
"""
|
||||
self.logger = logger
|
||||
self.seeder: Optional[AsyncUrlSeeder] = None
|
||||
self._owns_seeder = False
|
||||
|
||||
async def __aenter__(self):
|
||||
"""Async context manager entry."""
|
||||
await self.start()
|
||||
return self
|
||||
|
||||
async def __aexit__(self, exc_type, exc_val, exc_tb):
|
||||
"""Async context manager exit."""
|
||||
await self.close()
|
||||
|
||||
async def start(self):
|
||||
"""Initialize the URLSeeder instance."""
|
||||
if not self.seeder:
|
||||
self.seeder = AsyncUrlSeeder(logger=self.logger)
|
||||
await self.seeder.__aenter__()
|
||||
self._owns_seeder = True
|
||||
|
||||
async def close(self):
|
||||
"""Clean up resources."""
|
||||
if self.seeder and self._owns_seeder:
|
||||
await self.seeder.__aexit__(None, None, None)
|
||||
self.seeder = None
|
||||
self._owns_seeder = False
|
||||
|
||||
def _log(self, level: str, message: str, tag: str = "LINK_EXTRACT", **kwargs):
|
||||
"""Helper method to safely log messages."""
|
||||
if self.logger:
|
||||
log_method = getattr(self.logger, level, None)
|
||||
if log_method:
|
||||
log_method(message=message, tag=tag, params=kwargs.get('params', {}))
|
||||
|
||||
async def extract_link_heads(
|
||||
self,
|
||||
links: Links,
|
||||
config: CrawlerRunConfig
|
||||
) -> Links:
|
||||
"""
|
||||
Extract head content for filtered links and attach to Link objects.
|
||||
|
||||
Args:
|
||||
links: Links object containing internal and external links
|
||||
config: CrawlerRunConfig with link_preview_config settings
|
||||
|
||||
Returns:
|
||||
Links object with head_data attached to filtered Link objects
|
||||
"""
|
||||
link_config = config.link_preview_config
|
||||
|
||||
# Ensure seeder is initialized
|
||||
await self.start()
|
||||
|
||||
# Filter links based on configuration
|
||||
filtered_urls = self._filter_links(links, link_config)
|
||||
|
||||
if not filtered_urls:
|
||||
self._log("info", "No links matched filtering criteria")
|
||||
return links
|
||||
|
||||
self._log("info", "Extracting head content for {count} filtered links",
|
||||
params={"count": len(filtered_urls)})
|
||||
|
||||
# Extract head content using URLSeeder
|
||||
head_results = await self._extract_heads_parallel(filtered_urls, link_config)
|
||||
|
||||
# Merge results back into Link objects
|
||||
updated_links = self._merge_head_data(links, head_results, config)
|
||||
|
||||
self._log("info", "Completed head extraction for links, {success} successful",
|
||||
params={"success": len([r for r in head_results if r.get("status") == "valid"])})
|
||||
|
||||
return updated_links
|
||||
|
||||
def _filter_links(self, links: Links, link_config: Dict[str, Any]) -> List[str]:
|
||||
"""
|
||||
Filter links based on configuration parameters.
|
||||
|
||||
Args:
|
||||
links: Links object containing internal and external links
|
||||
link_config: Configuration dictionary for link extraction
|
||||
|
||||
Returns:
|
||||
List of filtered URL strings
|
||||
"""
|
||||
filtered_urls = []
|
||||
|
||||
# Include internal links if configured
|
||||
if link_config.include_internal:
|
||||
filtered_urls.extend([link.href for link in links.internal if link.href])
|
||||
self._log("debug", "Added {count} internal links",
|
||||
params={"count": len(links.internal)})
|
||||
|
||||
# Include external links if configured
|
||||
if link_config.include_external:
|
||||
filtered_urls.extend([link.href for link in links.external if link.href])
|
||||
self._log("debug", "Added {count} external links",
|
||||
params={"count": len(links.external)})
|
||||
|
||||
# Apply include patterns
|
||||
include_patterns = link_config.include_patterns
|
||||
if include_patterns:
|
||||
filtered_urls = [
|
||||
url for url in filtered_urls
|
||||
if any(fnmatch.fnmatch(url, pattern) for pattern in include_patterns)
|
||||
]
|
||||
self._log("debug", "After include patterns: {count} links remain",
|
||||
params={"count": len(filtered_urls)})
|
||||
|
||||
# Apply exclude patterns
|
||||
exclude_patterns = link_config.exclude_patterns
|
||||
if exclude_patterns:
|
||||
filtered_urls = [
|
||||
url for url in filtered_urls
|
||||
if not any(fnmatch.fnmatch(url, pattern) for pattern in exclude_patterns)
|
||||
]
|
||||
self._log("debug", "After exclude patterns: {count} links remain",
|
||||
params={"count": len(filtered_urls)})
|
||||
|
||||
# Limit number of links
|
||||
max_links = link_config.max_links
|
||||
if max_links > 0 and len(filtered_urls) > max_links:
|
||||
filtered_urls = filtered_urls[:max_links]
|
||||
self._log("debug", "Limited to {max_links} links",
|
||||
params={"max_links": max_links})
|
||||
|
||||
# Remove duplicates while preserving order
|
||||
seen = set()
|
||||
unique_urls = []
|
||||
for url in filtered_urls:
|
||||
if url not in seen:
|
||||
seen.add(url)
|
||||
unique_urls.append(url)
|
||||
|
||||
self._log("debug", "Final filtered URLs: {count} unique links",
|
||||
params={"count": len(unique_urls)})
|
||||
|
||||
return unique_urls
|
||||
|
||||
async def _extract_heads_parallel(
|
||||
self,
|
||||
urls: List[str],
|
||||
link_config: Dict[str, Any]
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Extract head content for URLs using URLSeeder's parallel processing.
|
||||
|
||||
Args:
|
||||
urls: List of URLs to process
|
||||
link_config: Configuration dictionary for link extraction
|
||||
|
||||
Returns:
|
||||
List of dictionaries with url, status, head_data, and optional relevance_score
|
||||
"""
|
||||
verbose = link_config.verbose
|
||||
concurrency = link_config.concurrency
|
||||
|
||||
if verbose:
|
||||
self._log("info", "Starting batch processing: {total} links with {concurrency} concurrent workers",
|
||||
params={"total": len(urls), "concurrency": concurrency})
|
||||
|
||||
# Create SeedingConfig for URLSeeder
|
||||
seeding_config = SeedingConfig(
|
||||
extract_head=True,
|
||||
concurrency=concurrency,
|
||||
hits_per_sec=getattr(link_config, 'hits_per_sec', None),
|
||||
query=link_config.query,
|
||||
score_threshold=link_config.score_threshold,
|
||||
scoring_method="bm25" if link_config.query else None,
|
||||
verbose=verbose
|
||||
)
|
||||
|
||||
# Use URLSeeder's extract_head_for_urls method with progress tracking
|
||||
if verbose:
|
||||
# Create a wrapper to track progress
|
||||
results = await self._extract_with_progress(urls, seeding_config, link_config)
|
||||
else:
|
||||
results = await self.seeder.extract_head_for_urls(
|
||||
urls=urls,
|
||||
config=seeding_config,
|
||||
concurrency=concurrency,
|
||||
timeout=link_config.timeout
|
||||
)
|
||||
|
||||
return results
|
||||
|
||||
async def _extract_with_progress(
|
||||
self,
|
||||
urls: List[str],
|
||||
seeding_config: SeedingConfig,
|
||||
link_config: Dict[str, Any]
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""Extract head content with progress reporting."""
|
||||
|
||||
total_urls = len(urls)
|
||||
concurrency = link_config.concurrency
|
||||
batch_size = max(1, total_urls // 10) # Report progress every 10%
|
||||
|
||||
# Process URLs and track progress
|
||||
completed = 0
|
||||
successful = 0
|
||||
failed = 0
|
||||
|
||||
# Create a custom progress tracking version
|
||||
# We'll modify URLSeeder's method to include progress callbacks
|
||||
|
||||
# For now, let's use the existing method and report at the end
|
||||
# In a production version, we would modify URLSeeder to accept progress callbacks
|
||||
|
||||
self._log("info", "Processing links in batches...")
|
||||
|
||||
# Use existing method
|
||||
results = await self.seeder.extract_head_for_urls(
|
||||
urls=urls,
|
||||
config=seeding_config,
|
||||
concurrency=concurrency,
|
||||
timeout=link_config.timeout
|
||||
)
|
||||
|
||||
# Count results
|
||||
for result in results:
|
||||
completed += 1
|
||||
if result.get("status") == "valid":
|
||||
successful += 1
|
||||
else:
|
||||
failed += 1
|
||||
|
||||
# Final progress report
|
||||
self._log("info", "Batch processing completed: {completed}/{total} processed, {successful} successful, {failed} failed",
|
||||
params={
|
||||
"completed": completed,
|
||||
"total": total_urls,
|
||||
"successful": successful,
|
||||
"failed": failed
|
||||
})
|
||||
|
||||
return results
|
||||
|
||||
def _merge_head_data(
|
||||
self,
|
||||
original_links: Links,
|
||||
head_results: List[Dict[str, Any]],
|
||||
config: CrawlerRunConfig
|
||||
) -> Links:
|
||||
"""
|
||||
Merge head extraction results back into Link objects.
|
||||
|
||||
Args:
|
||||
original_links: Original Links object
|
||||
head_results: Results from head extraction
|
||||
|
||||
Returns:
|
||||
Links object with head_data attached to matching links
|
||||
"""
|
||||
# Create URL to head_data mapping
|
||||
url_to_head_data = {}
|
||||
for result in head_results:
|
||||
url = result.get("url")
|
||||
if url:
|
||||
url_to_head_data[url] = {
|
||||
"head_data": result.get("head_data", {}),
|
||||
"status": result.get("status", "unknown"),
|
||||
"error": result.get("error"),
|
||||
"relevance_score": result.get("relevance_score")
|
||||
}
|
||||
|
||||
# Update internal links
|
||||
updated_internal = []
|
||||
for link in original_links.internal:
|
||||
if link.href in url_to_head_data:
|
||||
head_info = url_to_head_data[link.href]
|
||||
# Create new Link object with head data and scoring
|
||||
contextual_score = head_info.get("relevance_score")
|
||||
|
||||
updated_link = Link(
|
||||
href=link.href,
|
||||
text=link.text,
|
||||
title=link.title,
|
||||
base_domain=link.base_domain,
|
||||
head_data=head_info["head_data"],
|
||||
head_extraction_status=head_info["status"],
|
||||
head_extraction_error=head_info.get("error"),
|
||||
intrinsic_score=getattr(link, 'intrinsic_score', None),
|
||||
contextual_score=contextual_score
|
||||
)
|
||||
|
||||
# Add relevance score to head_data for backward compatibility
|
||||
if contextual_score is not None:
|
||||
updated_link.head_data = updated_link.head_data or {}
|
||||
updated_link.head_data["relevance_score"] = contextual_score
|
||||
|
||||
# Calculate total score combining intrinsic and contextual scores
|
||||
updated_link.total_score = calculate_total_score(
|
||||
intrinsic_score=updated_link.intrinsic_score,
|
||||
contextual_score=updated_link.contextual_score,
|
||||
score_links_enabled=getattr(config, 'score_links', False),
|
||||
query_provided=bool(config.link_preview_config.query)
|
||||
)
|
||||
|
||||
updated_internal.append(updated_link)
|
||||
else:
|
||||
# Keep original link unchanged
|
||||
updated_internal.append(link)
|
||||
|
||||
# Update external links
|
||||
updated_external = []
|
||||
for link in original_links.external:
|
||||
if link.href in url_to_head_data:
|
||||
head_info = url_to_head_data[link.href]
|
||||
# Create new Link object with head data and scoring
|
||||
contextual_score = head_info.get("relevance_score")
|
||||
|
||||
updated_link = Link(
|
||||
href=link.href,
|
||||
text=link.text,
|
||||
title=link.title,
|
||||
base_domain=link.base_domain,
|
||||
head_data=head_info["head_data"],
|
||||
head_extraction_status=head_info["status"],
|
||||
head_extraction_error=head_info.get("error"),
|
||||
intrinsic_score=getattr(link, 'intrinsic_score', None),
|
||||
contextual_score=contextual_score
|
||||
)
|
||||
|
||||
# Add relevance score to head_data for backward compatibility
|
||||
if contextual_score is not None:
|
||||
updated_link.head_data = updated_link.head_data or {}
|
||||
updated_link.head_data["relevance_score"] = contextual_score
|
||||
|
||||
# Calculate total score combining intrinsic and contextual scores
|
||||
updated_link.total_score = calculate_total_score(
|
||||
intrinsic_score=updated_link.intrinsic_score,
|
||||
contextual_score=updated_link.contextual_score,
|
||||
score_links_enabled=getattr(config, 'score_links', False),
|
||||
query_provided=bool(config.link_preview_config.query)
|
||||
)
|
||||
|
||||
updated_external.append(updated_link)
|
||||
else:
|
||||
# Keep original link unchanged
|
||||
updated_external.append(link)
|
||||
|
||||
# Sort links by relevance score if available
|
||||
if any(hasattr(link, 'head_data') and link.head_data and 'relevance_score' in link.head_data
|
||||
for link in updated_internal + updated_external):
|
||||
|
||||
def get_relevance_score(link):
|
||||
if hasattr(link, 'head_data') and link.head_data and 'relevance_score' in link.head_data:
|
||||
return link.head_data['relevance_score']
|
||||
return 0.0
|
||||
|
||||
updated_internal.sort(key=get_relevance_score, reverse=True)
|
||||
updated_external.sort(key=get_relevance_score, reverse=True)
|
||||
|
||||
return Links(
|
||||
internal=updated_internal,
|
||||
external=updated_external
|
||||
)
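A minimal sketch of driving the extractor directly; the Links object and run config are assumed to come from an earlier crawl, and link_preview_config uses the attribute names referenced above:

    async def preview(links_obj: Links, run_config: CrawlerRunConfig, logger=None) -> Links:
        # LinkPreview owns its AsyncUrlSeeder when used as an async context manager.
        async with LinkPreview(logger) as extractor:
            updated = await extractor.extract_link_heads(links_obj, run_config)
        # Matching links come back with head_data, head_extraction_status and scores
        # filled in, sorted by relevance when a BM25 query produced contextual scores.
        return updated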
|
||||
@@ -31,22 +31,24 @@ class MarkdownGenerationStrategy(ABC):
|
||||
content_filter: Optional[RelevantContentFilter] = None,
|
||||
options: Optional[Dict[str, Any]] = None,
|
||||
verbose: bool = False,
|
||||
content_source: str = "cleaned_html",
|
||||
):
|
||||
self.content_filter = content_filter
|
||||
self.options = options or {}
|
||||
self.verbose = verbose
|
||||
self.content_source = content_source
|
||||
|
||||
@abstractmethod
|
||||
def generate_markdown(
|
||||
self,
|
||||
cleaned_html: str,
|
||||
input_html: str,
|
||||
base_url: str = "",
|
||||
html2text_options: Optional[Dict[str, Any]] = None,
|
||||
content_filter: Optional[RelevantContentFilter] = None,
|
||||
citations: bool = True,
|
||||
**kwargs,
|
||||
) -> MarkdownGenerationResult:
|
||||
"""Generate markdown from cleaned HTML."""
|
||||
"""Generate markdown from the selected input HTML."""
|
||||
pass
|
||||
|
||||
|
||||
@@ -63,6 +65,7 @@ class DefaultMarkdownGenerator(MarkdownGenerationStrategy):
|
||||
Args:
|
||||
content_filter (Optional[RelevantContentFilter]): Content filter for generating fit markdown.
|
||||
options (Optional[Dict[str, Any]]): Additional options for markdown generation. Defaults to None.
|
||||
content_source (str): Source of content to generate markdown from. Options: "cleaned_html", "raw_html", "fit_html". Defaults to "cleaned_html".
|
||||
|
||||
Returns:
|
||||
MarkdownGenerationResult: Result containing raw markdown, fit markdown, fit HTML, and references markdown.
|
||||
@@ -72,8 +75,9 @@ class DefaultMarkdownGenerator(MarkdownGenerationStrategy):
|
||||
self,
|
||||
content_filter: Optional[RelevantContentFilter] = None,
|
||||
options: Optional[Dict[str, Any]] = None,
|
||||
content_source: str = "cleaned_html",
|
||||
):
|
||||
super().__init__(content_filter, options)
|
||||
super().__init__(content_filter, options, verbose=False, content_source=content_source)
|
||||
|
||||
def convert_links_to_citations(
|
||||
self, markdown: str, base_url: str = ""
|
||||
@@ -143,7 +147,7 @@ class DefaultMarkdownGenerator(MarkdownGenerationStrategy):
|
||||
|
||||
def generate_markdown(
|
||||
self,
|
||||
cleaned_html: str,
|
||||
input_html: str,
|
||||
base_url: str = "",
|
||||
html2text_options: Optional[Dict[str, Any]] = None,
|
||||
options: Optional[Dict[str, Any]] = None,
|
||||
@@ -152,16 +156,16 @@ class DefaultMarkdownGenerator(MarkdownGenerationStrategy):
|
||||
**kwargs,
|
||||
) -> MarkdownGenerationResult:
|
||||
"""
|
||||
Generate markdown with citations from cleaned HTML.
|
||||
Generate markdown with citations from the provided input HTML.
|
||||
|
||||
How it works:
|
||||
1. Generate raw markdown from cleaned HTML.
|
||||
1. Generate raw markdown from the input HTML.
|
||||
2. Convert links to citations.
|
||||
3. Generate fit markdown if content filter is provided.
|
||||
4. Return MarkdownGenerationResult.
|
||||
|
||||
Args:
|
||||
cleaned_html (str): Cleaned HTML content.
|
||||
input_html (str): The HTML content to process (selected based on content_source).
|
||||
base_url (str): Base URL for URL joins.
|
||||
html2text_options (Optional[Dict[str, Any]]): HTML2Text options.
|
||||
options (Optional[Dict[str, Any]]): Additional options for markdown generation.
|
||||
@@ -196,14 +200,14 @@ class DefaultMarkdownGenerator(MarkdownGenerationStrategy):
|
||||
h.update_params(**default_options)
|
||||
|
||||
# Ensure we have valid input
|
||||
if not cleaned_html:
|
||||
cleaned_html = ""
|
||||
elif not isinstance(cleaned_html, str):
|
||||
cleaned_html = str(cleaned_html)
|
||||
if not input_html:
|
||||
input_html = ""
|
||||
elif not isinstance(input_html, str):
|
||||
input_html = str(input_html)
|
||||
|
||||
# Generate raw markdown
|
||||
try:
|
||||
raw_markdown = h.handle(cleaned_html)
|
||||
raw_markdown = h.handle(input_html)
|
||||
except Exception as e:
|
||||
raw_markdown = f"Error converting HTML to markdown: {str(e)}"
|
||||
|
||||
@@ -228,7 +232,7 @@ class DefaultMarkdownGenerator(MarkdownGenerationStrategy):
|
||||
if content_filter or self.content_filter:
|
||||
try:
|
||||
content_filter = content_filter or self.content_filter
|
||||
filtered_html = content_filter.filter_content(cleaned_html)
|
||||
filtered_html = content_filter.filter_content(input_html)
|
||||
filtered_html = "\n".join(
|
||||
"<div>{}</div>".format(s) for s in filtered_html
|
||||
)
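A short usage sketch for the new content_source parameter; only "cleaned_html", "raw_html" and "fit_html" are documented above, and the sample values here are illustrative:

    # Build markdown from the raw page HTML instead of the cleaned HTML.
    md_generator = DefaultMarkdownGenerator(content_source="raw_html")
    result = md_generator.generate_markdown(
        input_html="<html><body><h1>Hello</h1><p>World</p></body></html>",
        base_url="https://example.com",
    )
    print(result.raw_markdown)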
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
from pydantic import BaseModel, HttpUrl, PrivateAttr
|
||||
from pydantic import BaseModel, HttpUrl, PrivateAttr, Field
|
||||
from typing import List, Dict, Optional, Callable, Awaitable, Union, Any
|
||||
from typing import AsyncGenerator
|
||||
from typing import Generic, TypeVar
|
||||
@@ -129,6 +129,7 @@ class MarkdownGenerationResult(BaseModel):
|
||||
class CrawlResult(BaseModel):
|
||||
url: str
|
||||
html: str
|
||||
fit_html: Optional[str] = None
|
||||
success: bool
|
||||
cleaned_html: Optional[str] = None
|
||||
media: Dict[str, List[Dict]] = {}
|
||||
@@ -150,6 +151,7 @@ class CrawlResult(BaseModel):
|
||||
redirected_url: Optional[str] = None
|
||||
network_requests: Optional[List[Dict[str, Any]]] = None
|
||||
console_messages: Optional[List[Dict[str, Any]]] = None
|
||||
tables: List[Dict] = Field(default_factory=list) # NEW – [{headers,rows,caption,summary}]
|
||||
|
||||
class Config:
|
||||
arbitrary_types_allowed = True
|
||||
@@ -343,6 +345,12 @@ class Link(BaseModel):
|
||||
text: Optional[str] = ""
|
||||
title: Optional[str] = ""
|
||||
base_domain: Optional[str] = ""
|
||||
head_data: Optional[Dict[str, Any]] = None # Head metadata extracted from link target
|
||||
head_extraction_status: Optional[str] = None # "success", "failed", "skipped"
|
||||
head_extraction_error: Optional[str] = None # Error message if extraction failed
|
||||
intrinsic_score: Optional[float] = None # Quality score based on URL structure, text, and context
|
||||
contextual_score: Optional[float] = None # BM25 relevance score based on query and head content
|
||||
total_score: Optional[float] = None # Combined score from intrinsic and contextual scores
|
||||
|
||||
|
||||
class Media(BaseModel):
|
||||
|
||||
@@ -14,7 +14,7 @@ class PDFCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
async def crawl(self, url: str, **kwargs) -> AsyncCrawlResponse:
|
||||
# Just pass through with empty HTML - scraper will handle actual processing
|
||||
return AsyncCrawlResponse(
|
||||
html="", # Scraper will handle the real work
|
||||
html="Scraper will handle the real work", # Scraper will handle the real work
|
||||
response_headers={"Content-Type": "application/pdf"},
|
||||
status_code=200
|
||||
)
|
||||
@@ -66,6 +66,7 @@ class PDFContentScrapingStrategy(ContentScrapingStrategy):
|
||||
image_save_dir=image_save_dir,
|
||||
batch_size=batch_size
|
||||
)
|
||||
self._temp_files = [] # Track temp files for cleanup
|
||||
|
||||
def scrap(self, url: str, html: str, **params) -> ScrapingResult:
|
||||
"""
|
||||
@@ -124,7 +125,13 @@ class PDFContentScrapingStrategy(ContentScrapingStrategy):
|
||||
finally:
|
||||
# Cleanup temp file if downloaded
|
||||
if url.startswith(("http://", "https://")):
|
||||
Path(pdf_path).unlink(missing_ok=True)
|
||||
try:
|
||||
Path(pdf_path).unlink(missing_ok=True)
|
||||
if pdf_path in self._temp_files:
|
||||
self._temp_files.remove(pdf_path)
|
||||
except Exception as e:
|
||||
if self.logger:
|
||||
self.logger.warning(f"Failed to cleanup temp file {pdf_path}: {e}")
|
||||
|
||||
async def ascrap(self, url: str, html: str, **kwargs) -> ScrapingResult:
|
||||
# For simple cases, you can use the sync version
|
||||
@@ -138,22 +145,45 @@ class PDFContentScrapingStrategy(ContentScrapingStrategy):
|
||||
|
||||
# Create temp file with .pdf extension
|
||||
temp_file = tempfile.NamedTemporaryFile(suffix='.pdf', delete=False)
|
||||
self._temp_files.append(temp_file.name)
|
||||
|
||||
try:
|
||||
# Download PDF with streaming
|
||||
response = requests.get(url, stream=True)
|
||||
if self.logger:
|
||||
self.logger.info(f"Downloading PDF from {url}...")
|
||||
|
||||
# Download PDF with streaming and timeout
|
||||
# Connection timeout: 20s, Read timeout: 600s (10 minutes for large PDFs)
|
||||
response = requests.get(url, stream=True, timeout=(20, 60 * 10))
|
||||
response.raise_for_status()
|
||||
|
||||
# Get file size if available
|
||||
total_size = int(response.headers.get('content-length', 0))
|
||||
downloaded = 0
|
||||
|
||||
# Write to temp file
|
||||
with open(temp_file.name, 'wb') as f:
|
||||
for chunk in response.iter_content(chunk_size=8192):
|
||||
f.write(chunk)
|
||||
downloaded += len(chunk)
|
||||
if self.logger and total_size > 0:
|
||||
progress = (downloaded / total_size) * 100
|
||||
if progress % 10 < 0.1: # Log every 10%
|
||||
self.logger.debug(f"PDF download progress: {progress:.0f}%")
|
||||
|
||||
if self.logger:
|
||||
self.logger.info(f"PDF downloaded successfully: {temp_file.name}")
|
||||
|
||||
return temp_file.name
|
||||
|
||||
except requests.exceptions.Timeout as e:
|
||||
# Clean up temp file if download fails
|
||||
Path(temp_file.name).unlink(missing_ok=True)
|
||||
self._temp_files.remove(temp_file.name)
|
||||
raise RuntimeError(f"Timeout downloading PDF from {url}: {str(e)}")
|
||||
except Exception as e:
|
||||
# Clean up temp file if download fails
|
||||
Path(temp_file.name).unlink(missing_ok=True)
|
||||
self._temp_files.remove(temp_file.name)
|
||||
raise RuntimeError(f"Failed to download PDF from {url}: {str(e)}")
|
||||
|
||||
elif url.startswith("file://"):
|
||||
|
||||
@@ -1054,4 +1054,525 @@ Your output must:
|
||||
5. Include all required fields
|
||||
6. Use valid XPath selectors
|
||||
</output_requirements>
|
||||
"""
|
||||
"""
|
||||
|
||||
GENERATE_SCRIPT_PROMPT = """You are a world-class browser automation specialist. Your sole purpose is to convert a natural language objective and a snippet of HTML into the most **efficient, robust, and simple** script possible to prepare a web page for data extraction.
|
||||
|
||||
Your scripts run **before the crawl** to handle dynamic content, user interactions, and other obstacles. You are a master of two tools: raw **JavaScript** and the high-level **Crawl4ai Script (c4a)**.
|
||||
|
||||
────────────────────────────────────────────────────────
|
||||
## Your Core Philosophy: "Efficiency, Robustness, Simplicity"
|
||||
|
||||
This is your mantra. Every line of code you write must adhere to it.
|
||||
|
||||
1. **Efficiency (Shortest Path):** Generate the absolute minimum number of steps to achieve the goal. Do not include redundant actions. If a `CLICK` on one button achieves the goal, don't also scroll and wait unnecessarily.
|
||||
2. **Robustness (Will Not Break):** Prioritize selectors and methods that are resistant to cosmetic site changes. `data-*` attributes are gold. Dynamic, auto-generated class names (`.class-a8B_x3`) are poison. Always prefer waiting for a state change (`WAIT \`#results\``) over a blind delay (`WAIT 5`).
|
||||
3. **Simplicity (Right Tool for the Job):** Use the simplest tool that works. Prefer a direct `c4a` command over `EVAL` with JavaScript. Only use `EVAL` when the task is impossible with standard commands (e.g., accessing Shadow DOM, complex array filtering).
|
||||
|
||||
────────────────────────────────────────────────────────
|
||||
## Output Mode Selection Logic
|
||||
|
||||
Your choice of output mode is a critical strategic decision.
|
||||
|
||||
* **Use `crawl4ai_script` for:**
|
||||
* Standard, sequential browser actions: login forms, clicking "next page," simple "load more" buttons, accepting cookie banners.
|
||||
* When the user's goal maps clearly to the available `c4a` commands.
|
||||
* When you need to define reusable macros with `PROC`.
|
||||
|
||||
* **Use `javascript` for:**
|
||||
* Complex DOM manipulation that has no `c4a` equivalent (e.g., transforming data, complex filtering).
|
||||
* Interacting with web components inside **Shadow DOM** or **iFrames**.
|
||||
* Implementing sophisticated logic like custom scrolling patterns or handling non-standard events.
|
||||
* When the goal is a fine-grained DOM tweak, not a full user journey.
|
||||
|
||||
**If the user specifies a mode, you MUST respect it.** If not, you must choose the mode that best embodies your core philosophy.
|
||||
|
||||
────────────────────────────────────────────────────────
|
||||
## Available Crawl4ai Commands
|
||||
|
||||
| Command | Arguments / Notes |
|
||||
|------------------------|--------------------------------------------------------------|
|
||||
| GO `<url>` | Navigate to absolute URL |
|
||||
| RELOAD | Hard refresh |
|
||||
| BACK / FORWARD | Browser history nav |
|
||||
| WAIT `<seconds>` | **Avoid!** Passive delay. Use only as a last resort. |
|
||||
| WAIT \`<css>\` `<t>` | **Preferred wait.** Poll selector until found, timeout in seconds. |
|
||||
| WAIT "<text>" `<t>` | Poll page text until found, timeout in seconds. |
|
||||
| CLICK \`<css>\` | Single click on element |
|
||||
| CLICK `<x>` `<y>` | Viewport click |
|
||||
| DOUBLE_CLICK … | Two rapid clicks |
|
||||
| RIGHT_CLICK … | Context-menu click |
|
||||
| MOVE `<x>` `<y>` | Mouse move |
|
||||
| DRAG `<x1>` `<y1>` `<x2>` `<y2>` | Click-drag gesture |
|
||||
| SCROLL UP|DOWN|LEFT|RIGHT `[px]` | Viewport scroll |
|
||||
| TYPE "<text>" | Type into focused element |
|
||||
| CLEAR \`<css>\` | Empty input |
|
||||
| SET \`<css>\` "<val>" | Set element value and dispatch events |
|
||||
| PRESS `<Key>` | Keydown + keyup |
|
||||
| KEY_DOWN `<Key>` / KEY_UP `<Key>` | Separate key events |
|
||||
| EVAL \`<js>\` | **Your fallback.** Run JS when no direct command exists. |
|
||||
| SETVAR $name = <val> | Store constant for reuse |
|
||||
| PROC name … ENDPROC | Define macro |
|
||||
| IF / ELSE / REPEAT | Flow control |
|
||||
| USE "<file.c4a>" | Include another script, avoid circular includes |
|
||||
|
||||
────────────────────────────────────────────────────────
|
||||
## Strategic Principles & Anti-Patterns
|
||||
|
||||
These are your commandments. Do not deviate.
|
||||
|
||||
1. **Selector Quality is Paramount:**
|
||||
* **GOOD:** `[data-testid="submit-button"]`, `#main-content`, `[aria-label="Close dialog"]`
|
||||
* **BAD:** `div > span:nth-child(3)`, `.button-gR3xY_s`, `//div[contains(@class, 'button')]`
|
||||
|
||||
2. **Wait for State, Not for Time:**
|
||||
* **DO:** `CLICK \`#load-more\`` followed by `WAIT \`div.new-item\` 10`. This waits for the *result* of the action.
|
||||
* **DON'T:** `CLICK \`#load-more\`` followed by `WAIT 5`. This is a guess and it will fail.
|
||||
|
||||
3. **Target the Action, Not the Artifact:** If you need to reveal content, click the button that reveals it. Don't try to manually change CSS `display` properties, as this can break the page's internal state.
|
||||
|
||||
4. **DOM-Awareness is Non-Negotiable:**
|
||||
* **Shadow DOM:** `c4a` commands CANNOT pierce the Shadow DOM. If you see a `#shadow-root (open)` in the HTML, you MUST use `EVAL` and `element.shadowRoot.querySelector(...)`.
|
||||
* **iFrames:** Likewise, you MUST use `EVAL` and `iframe.contentDocument.querySelector(...)` to interact with elements inside an iframe.
|
||||
|
||||
5. **Be Idempotent:** Your script must be harmless if run multiple times. Use `IF EXISTS` to check for states before acting (e.g., don't try to log in if already logged in).
|
||||
|
||||
6. **Forbidden Techniques:** Never use `document.write()`. It is destructive. Avoid overly complex JS in `EVAL` that could be simplified into a few `c4a` commands.
|
||||
|
||||
────────────────────────────────────────────────────────
|
||||
## From Vague Goals to Robust Scripts: Your Duty to Infer and Ensure Reliability
|
||||
|
||||
This is your most important responsibility. Users are not automation experts. They will provide incomplete or vague instructions. Your job is to be the expert—to infer their true goal and build a script that is reliable by default. You must add the "invisible scaffolding" of checks and waits to ensure the page is stable and ready for the crawler. **A vague user prompt must still result in a robust, complete script.**
|
||||
|
||||
Study these examples. No matter which query is given, your output must be the single, robust solution.
|
||||
|
||||
### 1. Scenario: Basic Search Query
|
||||
|
||||
* **High Detail Query:** "Find the search box and search button. Wait for the search box to be visible, click it, clear it, type 'r2d2', click the search button, and then wait for the search results to appear."
|
||||
* **Medium Detail Query:** "Find the search box and search for 'r2d2', click the search button until you get a list of items."
|
||||
* **Low Detail Query:** "Search for r2d2."
|
||||
|
||||
**THE CORRECT, ROBUST OUTPUT (for all three queries):**
|
||||
```
|
||||
WAIT `input[type="search"]` 10
|
||||
SET `input[type="search"]` "r2d2"
|
||||
CLICK `button[aria-label="Search"]`
|
||||
WAIT `div.search-results-container` 15
|
||||
```
|
||||
**Rationale:** You correctly infer the need to `WAIT` for the input first. You use the more efficient `SET` command. Most importantly, you **infer the crucial final step**: waiting for a results container to appear, confirming the search action was successful.
|
||||
|
||||
### 2. Scenario: Clicking a "Load More" Button
|
||||
|
||||
* **High Detail Query:** "Click the button with the text 'Load More'. Afterward, wait for a new item with the class '.product-tile' to show up on the page."
|
||||
* **Medium Detail Query:** "Click the load more button to see more products."
|
||||
* **Low Detail Query:** "Load more items."
|
||||
|
||||
**THE CORRECT, ROBUST OUTPUT:**
|
||||
```
|
||||
IF EXISTS `button.load-more` THEN
|
||||
CLICK `button.load-more`
|
||||
WAIT `div.new-item-indicator` 8
|
||||
ENDIF
|
||||
```
|
||||
**Rationale:** You wrap the action in `IF EXISTS` to prevent errors if the button is not present (e.g., on the last page). You correctly infer the need to wait for the *consequence* of the click—a new item appearing—rather than a blind `WAIT 2`.
|
||||
|
||||
### 3. Scenario: Applying a Filter from a Dropdown
|
||||
|
||||
* **High Detail Query:** "First, click the dropdown with id 'color-filter'. Then, from the open menu, click the option that says 'Blue'. Finally, wait for the product grid to update."
|
||||
* **Medium Detail Query:** "Filter the products by the color Blue."
|
||||
* **Low Detail Query:** "Show blue products."
|
||||
|
||||
**THE CORRECT, ROBUST OUTPUT:**
|
||||
```
|
||||
CLICK `#color-filter`
|
||||
WAIT `[data-value="blue"]` 3
|
||||
CLICK `[data-value="blue"]`
|
||||
WAIT `#product-grid[data-status="updated"]` 10
|
||||
```
|
||||
**Rationale:** You infer the need for two waits: one for the menu options to appear after the first click, and a second for the main content to update after the filter is applied. This prevents race conditions.
|
||||
|
||||
### 4. Scenario: User Authentication (Login)
|
||||
|
||||
* **High Detail Query:** "Fill username with 'USER_EMAIL', fill password with 'USER_PASS', click login, and wait for the dashboard to appear."
|
||||
* **Medium Detail Query:** "Log in as USER_EMAIL with password USER_PASS."
|
||||
* **Low Detail Query:** "Log in."
|
||||
|
||||
**THE CORRECT, ROBUST OUTPUT:**
|
||||
```
|
||||
IF EXISTS `[data-testid="logout-button"]` THEN
|
||||
EVAL `console.log("Already logged in.")`
|
||||
ELSE
|
||||
WAIT `input[name="username"]` 10
|
||||
SET `input[name="username"]` "USER_EMAIL"
|
||||
SET `input[name="password"]` "USER_PASS"
|
||||
CLICK `button[type="submit"]`
|
||||
WAIT `[data-testid="user-dashboard"]` 15
|
||||
ENDIF
|
||||
```
|
||||
**Rationale:** You build an **idempotent** script. You first check if the user is *already* logged in. If not, you proceed with the login and then, critically, `WAIT` for a post-login element to confirm success. You use placeholders when credentials are not provided in low-detail queries.
|
||||
|
||||
### 5. Scenario: Dismissing an Interstitial Modal
|
||||
|
||||
* **High Detail Query:** "Check if a popup with id '#promo-modal' exists. If it does, click the close button inside it with class '.close-x'."
|
||||
* **Medium Detail Query:** "Close the promotional popup."
|
||||
* **Low Detail Query:** "Get rid of the popup."
|
||||
|
||||
**THE CORRECT, ROBUST OUTPUT:**
|
||||
```
|
||||
IF EXISTS `div#promo-modal` THEN
|
||||
CLICK `div#promo-modal button.close-x`
|
||||
ENDIF
|
||||
```
|
||||
**Rationale:** You correctly identify this as a conditional action. The script must not fail if the popup doesn't appear. The `IF EXISTS` block is the perfect, robust way to handle this optional interaction.
|
||||
|
||||
────────────────────────────────────────────────────────
|
||||
## Advanced Scenarios & Master-Level Examples
|
||||
|
||||
Study these solutions. Understand the *why* behind each choice.
|
||||
|
||||
### Scenario: Interacting with a Web Component (Shadow DOM)
|
||||
**Goal:** Click a button inside a custom element `<user-card>`.
|
||||
**HTML Snippet:** `<user-card><#shadow-root (open)><button>Details</button></#shadow-root></user-card>`
|
||||
**Correct Mode:** `javascript` (or `c4a` with `EVAL`)
|
||||
**Rationale:** Standard selectors can't cross the shadow boundary. JavaScript is mandatory.
|
||||
|
||||
```javascript
|
||||
// Solution in pure JS mode
|
||||
const card = document.querySelector('user-card');
|
||||
if (card && card.shadowRoot) {
|
||||
const button = card.shadowRoot.querySelector('button');
|
||||
if (button) button.click();
|
||||
}
|
||||
```
|
||||
```
|
||||
# Solution in c4a mode (using EVAL as the weapon of choice)
|
||||
EVAL `
|
||||
const card = document.querySelector('user-card');
|
||||
if (card && card.shadowRoot) {
|
||||
const button = card.shadowRoot.querySelector('button');
|
||||
if (button) button.click();
|
||||
}
|
||||
`
|
||||
```
|
||||
|
||||
### Scenario: Handling a Cookie Banner
|
||||
**Goal:** Accept the cookies to dismiss the modal.
|
||||
**HTML Snippet:** `<div id="cookie-consent-modal"><button id="accept-cookies">Accept All</button></div>`
|
||||
**Correct Mode:** `crawl4ai_script`
|
||||
**Rationale:** A simple, direct action. `c4a` is cleaner and more declarative.
|
||||
|
||||
```
|
||||
# The most efficient solution
|
||||
IF EXISTS `#cookie-consent-modal` THEN
|
||||
CLICK `#accept-cookies`
|
||||
WAIT `div.content-loaded` 5
|
||||
ENDIF
|
||||
```
|
||||
|
||||
### Scenario: Infinite Scroll Page
|
||||
**Goal:** Scroll down 5 times to load more content.
|
||||
**HTML Snippet:** `(A page with a long body and no "load more" button)`
|
||||
**Correct Mode:** `crawl4ai_script`
|
||||
**Rationale:** `REPEAT` is designed for exactly this. It's more readable than a JS loop for this simple task.
|
||||
|
||||
```
|
||||
REPEAT (
|
||||
SCROLL DOWN 1000,
|
||||
5
|
||||
)
|
||||
WAIT 2
|
||||
```
|
||||
|
||||
### Scenario: Hover-to-Reveal Menu
|
||||
**Goal:** Hover over "Products" to open the menu, then click "Laptops".
|
||||
**HTML Snippet:** `<a href="/products" id="products-menu">Products</a> <div class="menu-dropdown"><a href="/laptops">Laptops</a></div>`
|
||||
**Correct Mode:** `crawl4ai_script` (with `EVAL`)
|
||||
**Rationale:** `c4a` has no `HOVER` command. `EVAL` is the perfect tool to dispatch the `mouseover` event.
|
||||
|
||||
```
|
||||
EVAL `document.querySelector('#products-menu').dispatchEvent(new MouseEvent('mouseover', { bubbles: true }))`
|
||||
WAIT `div.menu-dropdown a[href="/laptops"]` 3
|
||||
CLICK `div.menu-dropdown a[href="/laptops"]`
|
||||
```
|
||||
|
||||
### Scenario: Login Form
|
||||
**Goal:** Fill and submit a login form.
|
||||
**HTML Snippet:** `<form><input name="email"><input name="password" type="password"><button type="submit"></button></form>`
|
||||
**Correct Mode:** `crawl4ai_script`
|
||||
**Rationale:** This is the canonical use case for `c4a`. The commands map 1:1 to the user journey.
|
||||
|
||||
```
|
||||
WAIT `form` 10
|
||||
SET `input[name="email"]` "USER_EMAIL"
|
||||
SET `input[name="password"]` "USER_PASS"
|
||||
CLICK `button[type="submit"]`
|
||||
WAIT `[data-testid="user-dashboard"]` 12
|
||||
```
|
||||
|
||||
────────────────────────────────────────────────────────
|
||||
## Final Output Mandate
|
||||
|
||||
1. **CODE ONLY.** Your entire response must be the script body.
|
||||
2. **NO CHAT.** Do not say "Here is the script" or "This should work."
|
||||
3. **NO MARKDOWN.** Do not wrap your code in ` ``` ` fences.
|
||||
4. **NO COMMENTS.** Do not add comments to the final code output.
|
||||
5. **SYNTACTICALLY PERFECT.** The script must be immediately executable.
|
||||
6. **UTF-8, STANDARD QUOTES.** Use `"` for string literals, not `“` or `”`.
|
||||
|
||||
You are an engine of automation. Now, receive the user's request and produce the optimal script."""
GENERATE_JS_SCRIPT_PROMPT = """# The World-Class JavaScript Automation Scripter
|
||||
|
||||
You are a world-class browser automation specialist. Your sole purpose is to convert a natural language objective and a snippet of HTML into the most **efficient, robust, and simple** pure JavaScript script possible to prepare a web page for data extraction.
|
||||
|
||||
Your scripts will be executed directly in the browser (e.g., via Playwright's `page.evaluate()`) to handle dynamic content, user interactions, and other obstacles before the page is crawled. You are a master of browser-native JavaScript APIs.
|
||||
|
||||
────────────────────────────────────────────────────────
|
||||
## Your Core Philosophy: "Efficiency, Robustness, Simplicity"
|
||||
|
||||
This is your mantra. Every line of JavaScript you write must adhere to it.
|
||||
|
||||
1. **Efficiency (Shortest Path):** Generate the absolute minimum number of steps to achieve the goal. Do not include redundant actions. Your code should be concise and direct.
|
||||
2. **Robustness (Will Not Break):** Prioritize selectors that are resistant to cosmetic site changes. `data-*` attributes are gold. Dynamic, auto-generated class names (`.class-a8B_x3`) are poison. Always prefer waiting for a state change over a blind `setTimeout`.
|
||||
3. **Simplicity (Right Tool for the Job):** Use simple, direct DOM methods (`.querySelector`, `.click()`) whenever possible. Avoid overly complex or fragile logic when a simpler approach exists.
|
||||
|
||||
────────────────────────────────────────────────────────
|
||||
## Essential JavaScript Automation Patterns & Toolkit
|
||||
|
||||
All code should be wrapped in an `async` Immediately Invoked Function Expression `(async () => { ... })();` to allow for top-level `await` and to avoid polluting the global scope.
|
||||
|
||||
| Task | Best-Practice JavaScript Implementation |
|
||||
| -------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Wait for Element** | Create and use a robust `waitForElement` helper function. This is your most important tool. <br> `const waitForElement = (selector, timeout = 10000) => new Promise((resolve, reject) => { const el = document.querySelector(selector); if (el) return resolve(el); const observer = new MutationObserver(() => { const el = document.querySelector(selector); if (el) { observer.disconnect(); resolve(el); } }); observer.observe(document.body, { childList: true, subtree: true }); setTimeout(() => { observer.disconnect(); reject(new Error(`Timeout waiting for ${selector}`)); }, timeout); });` |
|
||||
| **Click Element** | `const el = await waitForElement('selector'); if (el) el.click();` |
|
||||
| **Set Input Value** | `const input = await waitForElement('selector'); if (input) { input.value = 'new value'; input.dispatchEvent(new Event('input', { bubbles: true })); input.dispatchEvent(new Event('change', { bubbles: true })); }` <br> *Crucially, always dispatch `input` and `change` events to trigger framework reactivity.* |
|
||||
| **Check Existence** | `const el = document.querySelector('selector'); if (el) { /* ... it exists */ }` |
|
||||
| **Scroll** | `window.scrollBy(0, window.innerHeight);` |
|
||||
| **Deal with Time** | Use `await new Promise(r => setTimeout(r, 500));` for short, unavoidable pauses after an action. **Avoid long, blind waits.** |
|
||||
|
||||
REMEMBER: Generate highly deterministic CSS selectors. If you refer to a specific button, be precise about which one; otherwise you may capture elements you do not need. Always be very specific about the element you want to interact with.
|
||||
|
||||
────────────────────────────────────────────────────────
|
||||
## The Art of High-Specificity Selectors: Your Defense Against Ambiguity
|
||||
|
||||
This is your most critical skill for ensuring robustness. **You must assume the provided HTML is only a small fragment of the entire page.** A selector that looks unique in the fragment could be disastrously generic on the full page. Your primary defense is to **anchor your selectors to the most specific, stable parent element available in the given HTML context.**
|
||||
|
||||
Think of it as creating a "sandbox" for your selectors.
|
||||
|
||||
**Your Guiding Principle:** Start from a unique parent, then find the child.
|
||||
|
||||
### Scenario: Selecting a Submit Button within a Login Form
|
||||
|
||||
**HTML Snippet Provided:**
|
||||
```html
|
||||
<div class="user-auth-module" id="login-widget">
|
||||
<h2>Member Login</h2>
|
||||
<form action="/login">
|
||||
<input name="email" type="email">
|
||||
<input name="password" type="password">
|
||||
<button type="submit">Sign In</button>
|
||||
</form>
|
||||
</div>
|
||||
```
|
||||
|
||||
* **TERRIBLE (High Risk):** `button[type="submit"]`
|
||||
* **Why it's bad:** There could be dozens of other forms on the full page (e.g., a newsletter signup, a search bar in the header). This selector is a shot in the dark.
|
||||
|
||||
* **BETTER (Lower Risk):** `#login-widget button[type="submit"]`
|
||||
* **Why it's better:** It's anchored to a unique ID (`#login-widget`). This dramatically reduces the chance of ambiguity.
|
||||
|
||||
* **EXCELLENT (Minimal Risk):** `div[id="login-widget"] form button[type="submit"]`
|
||||
* **Why it's best:** This is a highly specific, descriptive path. It says, "Find the login widget, then the form inside it, and then the submit button inside *that* form." It is virtually guaranteed to be unique and is resilient to minor layout changes within the form.
|
||||
|
||||
### Scenario: Selecting a "Add to Cart" Button
|
||||
|
||||
**HTML Snippet Provided:**
|
||||
```html
|
||||
<section data-testid="product-details-main">
|
||||
<h1>Awesome T-Shirt</h1>
|
||||
<div class="product-actions">
|
||||
<button class="add-to-cart-btn">Add to Cart</button>
|
||||
</div>
|
||||
</section>
|
||||
```
|
||||
|
||||
* **TERRIBLE (High Risk):** `.add-to-cart-btn`
|
||||
* **Why it's bad:** A "related products" section outside this snippet might also use the same class name.
|
||||
|
||||
* **EXCELLENT (Minimal Risk):** `[data-testid="product-details-main"] .add-to-cart-btn`
|
||||
* **Why it's best:** It uses the stable `data-testid` attribute of the parent section as an anchor. This is the most robust pattern.
|
||||
|
||||
**Your Mandate:** Always examine the provided HTML for a stable, unique parent (like an element with an `id`, a `data-testid`, or a highly specific combination of classes) and use it as the root of your selectors. **NEVER generate a generic, un-anchored selector if a better, more specific parent is available in the context.**
|
||||
|
||||
|
||||
────────────────────────────────────────────────────────
|
||||
## Strategic Principles & Anti-Patterns
|
||||
|
||||
These are your commandments. Do not deviate.
|
||||
|
||||
1. **Selector Quality is Paramount:**
|
||||
* **GOOD:** `[data-testid="submit-button"]`, `#main-content`, `[aria-label="Close dialog"]`
|
||||
* **BAD:** `div > span:nth-child(3)`, `.button-gR3xY_s`, `//div[contains(@class, 'button')]`
|
||||
|
||||
2. **Wait for State, Not for Time:**
|
||||
* **DO:** `(await waitForElement('#load-more')).click(); await waitForElement('div.new-item');` This waits for the *result* of the action.
|
||||
* **DON'T:** `document.querySelector('#load-more').click(); await new Promise(r => setTimeout(r, 5000));` This is a guess and it will fail.
|
||||
|
||||
3. **Target the Action, Not the Artifact:** If you need to reveal content, click the button that reveals it. Don't try to manually change CSS `display` properties, as this can break the page's internal state.
|
||||
|
||||
4. **DOM-Awareness is Non-Negotiable:**
|
||||
* **Shadow DOM:** You MUST use `element.shadowRoot.querySelector(...)` to access elements inside a `#shadow-root (open)`.
|
||||
* **iFrames:** You MUST use `iframe.contentDocument.querySelector(...)` to interact with elements inside an iframe.
|
||||
|
||||
5. **Be Idempotent:** Your script must be harmless if run multiple times. Use `if (document.querySelector(...))` checks to avoid re-doing actions unnecessarily.
|
||||
|
||||
6. **Forbidden Techniques:** Never use `document.write()`. It is destructive.
|
||||
|
||||
────────────────────────────────────────────────────────
|
||||
## From Vague Goals to Robust Scripts: Your Duty to Infer and Ensure Reliability
|
||||
|
||||
This is your most important responsibility. Users are not automation experts. They will provide incomplete or vague instructions. Your job is to be the expert—to infer their true goal and build a script that is reliable by default. **A vague user prompt must still result in a robust, complete script.**
|
||||
|
||||
Study these examples. No matter which query is given, your output must be the single, robust solution.
|
||||
|
||||
### 1. Scenario: Basic Search Query
|
||||
|
||||
* **High Detail Query:** "Find the search box and search button. Wait for the search box to be visible, click it, clear it, type 'r2d2', click the search button, and then wait for the search results to appear."
|
||||
* **Medium Detail Query:** "Find the search box and search for 'r2d2'."
|
||||
* **Low Detail Query:** "Search for r2d2."
|
||||
|
||||
**THE CORRECT, ROBUST JAVASCRIPT OUTPUT (for all three queries):**
|
||||
```javascript
|
||||
(async () => {
|
||||
const waitForElement = (selector, timeout = 10000) => new Promise((resolve, reject) => { const el = document.querySelector(selector); if (el) return resolve(el); const observer = new MutationObserver(() => { const el = document.querySelector(selector); if (el) { observer.disconnect(); resolve(el); } }); observer.observe(document.body, { childList: true, subtree: true }); setTimeout(() => { observer.disconnect(); reject(new Error(`Timeout waiting for ${selector}`)); }, timeout); });
|
||||
try {
|
||||
const searchInput = await waitForElement('input[type="search"], input[aria-label*="search"]');
|
||||
searchInput.value = 'r2d2';
|
||||
searchInput.dispatchEvent(new Event('input', { bubbles: true }));
|
||||
const searchButton = await waitForElement('button[type="submit"], button[aria-label*="search"]');
|
||||
searchButton.click();
|
||||
await waitForElement('div.search-results-container, #search-results');
|
||||
} catch (e) {
|
||||
console.error('Search script failed:', e.message);
|
||||
}
|
||||
})();
|
||||
```
|
||||
|
||||
### 2. Scenario: Clicking a "Load More" Button
|
||||
|
||||
* **High Detail Query:** "Click the button with the text 'Load More'. Afterward, wait for a new item with the class '.product-tile' to show up."
|
||||
* **Medium Detail Query:** "Click the load more button."
|
||||
* **Low Detail Query:** "Load more items."
|
||||
|
||||
**THE CORRECT, ROBUST JAVASCRIPT OUTPUT:**
|
||||
```javascript
|
||||
(async () => {
|
||||
const loadMoreButton = document.querySelector('button.load-more, [data-testid="load-more"]');
|
||||
if (loadMoreButton) {
|
||||
const initialItemCount = document.querySelectorAll('.product-tile').length;
|
||||
loadMoreButton.click();
|
||||
const waitForNewItem = (timeout = 8000) => new Promise((resolve, reject) => { const t0 = Date.now(); const check = () => { if (document.querySelectorAll('.product-tile').length > initialItemCount) return resolve(); if (Date.now() - t0 > timeout) return reject(new Error('Timeout waiting for new items to load.')); setTimeout(check, 200); }; check(); });
|
||||
await waitForNewItem();
|
||||
}
|
||||
})();
|
||||
```
|
||||
|
||||
### 3. Scenario: User Authentication (Login)
|
||||
|
||||
* **High Detail Query:** "Fill username with 'USER_EMAIL', password with 'USER_PASS', click login, and wait for the dashboard."
|
||||
* **Medium Detail Query:** "Log in as USER_EMAIL."
|
||||
* **Low Detail Query:** "Log in."
|
||||
|
||||
**THE CORRECT, ROBUST JAVASCRIPT OUTPUT:**
|
||||
```javascript
|
||||
(async () => {
|
||||
if (document.querySelector('[data-testid="logout-button"]')) {
|
||||
console.log('Already logged in.');
|
||||
return;
|
||||
}
|
||||
const waitForElement = (selector, timeout = 10000) => new Promise((resolve, reject) => { const el = document.querySelector(selector); if (el) return resolve(el); const observer = new MutationObserver(() => { const el = document.querySelector(selector); if (el) { observer.disconnect(); resolve(el); } }); observer.observe(document.body, { childList: true, subtree: true }); setTimeout(() => { observer.disconnect(); reject(new Error(`Timeout waiting for ${selector}`)); }, timeout); });
|
||||
try {
|
||||
const userInput = await waitForElement('input[name*="user"], input[name*="email"]');
|
||||
userInput.value = 'USER_EMAIL';
|
||||
userInput.dispatchEvent(new Event('input', { bubbles: true }));
|
||||
const passInput = await waitForElement('input[name*="pass"], input[type="password"]');
|
||||
passInput.value = 'USER_PASS';
|
||||
passInput.dispatchEvent(new Event('input', { bubbles: true }));
|
||||
const submitButton = await waitForElement('button[type="submit"]');
|
||||
submitButton.click();
|
||||
await waitForElement('[data-testid="user-dashboard"], #dashboard, .account-page');
|
||||
} catch (e) {
|
||||
console.error('Login script failed:', e.message);
|
||||
}
|
||||
})();
|
||||
```
────────────────────────────────────────────────────────
|
||||
## Final Output Mandate
|
||||
|
||||
1. **CODE ONLY.** Your entire response must be the script body.
|
||||
2. **NO CHAT.** Do not say "Here is the script" or "This should work."
|
||||
3. **NO MARKDOWN.** Do not wrap your code in ` ``` ` fences.
|
||||
4. **NO COMMENTS.** Do not add comments to the final code output, except within the logic where it's a best practice.
|
||||
5. **SYNTACTICALLY PERFECT.** The script must be a single, self-contained block, immediately executable. Wrap it in `(async () => { ... })();`.
|
||||
6. **UTF-8, STANDARD QUOTES.** Use `'` for string literals, not `“` or `”`.
|
||||
|
||||
You are an engine of automation. Now, receive the user's request and produce the optimal JavaScript."""
@@ -4,6 +4,9 @@ from itertools import cycle
|
||||
import os
|
||||
|
||||
|
||||
########### ATTENTION PEOPLE OF EARTH ###########
|
||||
# I have moved this config to async_configs.py and kept it here in case someone is still importing it; however,
# be a dear and use `from crawl4ai import ProxyConfig` instead :)
|
||||
class ProxyConfig:
|
||||
def __init__(
|
||||
self,
|
||||
@@ -119,12 +122,12 @@ class ProxyRotationStrategy(ABC):
|
||||
"""Base abstract class for proxy rotation strategies"""
|
||||
|
||||
@abstractmethod
|
||||
- async def get_next_proxy(self) -> Optional[Dict]:
+ async def get_next_proxy(self) -> Optional[ProxyConfig]:
|
||||
"""Get next proxy configuration from the strategy"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
- def add_proxies(self, proxies: List[Dict]):
+ def add_proxies(self, proxies: List[ProxyConfig]):
|
||||
"""Add proxy configurations to the strategy"""
|
||||
pass
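With the signatures now typed against `ProxyConfig`, a concrete strategy is straightforward to write. The following is a minimal, illustrative round-robin sketch (not necessarily the library's own implementation); it assumes `ProxyConfig` (importable as `from crawl4ai import ProxyConfig`, per the note above) and `ProxyRotationStrategy` are already in scope.

```python
# Minimal sketch of a concrete strategy against the updated abstract API.
# "RoundRobinProxyStrategy" is an illustrative name, not a claim about the library.
from itertools import cycle
from typing import List, Optional


class RoundRobinProxyStrategy(ProxyRotationStrategy):
    """Cycle through the registered proxies in order."""

    def __init__(self, proxies: Optional[List[ProxyConfig]] = None):
        self._proxies: List[ProxyConfig] = []
        self._cycle = None
        if proxies:
            self.add_proxies(proxies)

    def add_proxies(self, proxies: List[ProxyConfig]):
        # Register new proxies and rebuild the rotation cycle.
        self._proxies.extend(proxies)
        self._cycle = cycle(self._proxies)

    async def get_next_proxy(self) -> Optional[ProxyConfig]:
        # Return the next proxy in round-robin order, or None if none registered.
        return next(self._cycle) if self._cycle else None
```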
crawl4ai/script/__init__.py (new file, 35 lines)
@@ -0,0 +1,35 @@
|
||||
"""
|
||||
C4A-Script: A domain-specific language for web automation in Crawl4AI
|
||||
"""
|
||||
|
||||
from .c4a_compile import C4ACompiler, compile, validate, compile_file
|
||||
from .c4a_result import (
|
||||
CompilationResult,
|
||||
ValidationResult,
|
||||
ErrorDetail,
|
||||
WarningDetail,
|
||||
ErrorType,
|
||||
Severity,
|
||||
Suggestion
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
# Main compiler
|
||||
"C4ACompiler",
|
||||
|
||||
# Convenience functions
|
||||
"compile",
|
||||
"validate",
|
||||
"compile_file",
|
||||
|
||||
# Result types
|
||||
"CompilationResult",
|
||||
"ValidationResult",
|
||||
"ErrorDetail",
|
||||
"WarningDetail",
|
||||
|
||||
# Enums
|
||||
"ErrorType",
|
||||
"Severity",
|
||||
"Suggestion"
|
||||
]
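A minimal usage sketch for the new package, exercising the convenience functions exported above (the example script is illustrative; both calls follow the no-exceptions Result pattern described in `c4a_compile.py` below):

```python
# Illustrative usage of the public API exported by crawl4ai/script/__init__.py.
from crawl4ai.script import compile, validate

script = """
GO https://example.com
WAIT `#content` 10
CLICK `button.load-more`
"""

check = validate(script)            # ValidationResult; never raises
if check.valid:
    result = compile(script)        # CompilationResult with generated JS statements
    print(result.js_code)
else:
    print(check.first_error.formatted_message)
```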
crawl4ai/script/c4a_compile.py (new file, 398 lines)
@@ -0,0 +1,398 @@
|
||||
"""
|
||||
Clean C4A-Script API with Result pattern
|
||||
No exceptions - always returns results
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
import pathlib
|
||||
import re
|
||||
from typing import Union, List, Optional
|
||||
|
||||
# JSON_SCHEMA_BUILDER is still used elsewhere,
|
||||
# but we now also need the new script-builder prompt.
|
||||
from ..prompts import GENERATE_JS_SCRIPT_PROMPT, GENERATE_SCRIPT_PROMPT
|
||||
import logging
|
||||
|
||||
|
||||
from .c4a_result import (
|
||||
CompilationResult, ValidationResult, ErrorDetail, WarningDetail,
|
||||
ErrorType, Severity, Suggestion
|
||||
)
|
||||
from .c4ai_script import Compiler
|
||||
from lark.exceptions import UnexpectedToken, UnexpectedCharacters, VisitError
|
||||
from ..async_configs import LLMConfig
|
||||
from ..utils import perform_completion_with_backoff
|
||||
|
||||
|
||||
class C4ACompiler:
|
||||
"""Main compiler with result-based API"""
|
||||
|
||||
# Error code mapping
|
||||
ERROR_CODES = {
|
||||
"missing_then": "E001",
|
||||
"missing_paren": "E002",
|
||||
"missing_comma": "E003",
|
||||
"missing_endproc": "E004",
|
||||
"undefined_proc": "E005",
|
||||
"missing_backticks": "E006",
|
||||
"invalid_command": "E007",
|
||||
"syntax_error": "E999"
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def compile(cls, script: Union[str, List[str]], root: Optional[pathlib.Path] = None) -> CompilationResult:
|
||||
"""
|
||||
Compile C4A-Script to JavaScript
|
||||
|
||||
Args:
|
||||
script: C4A-Script as string or list of lines
|
||||
root: Root directory for includes
|
||||
|
||||
Returns:
|
||||
CompilationResult with success status and JS code or errors
|
||||
"""
|
||||
# Normalize input
|
||||
if isinstance(script, list):
|
||||
script_text = '\n'.join(script)
|
||||
script_lines = script
|
||||
else:
|
||||
script_text = script
|
||||
script_lines = script.split('\n')
|
||||
|
||||
try:
|
||||
# Try compilation
|
||||
compiler = Compiler(root)
|
||||
js_code = compiler.compile(script_text)
|
||||
|
||||
# Success!
|
||||
result = CompilationResult(
|
||||
success=True,
|
||||
js_code=js_code,
|
||||
metadata={
|
||||
"lineCount": len(script_lines),
|
||||
"statementCount": len(js_code)
|
||||
}
|
||||
)
|
||||
|
||||
# Add any warnings (future feature)
|
||||
# result.warnings = cls._check_warnings(script_text)
|
||||
|
||||
return result
|
||||
|
||||
except Exception as e:
|
||||
# Convert exception to ErrorDetail
|
||||
error = cls._exception_to_error(e, script_lines)
|
||||
return CompilationResult(
|
||||
success=False,
|
||||
errors=[error],
|
||||
metadata={
|
||||
"lineCount": len(script_lines)
|
||||
}
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def validate(cls, script: Union[str, List[str]]) -> ValidationResult:
|
||||
"""
|
||||
Validate script syntax without generating code
|
||||
|
||||
Args:
|
||||
script: C4A-Script to validate
|
||||
|
||||
Returns:
|
||||
ValidationResult with validity status and any errors
|
||||
"""
|
||||
result = cls.compile(script)
|
||||
|
||||
return ValidationResult(
|
||||
valid=result.success,
|
||||
errors=result.errors,
|
||||
warnings=result.warnings
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def compile_file(cls, path: Union[str, pathlib.Path]) -> CompilationResult:
|
||||
"""
|
||||
Compile a C4A-Script file
|
||||
|
||||
Args:
|
||||
path: Path to the file
|
||||
|
||||
Returns:
|
||||
CompilationResult
|
||||
"""
|
||||
path = pathlib.Path(path)
|
||||
|
||||
if not path.exists():
|
||||
error = ErrorDetail(
|
||||
type=ErrorType.RUNTIME,
|
||||
code="E100",
|
||||
severity=Severity.ERROR,
|
||||
message=f"File not found: {path}",
|
||||
line=0,
|
||||
column=0,
|
||||
source_line=""
|
||||
)
|
||||
return CompilationResult(success=False, errors=[error])
|
||||
|
||||
try:
|
||||
script = path.read_text()
|
||||
return cls.compile(script, root=path.parent)
|
||||
except Exception as e:
|
||||
error = ErrorDetail(
|
||||
type=ErrorType.RUNTIME,
|
||||
code="E101",
|
||||
severity=Severity.ERROR,
|
||||
message=f"Error reading file: {str(e)}",
|
||||
line=0,
|
||||
column=0,
|
||||
source_line=""
|
||||
)
|
||||
return CompilationResult(success=False, errors=[error])
|
||||
|
||||
@classmethod
|
||||
def _exception_to_error(cls, exc: Exception, script_lines: List[str]) -> ErrorDetail:
|
||||
"""Convert an exception to ErrorDetail"""
|
||||
|
||||
if isinstance(exc, UnexpectedToken):
|
||||
return cls._handle_unexpected_token(exc, script_lines)
|
||||
elif isinstance(exc, UnexpectedCharacters):
|
||||
return cls._handle_unexpected_chars(exc, script_lines)
|
||||
elif isinstance(exc, ValueError):
|
||||
return cls._handle_value_error(exc, script_lines)
|
||||
else:
|
||||
# Generic error
|
||||
return ErrorDetail(
|
||||
type=ErrorType.SYNTAX,
|
||||
code=cls.ERROR_CODES["syntax_error"],
|
||||
severity=Severity.ERROR,
|
||||
message=str(exc),
|
||||
line=1,
|
||||
column=1,
|
||||
source_line=script_lines[0] if script_lines else ""
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def _handle_unexpected_token(cls, exc: UnexpectedToken, script_lines: List[str]) -> ErrorDetail:
|
||||
"""Handle UnexpectedToken errors"""
|
||||
line = exc.line
|
||||
column = exc.column
|
||||
|
||||
# Get context lines
|
||||
source_line = script_lines[line - 1] if 0 < line <= len(script_lines) else ""
|
||||
line_before = script_lines[line - 2] if line > 1 and line <= len(script_lines) + 1 else None
|
||||
line_after = script_lines[line] if 0 < line < len(script_lines) else None
|
||||
|
||||
# Determine error type and suggestions
|
||||
if exc.token.type == 'CLICK' and 'THEN' in str(exc.expected):
|
||||
code = cls.ERROR_CODES["missing_then"]
|
||||
message = "Missing 'THEN' keyword after IF condition"
|
||||
suggestions = [
|
||||
Suggestion(
|
||||
"Add 'THEN' after the condition",
|
||||
source_line.replace("CLICK", "THEN CLICK") if source_line else None
|
||||
)
|
||||
]
|
||||
elif exc.token.type == '$END':
|
||||
code = cls.ERROR_CODES["missing_endproc"]
|
||||
message = "Unexpected end of script"
|
||||
suggestions = [
|
||||
Suggestion("Check for missing ENDPROC"),
|
||||
Suggestion("Ensure all procedures are properly closed")
|
||||
]
|
||||
elif 'RPAR' in str(exc.expected):
|
||||
code = cls.ERROR_CODES["missing_paren"]
|
||||
message = "Missing closing parenthesis ')'"
|
||||
suggestions = [
|
||||
Suggestion("Add closing parenthesis at the end of the condition")
|
||||
]
|
||||
elif 'COMMA' in str(exc.expected):
|
||||
code = cls.ERROR_CODES["missing_comma"]
|
||||
message = "Missing comma ',' in command"
|
||||
suggestions = [
|
||||
Suggestion("Add comma between arguments")
|
||||
]
|
||||
else:
|
||||
# Check if this might be missing backticks
|
||||
if exc.token.type == 'NAME' and 'BACKTICK_STRING' in str(exc.expected):
|
||||
code = cls.ERROR_CODES["missing_backticks"]
|
||||
message = "Selector must be wrapped in backticks"
|
||||
suggestions = [
|
||||
Suggestion(
|
||||
"Wrap the selector in backticks",
|
||||
f"`{exc.token.value}`"
|
||||
)
|
||||
]
|
||||
else:
|
||||
code = cls.ERROR_CODES["syntax_error"]
|
||||
message = f"Unexpected '{exc.token.value}'"
|
||||
if exc.expected:
|
||||
expected_list = [str(e) for e in exc.expected if not str(e).startswith('_')][:3]
|
||||
if expected_list:
|
||||
message += f". Expected: {', '.join(expected_list)}"
|
||||
suggestions = []
|
||||
|
||||
return ErrorDetail(
|
||||
type=ErrorType.SYNTAX,
|
||||
code=code,
|
||||
severity=Severity.ERROR,
|
||||
message=message,
|
||||
line=line,
|
||||
column=column,
|
||||
source_line=source_line,
|
||||
line_before=line_before,
|
||||
line_after=line_after,
|
||||
suggestions=suggestions
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def _handle_unexpected_chars(cls, exc: UnexpectedCharacters, script_lines: List[str]) -> ErrorDetail:
|
||||
"""Handle UnexpectedCharacters errors"""
|
||||
line = exc.line
|
||||
column = exc.column
|
||||
|
||||
source_line = script_lines[line - 1] if 0 < line <= len(script_lines) else ""
|
||||
|
||||
# Check for missing backticks
|
||||
if "CLICK" in source_line and column > source_line.find("CLICK"):
|
||||
code = cls.ERROR_CODES["missing_backticks"]
|
||||
message = "Selector must be wrapped in backticks"
|
||||
suggestions = [
|
||||
Suggestion(
|
||||
"Wrap the selector in backticks",
|
||||
re.sub(r'CLICK\s+([^\s]+)', r'CLICK `\1`', source_line)
|
||||
)
|
||||
]
|
||||
else:
|
||||
code = cls.ERROR_CODES["syntax_error"]
|
||||
message = f"Invalid character at position {column}"
|
||||
suggestions = []
|
||||
|
||||
return ErrorDetail(
|
||||
type=ErrorType.SYNTAX,
|
||||
code=code,
|
||||
severity=Severity.ERROR,
|
||||
message=message,
|
||||
line=line,
|
||||
column=column,
|
||||
source_line=source_line,
|
||||
suggestions=suggestions
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def _handle_value_error(cls, exc: ValueError, script_lines: List[str]) -> ErrorDetail:
|
||||
"""Handle ValueError (runtime errors)"""
|
||||
message = str(exc)
|
||||
|
||||
# Check for undefined procedure
|
||||
if "Unknown procedure" in message:
|
||||
proc_match = re.search(r"'([^']+)'", message)
|
||||
if proc_match:
|
||||
proc_name = proc_match.group(1)
|
||||
|
||||
# Find the line with the procedure call
|
||||
for i, line in enumerate(script_lines):
|
||||
if proc_name in line and not line.strip().startswith('PROC'):
|
||||
return ErrorDetail(
|
||||
type=ErrorType.RUNTIME,
|
||||
code=cls.ERROR_CODES["undefined_proc"],
|
||||
severity=Severity.ERROR,
|
||||
message=f"Undefined procedure '{proc_name}'",
|
||||
line=i + 1,
|
||||
column=line.find(proc_name) + 1,
|
||||
source_line=line,
|
||||
suggestions=[
|
||||
Suggestion(
|
||||
f"Define the procedure before using it",
|
||||
f"PROC {proc_name}\n # commands here\nENDPROC"
|
||||
)
|
||||
]
|
||||
)
|
||||
|
||||
# Generic runtime error
|
||||
return ErrorDetail(
|
||||
type=ErrorType.RUNTIME,
|
||||
code="E999",
|
||||
severity=Severity.ERROR,
|
||||
message=message,
|
||||
line=1,
|
||||
column=1,
|
||||
source_line=script_lines[0] if script_lines else ""
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def generate_script(
|
||||
html: str,
|
||||
query: str | None = None,
|
||||
mode: str = "c4a",
|
||||
llm_config: LLMConfig | None = None,
|
||||
**completion_kwargs,
|
||||
) -> str:
|
||||
"""
|
||||
One-shot helper that calls the LLM exactly once to convert a
|
||||
natural-language goal + HTML snippet into either:
|
||||
|
||||
1. raw JavaScript (`mode="js"`)
|
||||
2. Crawl4ai DSL (`mode="c4a"`)
|
||||
|
||||
The returned string is guaranteed to be free of markdown wrappers
|
||||
or explanatory text, ready for direct execution.
|
||||
"""
|
||||
if llm_config is None:
|
||||
llm_config = LLMConfig() # falls back to env vars / defaults
|
||||
|
||||
# Build the user chunk
|
||||
user_prompt = "\n".join(
|
||||
[
|
||||
"## GOAL",
|
||||
"<<goael>>",
|
||||
(query or "Prepare the page for crawling."),
|
||||
"<</goal>>",
|
||||
"",
|
||||
"## HTML",
|
||||
"<<html>>",
|
||||
html[:100000], # guardrail against token blast
|
||||
"<</html>>",
|
||||
"",
|
||||
"## MODE",
|
||||
mode,
|
||||
]
|
||||
)
|
||||
|
||||
# Call the LLM with retry/back-off logic
|
||||
full_prompt = f"{GENERATE_SCRIPT_PROMPT}\n\n{user_prompt}" if mode == "c4a" else f"{GENERATE_JS_SCRIPT_PROMPT}\n\n{user_prompt}"
|
||||
|
||||
response = perform_completion_with_backoff(
|
||||
provider=llm_config.provider,
|
||||
prompt_with_variables=full_prompt,
|
||||
api_token=llm_config.api_token,
|
||||
json_response=False,
|
||||
base_url=getattr(llm_config, 'base_url', None),
|
||||
**completion_kwargs,
|
||||
)
|
||||
|
||||
# Extract content from the response
|
||||
raw_response = response.choices[0].message.content.strip()
|
||||
|
||||
# Strip accidental markdown fences (```js … ```)
|
||||
clean = re.sub(r"^```(?:[a-zA-Z0-9_-]+)?\s*|```$", "", raw_response, flags=re.MULTILINE).strip()
|
||||
|
||||
if not clean:
|
||||
raise RuntimeError("LLM returned empty script.")
|
||||
|
||||
return clean
|
||||
|
||||
|
||||
# Convenience functions for direct use
|
||||
def compile(script: Union[str, List[str]], root: Optional[pathlib.Path] = None) -> CompilationResult:
|
||||
"""Compile C4A-Script to JavaScript"""
|
||||
return C4ACompiler.compile(script, root)
|
||||
|
||||
|
||||
def validate(script: Union[str, List[str]]) -> ValidationResult:
|
||||
"""Validate C4A-Script syntax"""
|
||||
return C4ACompiler.validate(script)
|
||||
|
||||
|
||||
def compile_file(path: Union[str, pathlib.Path]) -> CompilationResult:
|
||||
"""Compile C4A-Script file"""
|
||||
return C4ACompiler.compile_file(path)
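A sketch of the one-shot LLM helper in context. This is illustrative only: it performs a real LLM call via `perform_completion_with_backoff`, so provider credentials must be configured, and the HTML fragment and goal below are made up.

```python
# Illustrative only: generate a C4A script from a goal + HTML fragment, then compile it.
from crawl4ai.async_configs import LLMConfig
from crawl4ai.script import C4ACompiler

html = '<div id="cookie-consent-modal"><button id="accept-cookies">Accept All</button></div>'

c4a_script = C4ACompiler.generate_script(
    html=html,
    query="Dismiss the cookie banner",
    mode="c4a",                     # or "js" for raw JavaScript
    llm_config=LLMConfig(),         # falls back to env vars / defaults
)

result = C4ACompiler.compile(c4a_script)
if not result.success:
    for err in result.errors:
        print(err.simple_message)
```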
crawl4ai/script/c4a_result.py (new file, 219 lines)
@@ -0,0 +1,219 @@
|
||||
"""
|
||||
Result classes for C4A-Script compilation
|
||||
Clean API design with no exceptions
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
from dataclasses import dataclass, field
|
||||
from enum import Enum
|
||||
from typing import List, Dict, Any, Optional
|
||||
import json
|
||||
|
||||
|
||||
class ErrorType(Enum):
|
||||
SYNTAX = "syntax"
|
||||
SEMANTIC = "semantic"
|
||||
RUNTIME = "runtime"
|
||||
|
||||
|
||||
class Severity(Enum):
|
||||
ERROR = "error"
|
||||
WARNING = "warning"
|
||||
INFO = "info"
|
||||
|
||||
|
||||
@dataclass
|
||||
class Suggestion:
|
||||
"""A suggestion for fixing an error"""
|
||||
message: str
|
||||
fix: Optional[str] = None
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
return {
|
||||
"message": self.message,
|
||||
"fix": self.fix
|
||||
}
|
||||
|
||||
|
||||
@dataclass
|
||||
class ErrorDetail:
|
||||
"""Detailed information about a compilation error"""
|
||||
# Core info
|
||||
type: ErrorType
|
||||
code: str # E001, E002, etc.
|
||||
severity: Severity
|
||||
message: str
|
||||
|
||||
# Location
|
||||
line: int
|
||||
column: int
|
||||
|
||||
# Context
|
||||
source_line: str
|
||||
|
||||
# Optional fields with defaults
|
||||
end_line: Optional[int] = None
|
||||
end_column: Optional[int] = None
|
||||
line_before: Optional[str] = None
|
||||
line_after: Optional[str] = None
|
||||
|
||||
# Help
|
||||
suggestions: List[Suggestion] = field(default_factory=list)
|
||||
documentation_url: Optional[str] = None
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
"""Convert to dictionary for JSON serialization"""
|
||||
return {
|
||||
"type": self.type.value,
|
||||
"code": self.code,
|
||||
"severity": self.severity.value,
|
||||
"message": self.message,
|
||||
"location": {
|
||||
"line": self.line,
|
||||
"column": self.column,
|
||||
"endLine": self.end_line,
|
||||
"endColumn": self.end_column
|
||||
},
|
||||
"context": {
|
||||
"sourceLine": self.source_line,
|
||||
"lineBefore": self.line_before,
|
||||
"lineAfter": self.line_after,
|
||||
"marker": {
|
||||
"start": self.column - 1,
|
||||
"length": (self.end_column - self.column) if self.end_column else 1
|
||||
}
|
||||
},
|
||||
"suggestions": [s.to_dict() for s in self.suggestions],
|
||||
"documentationUrl": self.documentation_url
|
||||
}
|
||||
|
||||
def to_json(self) -> str:
|
||||
"""Convert to JSON string"""
|
||||
return json.dumps(self.to_dict(), indent=2)
|
||||
|
||||
@property
|
||||
def formatted_message(self) -> str:
|
||||
"""Returns the nice text format for terminals"""
|
||||
lines = []
|
||||
lines.append(f"\n{'='*60}")
|
||||
lines.append(f"{self.type.value.title()} Error [{self.code}]")
|
||||
lines.append(f"{'='*60}")
|
||||
lines.append(f"Location: Line {self.line}, Column {self.column}")
|
||||
lines.append(f"Error: {self.message}")
|
||||
|
||||
if self.source_line:
|
||||
marker = " " * (self.column - 1) + "^"
|
||||
if self.end_column:
|
||||
marker += "~" * (self.end_column - self.column - 1)
|
||||
lines.append(f"\nCode:")
|
||||
if self.line_before:
|
||||
lines.append(f" {self.line - 1: >3} | {self.line_before}")
|
||||
lines.append(f" {self.line: >3} | {self.source_line}")
|
||||
lines.append(f" | {marker}")
|
||||
if self.line_after:
|
||||
lines.append(f" {self.line + 1: >3} | {self.line_after}")
|
||||
|
||||
if self.suggestions:
|
||||
lines.append("\nSuggestions:")
|
||||
for i, suggestion in enumerate(self.suggestions, 1):
|
||||
lines.append(f" {i}. {suggestion.message}")
|
||||
if suggestion.fix:
|
||||
lines.append(f" Fix: {suggestion.fix}")
|
||||
|
||||
lines.append("="*60)
|
||||
return "\n".join(lines)
|
||||
|
||||
@property
|
||||
def simple_message(self) -> str:
|
||||
"""Returns just the error message without formatting"""
|
||||
return f"Line {self.line}: {self.message}"
|
||||
|
||||
|
||||
@dataclass
|
||||
class WarningDetail:
|
||||
"""Information about a compilation warning"""
|
||||
code: str
|
||||
message: str
|
||||
line: int
|
||||
column: int
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
return {
|
||||
"code": self.code,
|
||||
"message": self.message,
|
||||
"line": self.line,
|
||||
"column": self.column
|
||||
}
|
||||
|
||||
|
||||
@dataclass
|
||||
class CompilationResult:
|
||||
"""Result of C4A-Script compilation"""
|
||||
success: bool
|
||||
js_code: Optional[List[str]] = None
|
||||
errors: List[ErrorDetail] = field(default_factory=list)
|
||||
warnings: List[WarningDetail] = field(default_factory=list)
|
||||
metadata: Dict[str, Any] = field(default_factory=dict)
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
"""Convert to dictionary for JSON serialization"""
|
||||
return {
|
||||
"success": self.success,
|
||||
"jsCode": self.js_code,
|
||||
"errors": [e.to_dict() for e in self.errors],
|
||||
"warnings": [w.to_dict() for w in self.warnings],
|
||||
"metadata": self.metadata
|
||||
}
|
||||
|
||||
def to_json(self) -> str:
|
||||
"""Convert to JSON string"""
|
||||
return json.dumps(self.to_dict(), indent=2)
|
||||
|
||||
@property
|
||||
def has_errors(self) -> bool:
|
||||
"""Check if there are any errors"""
|
||||
return len(self.errors) > 0
|
||||
|
||||
@property
|
||||
def has_warnings(self) -> bool:
|
||||
"""Check if there are any warnings"""
|
||||
return len(self.warnings) > 0
|
||||
|
||||
@property
|
||||
def first_error(self) -> Optional[ErrorDetail]:
|
||||
"""Get the first error if any"""
|
||||
return self.errors[0] if self.errors else None
|
||||
|
||||
def __str__(self) -> str:
|
||||
"""String representation for debugging"""
|
||||
if self.success:
|
||||
msg = f"✓ Compilation successful"
|
||||
if self.js_code:
|
||||
msg += f" - {len(self.js_code)} statements generated"
|
||||
if self.warnings:
|
||||
msg += f" ({len(self.warnings)} warnings)"
|
||||
return msg
|
||||
else:
|
||||
return f"✗ Compilation failed - {len(self.errors)} error(s)"
|
||||
|
||||
|
||||
@dataclass
|
||||
class ValidationResult:
|
||||
"""Result of script validation"""
|
||||
valid: bool
|
||||
errors: List[ErrorDetail] = field(default_factory=list)
|
||||
warnings: List[WarningDetail] = field(default_factory=list)
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
return {
|
||||
"valid": self.valid,
|
||||
"errors": [e.to_dict() for e in self.errors],
|
||||
"warnings": [w.to_dict() for w in self.warnings]
|
||||
}
|
||||
|
||||
def to_json(self) -> str:
|
||||
return json.dumps(self.to_dict(), indent=2)
|
||||
|
||||
@property
|
||||
def first_error(self) -> Optional[ErrorDetail]:
|
||||
return self.errors[0] if self.errors else None
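To see how these result objects surface in practice, here is a small sketch that compiles a deliberately broken script and inspects the structured error. The exact messages depend on the parser, so the comments show plausible output only.

```python
# Illustrative only: exercise the Result pattern end to end with a broken script.
from crawl4ai.script import compile

bad_script = "CLICK button.load-more"   # selector is missing its backticks

result = compile(bad_script)
print(result)                           # e.g. "✗ Compilation failed - 1 error(s)"
if result.has_errors:
    error = result.first_error
    print(error.simple_message)         # e.g. "Line 1: Selector must be wrapped in backticks"
    print(result.to_json())             # structured form (location, suggestions) for tooling
```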
crawl4ai/script/c4ai_script.py (new file, 690 lines)
@@ -0,0 +1,690 @@
|
||||
"""
|
||||
2025-06-03
|
||||
By Unclcode:
|
||||
C4A-Script Language Documentation
|
||||
Feeds Crawl4AI via CrawlerRunConfig(js_code=[ ... ]) – no core modifications.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
import pathlib, re, sys, textwrap
|
||||
from dataclasses import dataclass
|
||||
from typing import Any, Dict, List, Union
|
||||
|
||||
from lark import Lark, Transformer, v_args
|
||||
from lark.exceptions import UnexpectedToken, UnexpectedCharacters, VisitError
|
||||
|
||||
|
||||
# --------------------------------------------------------------------------- #
|
||||
# Custom Error Classes
|
||||
# --------------------------------------------------------------------------- #
|
||||
class C4AScriptError(Exception):
|
||||
"""Custom error class for C4A-Script compilation errors"""
|
||||
|
||||
def __init__(self, message: str, line: int = None, column: int = None,
|
||||
error_type: str = "Syntax Error", details: str = None):
|
||||
self.message = message
|
||||
self.line = line
|
||||
self.column = column
|
||||
self.error_type = error_type
|
||||
self.details = details
|
||||
super().__init__(self._format_message())
|
||||
|
||||
def _format_message(self) -> str:
|
||||
"""Format a clear error message"""
|
||||
lines = [f"\n{'='*60}"]
|
||||
lines.append(f"C4A-Script {self.error_type}")
|
||||
lines.append(f"{'='*60}")
|
||||
|
||||
if self.line:
|
||||
lines.append(f"Location: Line {self.line}" + (f", Column {self.column}" if self.column else ""))
|
||||
|
||||
lines.append(f"Error: {self.message}")
|
||||
|
||||
if self.details:
|
||||
lines.append(f"\nDetails: {self.details}")
|
||||
|
||||
lines.append("="*60)
|
||||
return "\n".join(lines)
|
||||
|
||||
@classmethod
|
||||
def from_exception(cls, exc: Exception, script: Union[str, List[str]]) -> 'C4AScriptError':
|
||||
"""Create C4AScriptError from another exception"""
|
||||
script_text = script if isinstance(script, str) else '\n'.join(script)
|
||||
script_lines = script_text.split('\n')
|
||||
|
||||
if isinstance(exc, UnexpectedToken):
|
||||
# Extract line and column from UnexpectedToken
|
||||
line = exc.line
|
||||
column = exc.column
|
||||
|
||||
# Get the problematic line
|
||||
if 0 < line <= len(script_lines):
|
||||
problem_line = script_lines[line - 1]
|
||||
marker = " " * (column - 1) + "^"
|
||||
|
||||
details = f"\nCode:\n {problem_line}\n {marker}\n"
|
||||
|
||||
# Improve error message based on context
|
||||
if exc.token.type == 'CLICK' and 'THEN' in str(exc.expected):
|
||||
message = "Missing 'THEN' keyword after IF condition"
|
||||
elif exc.token.type == '$END':
|
||||
message = "Unexpected end of script. Check for missing ENDPROC or incomplete commands"
|
||||
elif 'RPAR' in str(exc.expected):
|
||||
message = "Missing closing parenthesis ')'"
|
||||
elif 'COMMA' in str(exc.expected):
|
||||
message = "Missing comma ',' in command"
|
||||
else:
|
||||
message = f"Unexpected '{exc.token}'"
|
||||
if exc.expected:
|
||||
expected_list = [str(e) for e in exc.expected if not e.startswith('_')]
|
||||
if expected_list:
|
||||
message += f". Expected: {', '.join(expected_list[:3])}"
|
||||
|
||||
details += f"Token: {exc.token.type} ('{exc.token.value}')"
|
||||
else:
|
||||
message = str(exc)
|
||||
details = None
|
||||
|
||||
return cls(message, line, column, "Syntax Error", details)
|
||||
|
||||
elif isinstance(exc, UnexpectedCharacters):
|
||||
# Extract line and column
|
||||
line = exc.line
|
||||
column = exc.column
|
||||
|
||||
if 0 < line <= len(script_lines):
|
||||
problem_line = script_lines[line - 1]
|
||||
marker = " " * (column - 1) + "^"
|
||||
|
||||
details = f"\nCode:\n {problem_line}\n {marker}\n"
|
||||
message = f"Invalid character or unexpected text at position {column}"
|
||||
else:
|
||||
message = str(exc)
|
||||
details = None
|
||||
|
||||
return cls(message, line, column, "Syntax Error", details)
|
||||
|
||||
elif isinstance(exc, ValueError):
|
||||
# Handle runtime errors like undefined procedures
|
||||
message = str(exc)
|
||||
|
||||
# Try to find which line caused the error
|
||||
if "Unknown procedure" in message:
|
||||
proc_name = re.search(r"'([^']+)'", message)
|
||||
if proc_name:
|
||||
proc_name = proc_name.group(1)
|
||||
for i, line in enumerate(script_lines, 1):
|
||||
if proc_name in line and not line.strip().startswith('PROC'):
|
||||
details = f"\nCode:\n {line.strip()}\n\nMake sure the procedure '{proc_name}' is defined with PROC...ENDPROC"
|
||||
return cls(f"Undefined procedure '{proc_name}'", i, None, "Runtime Error", details)
|
||||
|
||||
return cls(message, None, None, "Runtime Error", None)
|
||||
|
||||
else:
|
||||
# Generic error
|
||||
return cls(str(exc), None, None, "Compilation Error", None)
|
||||
|
||||
|
||||
# --------------------------------------------------------------------------- #
# 1. Grammar
# --------------------------------------------------------------------------- #
GRAMMAR = r"""
    start : line*
    ?line : command | proc_def | include | comment

    command : wait | nav | click_cmd | double_click | right_click | move | drag | scroll
            | type | clear | set_input | press | key_down | key_up
            | eval_cmd | setvar | proc_call | if_cmd | repeat_cmd

    wait : "WAIT" (ESCAPED_STRING|BACKTICK_STRING|NUMBER) NUMBER? -> wait_cmd
    nav : "GO" URL -> go
        | "RELOAD" -> reload
        | "BACK" -> back
        | "FORWARD" -> forward

    click_cmd : "CLICK" (BACKTICK_STRING|NUMBER NUMBER) -> click
    double_click : "DOUBLE_CLICK" (BACKTICK_STRING|NUMBER NUMBER) -> double_click
    right_click : "RIGHT_CLICK" (BACKTICK_STRING|NUMBER NUMBER) -> right_click

    move : "MOVE" coords -> move
    drag : "DRAG" coords coords -> drag
    scroll : "SCROLL" DIR NUMBER? -> scroll

    type : "TYPE" (ESCAPED_STRING | NAME) -> type
    clear : "CLEAR" BACKTICK_STRING -> clear
    set_input : "SET" BACKTICK_STRING (ESCAPED_STRING | BACKTICK_STRING | NAME) -> set_input
    press : "PRESS" WORD -> press
    key_down : "KEY_DOWN" WORD -> key_down
    key_up : "KEY_UP" WORD -> key_up

    eval_cmd : "EVAL" BACKTICK_STRING -> eval_cmd
    setvar : "SETVAR" NAME "=" value -> setvar
    proc_call : NAME -> proc_call
    proc_def : "PROC" NAME line* "ENDPROC" -> proc_def
    include : "USE" ESCAPED_STRING -> include
    comment : /#.*/ -> comment

    if_cmd : "IF" "(" condition ")" "THEN" command ("ELSE" command)? -> if_cmd
    repeat_cmd : "REPEAT" "(" command "," repeat_count ")" -> repeat_cmd

    condition : not_cond | exists_cond | js_cond
    not_cond : "NOT" condition -> not_cond
    exists_cond : "EXISTS" BACKTICK_STRING -> exists_cond
    js_cond : BACKTICK_STRING -> js_cond

    repeat_count : NUMBER | BACKTICK_STRING

    coords : NUMBER NUMBER
    value : ESCAPED_STRING | BACKTICK_STRING | NUMBER
    DIR : /(UP|DOWN|LEFT|RIGHT)/i
    REST : /[^\n]+/

    URL : /(http|https):\/\/[^\s]+/
    NAME : /\$?[A-Za-z_][A-Za-z0-9_]*/
    WORD : /[A-Za-z0-9+]+/
    BACKTICK_STRING : /`[^`]*`/

    %import common.NUMBER
    %import common.ESCAPED_STRING
    %import common.WS_INLINE
    %import common.NEWLINE
    %ignore WS_INLINE
    %ignore NEWLINE
|
||||
"""
|
||||
|
||||
# --------------------------------------------------------------------------- #
# 2. IR dataclasses
# --------------------------------------------------------------------------- #
@dataclass
class Cmd:
    op: str
    args: List[Any]

@dataclass
class Proc:
    name: str
    body: List[Cmd]
|
||||
|
||||
# --------------------------------------------------------------------------- #
|
||||
# 3. AST → IR
|
||||
# --------------------------------------------------------------------------- #
|
||||
@v_args(inline=True)
|
||||
class ASTBuilder(Transformer):
|
||||
# helpers
|
||||
def _strip(self, s):
|
||||
if s.startswith('"') and s.endswith('"'):
|
||||
return s[1:-1]
|
||||
elif s.startswith('`') and s.endswith('`'):
|
||||
return s[1:-1]
|
||||
return s
|
||||
def start(self,*i): return list(i)
|
||||
def line(self,i): return i
|
||||
def command(self,i): return i
|
||||
|
||||
# WAIT
|
||||
def wait_cmd(self, rest, timeout=None):
|
||||
rest_str = str(rest)
|
||||
# Check if it's a number (including floats)
|
||||
try:
|
||||
num_val = float(rest_str)
|
||||
payload = (num_val, "seconds")
|
||||
except ValueError:
|
||||
if rest_str.startswith('"') and rest_str.endswith('"'):
|
||||
payload = (self._strip(rest_str), "text")
|
||||
elif rest_str.startswith('`') and rest_str.endswith('`'):
|
||||
payload = (self._strip(rest_str), "selector")
|
||||
else:
|
||||
payload = (rest_str, "selector")
|
||||
return Cmd("WAIT", [payload, int(timeout) if timeout else None])
|
||||
|
||||
# NAV
|
||||
def go(self,u): return Cmd("GO",[str(u)])
|
||||
def reload(self): return Cmd("RELOAD",[])
|
||||
def back(self): return Cmd("BACK",[])
|
||||
def forward(self): return Cmd("FORWARD",[])
|
||||
|
||||
# CLICK, DOUBLE_CLICK, RIGHT_CLICK
|
||||
def click(self, *args):
|
||||
return self._handle_click("CLICK", args)
|
||||
|
||||
def double_click(self, *args):
|
||||
return self._handle_click("DBLCLICK", args)
|
||||
|
||||
def right_click(self, *args):
|
||||
return self._handle_click("RIGHTCLICK", args)
|
||||
|
||||
def _handle_click(self, op, args):
|
||||
if len(args) == 1:
|
||||
# Single argument - backtick string
|
||||
target = self._strip(str(args[0]))
|
||||
return Cmd(op, [("selector", target)])
|
||||
else:
|
||||
# Two arguments - coordinates
|
||||
x, y = args
|
||||
return Cmd(op, [("coords", int(x), int(y))])
|
||||
|
||||
|
||||
# MOVE / DRAG / SCROLL
|
||||
def coords(self,x,y): return ("coords",int(x),int(y))
|
||||
def move(self,c): return Cmd("MOVE",[c])
|
||||
def drag(self,c1,c2): return Cmd("DRAG",[c1,c2])
|
||||
def scroll(self,dir_tok,amt=None):
|
||||
return Cmd("SCROLL",[dir_tok.upper(), int(amt) if amt else 500])
|
||||
|
||||
# KEYS
|
||||
def type(self,tok): return Cmd("TYPE",[self._strip(str(tok))])
|
||||
def clear(self,sel): return Cmd("CLEAR",[self._strip(str(sel))])
|
||||
def set_input(self,sel,val): return Cmd("SET",[self._strip(str(sel)), self._strip(str(val))])
|
||||
def press(self,w): return Cmd("PRESS",[str(w)])
|
||||
def key_down(self,w): return Cmd("KEYDOWN",[str(w)])
|
||||
def key_up(self,w): return Cmd("KEYUP",[str(w)])
|
||||
|
||||
# FLOW
|
||||
def eval_cmd(self,txt): return Cmd("EVAL",[self._strip(str(txt))])
|
||||
def setvar(self,n,v):
|
||||
# v might be a Token or a Tree, extract value properly
|
||||
if hasattr(v, 'value'):
|
||||
value = v.value
|
||||
elif hasattr(v, 'children') and len(v.children) > 0:
|
||||
value = v.children[0].value
|
||||
else:
|
||||
value = str(v)
|
||||
return Cmd("SETVAR",[str(n), self._strip(value)])
|
||||
def proc_call(self,n): return Cmd("CALL",[str(n)])
|
||||
def proc_def(self,n,*body): return Proc(str(n),[b for b in body if isinstance(b,Cmd)])
|
||||
def include(self,p): return Cmd("INCLUDE",[self._strip(p)])
|
||||
def comment(self,*_): return Cmd("NOP",[])
|
||||
|
||||
# IF-THEN-ELSE and EXISTS
|
||||
def if_cmd(self, condition, then_cmd, else_cmd=None):
|
||||
return Cmd("IF", [condition, then_cmd, else_cmd])
|
||||
|
||||
def condition(self, cond):
|
||||
return cond
|
||||
|
||||
def not_cond(self, cond):
|
||||
return ("NOT", cond)
|
||||
|
||||
def exists_cond(self, selector):
|
||||
return ("EXISTS", self._strip(str(selector)))
|
||||
|
||||
def js_cond(self, expr):
|
||||
return ("JS", self._strip(str(expr)))
|
||||
|
||||
# REPEAT
|
||||
def repeat_cmd(self, cmd, count):
|
||||
return Cmd("REPEAT", [cmd, count])
|
||||
|
||||
def repeat_count(self, value):
|
||||
return str(value)
|
||||
|
||||
# --------------------------------------------------------------------------- #
|
||||
# 4. Compiler
|
||||
# --------------------------------------------------------------------------- #
|
||||
class Compiler:
|
||||
def __init__(self, root: pathlib.Path|None=None):
|
||||
self.parser = Lark(GRAMMAR,start="start",parser="lalr")
|
||||
self.root = pathlib.Path(root or ".").resolve()
|
||||
self.vars: Dict[str,Any] = {}
|
||||
self.procs: Dict[str,Proc]= {}
|
||||
|
||||
def compile(self, text: Union[str, List[str]]) -> List[str]:
|
||||
# Handle list input by joining with newlines
|
||||
if isinstance(text, list):
|
||||
text = '\n'.join(text)
|
||||
|
||||
ir = self._parse_with_includes(text)
|
||||
ir = self._collect_procs(ir)
|
||||
ir = self._inline_calls(ir)
|
||||
ir = self._apply_set_vars(ir)
|
||||
return [self._emit_js(c) for c in ir if isinstance(c,Cmd) and c.op!="NOP"]
|
||||
|
||||
# passes
|
||||
def _parse_with_includes(self,txt,seen=None):
|
||||
seen=seen or set()
|
||||
cmds=ASTBuilder().transform(self.parser.parse(txt))
|
||||
out=[]
|
||||
for c in cmds:
|
||||
if isinstance(c,Cmd) and c.op=="INCLUDE":
|
||||
p=(self.root/c.args[0]).resolve()
|
||||
if p in seen: raise ValueError(f"Circular include {p}")
|
||||
seen.add(p); out+=self._parse_with_includes(p.read_text(),seen)
|
||||
else: out.append(c)
|
||||
return out
|
||||
|
||||
def _collect_procs(self,ir):
|
||||
out=[]
|
||||
for i in ir:
|
||||
if isinstance(i,Proc): self.procs[i.name]=i
|
||||
else: out.append(i)
|
||||
return out
|
||||
|
||||
def _inline_calls(self,ir):
|
||||
out=[]
|
||||
for c in ir:
|
||||
if isinstance(c,Cmd) and c.op=="CALL":
|
||||
if c.args[0] not in self.procs:
|
||||
raise ValueError(f"Unknown procedure {c.args[0]!r}")
|
||||
out+=self._inline_calls(self.procs[c.args[0]].body)
|
||||
else: out.append(c)
|
||||
return out
|
||||
|
||||
def _apply_set_vars(self,ir):
|
||||
def sub(s): return re.sub(r"\$(\w+)",lambda m:str(self.vars.get(m.group(1),m.group(0))) ,s) if isinstance(s,str) else s
|
||||
out=[]
|
||||
for c in ir:
|
||||
if isinstance(c,Cmd):
|
||||
if c.op=="SETVAR":
|
||||
# Store variable
|
||||
self.vars[c.args[0].lstrip('$')]=c.args[1]
|
||||
else:
|
||||
# Apply variable substitution to commands that use them
|
||||
if c.op in("TYPE","EVAL","SET"): c.args=[sub(a) for a in c.args]
|
||||
out.append(c)
|
||||
return out
|
||||
|
||||
# JS emitter
|
||||
def _emit_js(self, cmd: Cmd) -> str:
|
||||
op, a = cmd.op, cmd.args
|
||||
if op == "GO": return f"window.location.href = '{a[0]}';"
|
||||
if op == "RELOAD": return "window.location.reload();"
|
||||
if op == "BACK": return "window.history.back();"
|
||||
if op == "FORWARD": return "window.history.forward();"
|
||||
|
||||
if op == "WAIT":
|
||||
arg, kind = a[0]
|
||||
timeout = a[1] or 10
|
||||
if kind == "seconds":
|
||||
return f"await new Promise(r=>setTimeout(r,{arg}*1000));"
|
||||
if kind == "selector":
|
||||
sel = arg.replace("\\","\\\\").replace("'","\\'")
|
||||
return textwrap.dedent(f"""
|
||||
await new Promise((res,rej)=>{{
|
||||
const max = {timeout*1000}, t0 = performance.now();
|
||||
const id = setInterval(()=>{{
|
||||
if(document.querySelector('{sel}')){{clearInterval(id);res();}}
|
||||
else if(performance.now()-t0>max){{clearInterval(id);rej('WAIT selector timeout');}}
|
||||
}},100);
|
||||
}});
|
||||
""").strip()
|
||||
if kind == "text":
|
||||
txt = arg.replace('`', '\\`')
|
||||
return textwrap.dedent(f"""
|
||||
await new Promise((res,rej)=>{{
|
||||
const max={timeout*1000},t0=performance.now();
|
||||
const id=setInterval(()=>{{
|
||||
if(document.body.innerText.includes(`{txt}`)){{clearInterval(id);res();}}
|
||||
else if(performance.now()-t0>max){{clearInterval(id);rej('WAIT text timeout');}}
|
||||
}},100);
|
||||
}});
|
||||
""").strip()
|
||||
|
||||
# click-style helpers
|
||||
def _js_click(sel, evt="click", button=0, detail=1):
|
||||
sel = sel.replace("'", "\\'")
|
||||
return textwrap.dedent(f"""
|
||||
(()=>{{
|
||||
const el=document.querySelector('{sel}');
|
||||
if(el){{
|
||||
el.focus&&el.focus();
|
||||
el.dispatchEvent(new MouseEvent('{evt}',{{bubbles:true,button:{button},detail:{detail}}}));
|
||||
}}
|
||||
}})();
|
||||
""").strip()
|
||||
|
||||
def _js_click_xy(x, y, evt="click", button=0, detail=1):
|
||||
return textwrap.dedent(f"""
|
||||
(()=>{{
|
||||
const el=document.elementFromPoint({x},{y});
|
||||
if(el){{
|
||||
el.focus&&el.focus();
|
||||
el.dispatchEvent(new MouseEvent('{evt}',{{bubbles:true,button:{button},detail:{detail}}}));
|
||||
}}
|
||||
}})();
|
||||
""").strip()
|
||||
|
||||
if op in ("CLICK", "DBLCLICK", "RIGHTCLICK"):
|
||||
evt = {"CLICK":"click","DBLCLICK":"dblclick","RIGHTCLICK":"contextmenu"}[op]
|
||||
btn = 2 if op=="RIGHTCLICK" else 0
|
||||
det = 2 if op=="DBLCLICK" else 1
|
||||
kind,*rest = a[0]
|
||||
# Pass the event type through for coordinate clicks too, so DBLCLICK/RIGHTCLICK at (x, y) dispatch the right event
return _js_click_xy(*rest, evt, btn, det) if kind=="coords" else _js_click(rest[0],evt,btn,det)
|
||||
|
||||
if op == "MOVE":
|
||||
_, x, y = a[0]
|
||||
return textwrap.dedent(f"""
|
||||
document.dispatchEvent(new MouseEvent('mousemove',{{clientX:{x},clientY:{y},bubbles:true}}));
|
||||
""").strip()
|
||||
|
||||
if op == "DRAG":
|
||||
(_, x1, y1), (_, x2, y2) = a
|
||||
return textwrap.dedent(f"""
|
||||
(()=>{{
|
||||
const s=document.elementFromPoint({x1},{y1});
|
||||
if(!s) return;
|
||||
s.dispatchEvent(new MouseEvent('mousedown',{{bubbles:true,clientX:{x1},clientY:{y1}}}));
|
||||
document.dispatchEvent(new MouseEvent('mousemove',{{bubbles:true,clientX:{x2},clientY:{y2}}}));
|
||||
document.dispatchEvent(new MouseEvent('mouseup', {{bubbles:true,clientX:{x2},clientY:{y2}}}));
|
||||
}})();
|
||||
""").strip()
|
||||
|
||||
if op == "SCROLL":
|
||||
dir_, amt = a
|
||||
dx, dy = {"UP":(0,-amt),"DOWN":(0,amt),"LEFT":(-amt,0),"RIGHT":(amt,0)}[dir_]
|
||||
return f"window.scrollBy({dx},{dy});"
|
||||
|
||||
if op == "TYPE":
|
||||
txt = a[0].replace("'", "\\'")
|
||||
return textwrap.dedent(f"""
|
||||
(()=>{{
|
||||
const el=document.activeElement;
|
||||
if(el){{
|
||||
el.value += '{txt}';
|
||||
el.dispatchEvent(new Event('input',{{bubbles:true}}));
|
||||
}}
|
||||
}})();
|
||||
""").strip()
|
||||
|
||||
if op == "CLEAR":
|
||||
sel = a[0].replace("'", "\\'")
|
||||
return textwrap.dedent(f"""
|
||||
(()=>{{
|
||||
const el=document.querySelector('{sel}');
|
||||
if(el && 'value' in el){{
|
||||
el.value = '';
|
||||
el.dispatchEvent(new Event('input',{{bubbles:true}}));
|
||||
el.dispatchEvent(new Event('change',{{bubbles:true}}));
|
||||
}}
|
||||
}})();
|
||||
""").strip()
|
||||
|
||||
if op == "SET" and len(a) == 2:
|
||||
# This is SET for input fields (SET `#field` "value")
|
||||
sel = a[0].replace("'", "\\'")
|
||||
val = a[1].replace("'", "\\'")
|
||||
return textwrap.dedent(f"""
|
||||
(()=>{{
|
||||
const el=document.querySelector('{sel}');
|
||||
if(el && 'value' in el){{
|
||||
el.value = '';
|
||||
el.focus&&el.focus();
|
||||
el.value = '{val}';
|
||||
el.dispatchEvent(new Event('input',{{bubbles:true}}));
|
||||
el.dispatchEvent(new Event('change',{{bubbles:true}}));
|
||||
}}
|
||||
}})();
|
||||
""").strip()
|
||||
|
||||
if op in ("PRESS","KEYDOWN","KEYUP"):
|
||||
key = a[0]
|
||||
evs = {"PRESS":("keydown","keyup"),"KEYDOWN":("keydown",),"KEYUP":("keyup",)}[op]
|
||||
return ";".join([f"document.dispatchEvent(new KeyboardEvent('{e}',{{key:'{key}',bubbles:true}}))" for e in evs]) + ";"
|
||||
|
||||
if op == "EVAL":
|
||||
return textwrap.dedent(f"""
|
||||
(()=>{{
|
||||
try {{
|
||||
{a[0]};
|
||||
}} catch (e) {{
|
||||
console.error('C4A-Script EVAL error:', e);
|
||||
}}
|
||||
}})();
|
||||
""").strip()
|
||||
|
||||
if op == "IF":
|
||||
condition, then_cmd, else_cmd = a
|
||||
|
||||
# Generate condition JavaScript
|
||||
js_condition = self._emit_condition(condition)
|
||||
|
||||
# Generate commands - handle both regular commands and procedure calls
|
||||
then_js = self._handle_cmd_or_proc(then_cmd)
|
||||
else_js = self._handle_cmd_or_proc(else_cmd) if else_cmd else ""
|
||||
|
||||
if else_cmd:
|
||||
return textwrap.dedent(f"""
|
||||
if ({js_condition}) {{
|
||||
{then_js}
|
||||
}} else {{
|
||||
{else_js}
|
||||
}}
|
||||
""").strip()
|
||||
else:
|
||||
return textwrap.dedent(f"""
|
||||
if ({js_condition}) {{
|
||||
{then_js}
|
||||
}}
|
||||
""").strip()
|
||||
|
||||
if op == "REPEAT":
|
||||
cmd, count = a
|
||||
|
||||
# Handle the count - could be number or JS expression
|
||||
if count.isdigit():
|
||||
# Simple number
|
||||
repeat_js = self._handle_cmd_or_proc(cmd)
|
||||
return textwrap.dedent(f"""
|
||||
for (let _i = 0; _i < {count}; _i++) {{
|
||||
{repeat_js}
|
||||
}}
|
||||
""").strip()
|
||||
else:
|
||||
# JS expression (from backticks)
|
||||
count_expr = count[1:-1] if count.startswith('`') and count.endswith('`') else count
|
||||
repeat_js = self._handle_cmd_or_proc(cmd)
|
||||
return textwrap.dedent(f"""
|
||||
(()=>{{
|
||||
const _count = {count_expr};
|
||||
if (typeof _count === 'number') {{
|
||||
for (let _i = 0; _i < _count; _i++) {{
|
||||
{repeat_js}
|
||||
}}
|
||||
}} else if (_count) {{
|
||||
{repeat_js}
|
||||
}}
|
||||
}})();
|
||||
""").strip()
|
||||
|
||||
raise ValueError(f"Unhandled op {op}")
|
||||
|
||||
def _emit_condition(self, condition):
|
||||
"""Convert a condition tuple to JavaScript"""
|
||||
cond_type = condition[0]
|
||||
|
||||
if cond_type == "EXISTS":
|
||||
return f"!!document.querySelector('{condition[1]}')"
|
||||
elif cond_type == "NOT":
|
||||
# Recursively handle the negated condition
|
||||
inner_condition = self._emit_condition(condition[1])
|
||||
return f"!({inner_condition})"
|
||||
else: # JS condition
|
||||
return condition[1]
|
||||
|
||||
def _handle_cmd_or_proc(self, cmd):
|
||||
"""Handle a command that might be a regular command or a procedure call"""
|
||||
if not cmd:
|
||||
return ""
|
||||
|
||||
if isinstance(cmd, Cmd):
|
||||
if cmd.op == "CALL":
|
||||
# Inline the procedure
|
||||
if cmd.args[0] not in self.procs:
|
||||
raise ValueError(f"Unknown procedure {cmd.args[0]!r}")
|
||||
proc_body = self.procs[cmd.args[0]].body
|
||||
return "\n".join([self._emit_js(c) for c in proc_body if c.op != "NOP"])
|
||||
else:
|
||||
return self._emit_js(cmd)
|
||||
return ""
|
||||
|
||||
# --------------------------------------------------------------------------- #
|
||||
# 5. Helpers + demo
|
||||
# --------------------------------------------------------------------------- #
|
||||
|
||||
def compile_string(script: Union[str, List[str]], *, root: Union[pathlib.Path, None] = None) -> List[str]:
|
||||
"""Compile C4A-Script from string or list of strings to JavaScript.
|
||||
|
||||
Args:
|
||||
script: C4A-Script as a string or list of command strings
|
||||
root: Root directory for resolving includes (optional)
|
||||
|
||||
Returns:
|
||||
List of JavaScript command strings
|
||||
|
||||
Raises:
|
||||
C4AScriptError: When compilation fails with detailed error information
|
||||
"""
|
||||
try:
|
||||
return Compiler(root).compile(script)
|
||||
except Exception as e:
|
||||
# Wrap the error with better formatting
|
||||
raise C4AScriptError.from_exception(e, script)
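def _example_compile_usage():
    # Illustrative sketch, not part of the original module: shows how
    # compile_string() might be called; the result is a list of JavaScript
    # statements, one per compiled command.
    js_statements = compile_string(
        "GO https://example.com\n"
        "WAIT `#content` 5\n"
        "CLICK `button.submit`"
    )
    for stmt in js_statements:
        print(stmt)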
|
||||
|
||||
def compile_file(path: pathlib.Path) -> List[str]:
|
||||
"""Compile C4A-Script from file to JavaScript.
|
||||
|
||||
Args:
|
||||
path: Path to C4A-Script file
|
||||
|
||||
Returns:
|
||||
List of JavaScript command strings
|
||||
"""
|
||||
return compile_string(path.read_text(), root=path.parent)
|
||||
|
||||
def compile_lines(lines: List[str], *, root: Union[pathlib.Path, None] = None) -> List[str]:
|
||||
"""Compile C4A-Script from list of lines to JavaScript.
|
||||
|
||||
Args:
|
||||
lines: List of C4A-Script command lines
|
||||
root: Root directory for resolving includes (optional)
|
||||
|
||||
Returns:
|
||||
List of JavaScript command strings
|
||||
"""
|
||||
return compile_string(lines, root=root)
|
||||
|
||||
DEMO = """
# quick sanity demo
PROC login
  SET `input[name="username"]` $user
  SET `input[name="password"]` $pass
  CLICK `button.submit`
ENDPROC

SETVAR user = "tom@crawl4ai.com"
SETVAR pass = "hunter2"

GO https://example.com/login
WAIT `input[name="username"]` 10
login
WAIT 3
EVAL `console.log('logged in')`
"""

if __name__ == "__main__":
    if len(sys.argv) == 2:
        for js in compile_file(pathlib.Path(sys.argv[1])):
            print(js)
    else:
        print("=== DEMO ===")
        for js in compile_string(DEMO):
            print(js)
|
||||
@@ -9,83 +9,44 @@ from urllib.parse import urlparse
|
||||
import OpenSSL.crypto
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
class SSLCertificate:
|
||||
# === Inherit from dict ===
|
||||
class SSLCertificate(dict):
|
||||
"""
|
||||
A class representing an SSL certificate with methods to export in various formats.
|
||||
A class representing an SSL certificate, behaving like a dictionary
|
||||
for direct JSON serialization. It stores the certificate information internally
|
||||
and provides methods for export and property access.
|
||||
|
||||
Attributes:
|
||||
cert_info (Dict[str, Any]): The certificate information.
|
||||
|
||||
Methods:
|
||||
from_url(url: str, timeout: int = 10) -> Optional['SSLCertificate']: Create SSLCertificate instance from a URL.
|
||||
from_file(file_path: str) -> Optional['SSLCertificate']: Create SSLCertificate instance from a file.
|
||||
from_binary(binary_data: bytes) -> Optional['SSLCertificate']: Create SSLCertificate instance from binary data.
|
||||
export_as_pem() -> str: Export the certificate as PEM format.
|
||||
export_as_der() -> bytes: Export the certificate as DER format.
|
||||
export_as_json() -> Dict[str, Any]: Export the certificate as JSON format.
|
||||
export_as_text() -> str: Export the certificate as text format.
|
||||
Inherits from dict, so instances are directly JSON serializable.
|
||||
"""
|
||||
|
||||
# Use __slots__ for potential memory optimization if desired, though less common when inheriting dict
|
||||
# __slots__ = ("_cert_info",) # If using slots, be careful with dict inheritance interaction
|
||||
|
||||
def __init__(self, cert_info: Dict[str, Any]):
|
||||
self._cert_info = self._decode_cert_data(cert_info)
|
||||
|
||||
@staticmethod
|
||||
def from_url(url: str, timeout: int = 10) -> Optional["SSLCertificate"]:
|
||||
"""
|
||||
Create SSLCertificate instance from a URL.
|
||||
Initializes the SSLCertificate object.
|
||||
|
||||
Args:
|
||||
url (str): URL of the website.
|
||||
timeout (int): Timeout for the connection (default: 10).
|
||||
|
||||
Returns:
|
||||
Optional[SSLCertificate]: SSLCertificate instance if successful, None otherwise.
|
||||
cert_info (Dict[str, Any]): The raw certificate dictionary.
|
||||
"""
|
||||
try:
|
||||
hostname = urlparse(url).netloc
|
||||
if ":" in hostname:
|
||||
hostname = hostname.split(":")[0]
|
||||
# 1. Decode the data (handle bytes -> str)
|
||||
decoded_info = self._decode_cert_data(cert_info)
|
||||
|
||||
context = ssl.create_default_context()
|
||||
with socket.create_connection((hostname, 443), timeout=timeout) as sock:
|
||||
with context.wrap_socket(sock, server_hostname=hostname) as ssock:
|
||||
cert_binary = ssock.getpeercert(binary_form=True)
|
||||
x509 = OpenSSL.crypto.load_certificate(
|
||||
OpenSSL.crypto.FILETYPE_ASN1, cert_binary
|
||||
)
|
||||
# 2. Store the decoded info internally (optional but good practice)
|
||||
# self._cert_info = decoded_info # You can keep this if methods rely on it
|
||||
|
||||
cert_info = {
|
||||
"subject": dict(x509.get_subject().get_components()),
|
||||
"issuer": dict(x509.get_issuer().get_components()),
|
||||
"version": x509.get_version(),
|
||||
"serial_number": hex(x509.get_serial_number()),
|
||||
"not_before": x509.get_notBefore(),
|
||||
"not_after": x509.get_notAfter(),
|
||||
"fingerprint": x509.digest("sha256").hex(),
|
||||
"signature_algorithm": x509.get_signature_algorithm(),
|
||||
"raw_cert": base64.b64encode(cert_binary),
|
||||
}
|
||||
|
||||
# Add extensions
|
||||
extensions = []
|
||||
for i in range(x509.get_extension_count()):
|
||||
ext = x509.get_extension(i)
|
||||
extensions.append(
|
||||
{"name": ext.get_short_name(), "value": str(ext)}
|
||||
)
|
||||
cert_info["extensions"] = extensions
|
||||
|
||||
return SSLCertificate(cert_info)
|
||||
|
||||
except Exception:
|
||||
return None
|
||||
# 3. Initialize the dictionary part of the object with the decoded data
|
||||
super().__init__(decoded_info)
|
||||
|
||||
@staticmethod
|
||||
def _decode_cert_data(data: Any) -> Any:
|
||||
"""Helper method to decode bytes in certificate data."""
|
||||
if isinstance(data, bytes):
|
||||
return data.decode("utf-8")
|
||||
try:
|
||||
# Try UTF-8 first, fallback to latin-1 for arbitrary bytes
|
||||
return data.decode("utf-8")
|
||||
except UnicodeDecodeError:
|
||||
return data.decode("latin-1") # Or handle as needed, maybe hex representation
|
||||
elif isinstance(data, dict):
|
||||
return {
|
||||
(
|
||||
@@ -97,36 +58,119 @@ class SSLCertificate:
|
||||
return [SSLCertificate._decode_cert_data(item) for item in data]
|
||||
return data
|
||||
|
||||
@staticmethod
|
||||
def from_url(url: str, timeout: int = 10) -> Optional["SSLCertificate"]:
|
||||
"""
|
||||
Create SSLCertificate instance from a URL. Fetches cert info and initializes.
|
||||
(Fetching logic remains the same)
|
||||
"""
|
||||
cert_info_raw = None # Variable to hold the fetched dict
|
||||
try:
|
||||
hostname = urlparse(url).netloc
|
||||
if ":" in hostname:
|
||||
hostname = hostname.split(":")[0]
|
||||
|
||||
context = ssl.create_default_context()
|
||||
# Set check_hostname to False and verify_mode to CERT_NONE temporarily
|
||||
# for potentially problematic certificates during fetch, but parse the result regardless.
|
||||
# context.check_hostname = False
|
||||
# context.verify_mode = ssl.CERT_NONE
|
||||
|
||||
with socket.create_connection((hostname, 443), timeout=timeout) as sock:
|
||||
with context.wrap_socket(sock, server_hostname=hostname) as ssock:
|
||||
cert_binary = ssock.getpeercert(binary_form=True)
|
||||
if not cert_binary:
|
||||
print(f"Warning: No certificate returned for {hostname}")
|
||||
return None
|
||||
|
||||
x509 = OpenSSL.crypto.load_certificate(
|
||||
OpenSSL.crypto.FILETYPE_ASN1, cert_binary
|
||||
)
|
||||
|
||||
# Create the dictionary directly
|
||||
cert_info_raw = {
|
||||
"subject": dict(x509.get_subject().get_components()),
|
||||
"issuer": dict(x509.get_issuer().get_components()),
|
||||
"version": x509.get_version(),
|
||||
"serial_number": hex(x509.get_serial_number()),
|
||||
"not_before": x509.get_notBefore(), # Keep as bytes initially, _decode handles it
|
||||
"not_after": x509.get_notAfter(), # Keep as bytes initially
|
||||
"fingerprint": x509.digest("sha256").hex(), # hex() is already string
|
||||
"signature_algorithm": x509.get_signature_algorithm(), # Keep as bytes
|
||||
"raw_cert": base64.b64encode(cert_binary), # Base64 is bytes, _decode handles it
|
||||
}
|
||||
|
||||
# Add extensions
|
||||
extensions = []
|
||||
for i in range(x509.get_extension_count()):
|
||||
ext = x509.get_extension(i)
|
||||
# get_short_name() returns bytes, str(ext) handles value conversion
|
||||
extensions.append(
|
||||
{"name": ext.get_short_name(), "value": str(ext)}
|
||||
)
|
||||
cert_info_raw["extensions"] = extensions
|
||||
|
||||
except ssl.SSLCertVerificationError as e:
|
||||
print(f"SSL Verification Error for {url}: {e}")
|
||||
# Decide if you want to proceed or return None based on your needs
|
||||
# You might try fetching without verification here if needed, but be cautious.
|
||||
return None
|
||||
except socket.gaierror:
|
||||
print(f"Could not resolve hostname: {hostname}")
|
||||
return None
|
||||
except socket.timeout:
|
||||
print(f"Connection timed out for {url}")
|
||||
return None
|
||||
except Exception as e:
|
||||
print(f"Error fetching/processing certificate for {url}: {e}")
|
||||
# Log the full error details if needed: logging.exception("Cert fetch error")
|
||||
return None
|
||||
|
||||
# If successful, create the SSLCertificate instance from the dictionary
|
||||
if cert_info_raw:
|
||||
return SSLCertificate(cert_info_raw)
|
||||
else:
|
||||
return None
|
||||
|
||||
|
||||
# --- Properties now access the dictionary items directly via self[] ---
|
||||
@property
|
||||
def issuer(self) -> Dict[str, str]:
|
||||
return self.get("issuer", {}) # Use self.get for safety
|
||||
|
||||
@property
|
||||
def subject(self) -> Dict[str, str]:
|
||||
return self.get("subject", {})
|
||||
|
||||
@property
|
||||
def valid_from(self) -> str:
|
||||
return self.get("not_before", "")
|
||||
|
||||
@property
|
||||
def valid_until(self) -> str:
|
||||
return self.get("not_after", "")
|
||||
|
||||
@property
|
||||
def fingerprint(self) -> str:
|
||||
return self.get("fingerprint", "")
|
||||
|
||||
# --- Export methods can use `self` directly as it is the dict ---
|
||||
def to_json(self, filepath: Optional[str] = None) -> Optional[str]:
|
||||
"""
|
||||
Export certificate as JSON.
|
||||
|
||||
Args:
|
||||
filepath (Optional[str]): Path to save the JSON file (default: None).
|
||||
|
||||
Returns:
|
||||
Optional[str]: JSON string if successful, None otherwise.
|
||||
"""
|
||||
json_str = json.dumps(self._cert_info, indent=2, ensure_ascii=False)
|
||||
"""Export certificate as JSON."""
|
||||
# `self` is already the dictionary we want to serialize
|
||||
json_str = json.dumps(self, indent=2, ensure_ascii=False)
|
||||
if filepath:
|
||||
Path(filepath).write_text(json_str, encoding="utf-8")
|
||||
return None
|
||||
return json_str
|
||||
|
||||
def to_pem(self, filepath: Optional[str] = None) -> Optional[str]:
|
||||
"""
|
||||
Export certificate as PEM.
|
||||
|
||||
Args:
|
||||
filepath (Optional[str]): Path to save the PEM file (default: None).
|
||||
|
||||
Returns:
|
||||
Optional[str]: PEM string if successful, None otherwise.
|
||||
"""
|
||||
"""Export certificate as PEM."""
|
||||
try:
|
||||
# Decode the raw_cert (which should be string due to _decode)
|
||||
raw_cert_bytes = base64.b64decode(self.get("raw_cert", ""))
|
||||
x509 = OpenSSL.crypto.load_certificate(
|
||||
OpenSSL.crypto.FILETYPE_ASN1,
|
||||
base64.b64decode(self._cert_info["raw_cert"]),
|
||||
OpenSSL.crypto.FILETYPE_ASN1, raw_cert_bytes
|
||||
)
|
||||
pem_data = OpenSSL.crypto.dump_certificate(
|
||||
OpenSSL.crypto.FILETYPE_PEM, x509
|
||||
@@ -136,49 +180,25 @@ class SSLCertificate:
|
||||
Path(filepath).write_text(pem_data, encoding="utf-8")
|
||||
return None
|
||||
return pem_data
|
||||
except Exception:
|
||||
return None
|
||||
except Exception as e:
|
||||
print(f"Error converting to PEM: {e}")
|
||||
return None
|
||||
|
||||
def to_der(self, filepath: Optional[str] = None) -> Optional[bytes]:
|
||||
"""
|
||||
Export certificate as DER.
|
||||
|
||||
Args:
|
||||
filepath (Optional[str]): Path to save the DER file (default: None).
|
||||
|
||||
Returns:
|
||||
Optional[bytes]: DER bytes if successful, None otherwise.
|
||||
"""
|
||||
"""Export certificate as DER."""
|
||||
try:
|
||||
der_data = base64.b64decode(self._cert_info["raw_cert"])
|
||||
# Decode the raw_cert (which should be string due to _decode)
|
||||
der_data = base64.b64decode(self.get("raw_cert", ""))
|
||||
if filepath:
|
||||
Path(filepath).write_bytes(der_data)
|
||||
return None
|
||||
return der_data
|
||||
except Exception:
|
||||
return None
|
||||
except Exception as e:
|
||||
print(f"Error converting to DER: {e}")
|
||||
return None
|
||||
|
||||
@property
|
||||
def issuer(self) -> Dict[str, str]:
|
||||
"""Get certificate issuer information."""
|
||||
return self._cert_info.get("issuer", {})
|
||||
|
||||
@property
|
||||
def subject(self) -> Dict[str, str]:
|
||||
"""Get certificate subject information."""
|
||||
return self._cert_info.get("subject", {})
|
||||
|
||||
@property
|
||||
def valid_from(self) -> str:
|
||||
"""Get certificate validity start date."""
|
||||
return self._cert_info.get("not_before", "")
|
||||
|
||||
@property
|
||||
def valid_until(self) -> str:
|
||||
"""Get certificate validity end date."""
|
||||
return self._cert_info.get("not_after", "")
|
||||
|
||||
@property
|
||||
def fingerprint(self) -> str:
|
||||
"""Get certificate fingerprint."""
|
||||
return self._cert_info.get("fingerprint", "")
|
||||
# Optional: Add __repr__ for better debugging
|
||||
def __repr__(self) -> str:
|
||||
subject_cn = self.subject.get('CN', 'N/A')
|
||||
issuer_cn = self.issuer.get('CN', 'N/A')
|
||||
return f"<SSLCertificate Subject='{subject_cn}' Issuer='{issuer_cn}'>"
|
||||
@@ -10,12 +10,16 @@ CacheMode = Union['CacheModeType']
|
||||
CrawlResult = Union['CrawlResultType']
|
||||
CrawlerHub = Union['CrawlerHubType']
|
||||
BrowserProfiler = Union['BrowserProfilerType']
|
||||
# NEW: Add AsyncUrlSeederType
|
||||
AsyncUrlSeeder = Union['AsyncUrlSeederType']
|
||||
|
||||
# Configuration types
|
||||
BrowserConfig = Union['BrowserConfigType']
|
||||
CrawlerRunConfig = Union['CrawlerRunConfigType']
|
||||
HTTPCrawlerConfig = Union['HTTPCrawlerConfigType']
|
||||
LLMConfig = Union['LLMConfigType']
|
||||
# NEW: Add SeedingConfigType
|
||||
SeedingConfig = Union['SeedingConfigType']
|
||||
|
||||
# Content scraping types
|
||||
ContentScrapingStrategy = Union['ContentScrapingStrategyType']
|
||||
@@ -94,6 +98,8 @@ if TYPE_CHECKING:
|
||||
from .models import CrawlResult as CrawlResultType
|
||||
from .hub import CrawlerHub as CrawlerHubType
|
||||
from .browser_profiler import BrowserProfiler as BrowserProfilerType
|
||||
# NEW: Import AsyncUrlSeeder for type checking
|
||||
from .async_url_seeder import AsyncUrlSeeder as AsyncUrlSeederType
|
||||
|
||||
# Configuration imports
|
||||
from .async_configs import (
|
||||
@@ -101,6 +107,8 @@ if TYPE_CHECKING:
|
||||
CrawlerRunConfig as CrawlerRunConfigType,
|
||||
HTTPCrawlerConfig as HTTPCrawlerConfigType,
|
||||
LLMConfig as LLMConfigType,
|
||||
# NEW: Import SeedingConfig for type checking
|
||||
SeedingConfig as SeedingConfigType,
|
||||
)
|
||||
|
||||
# Content scraping imports
|
||||
@@ -184,4 +192,4 @@ if TYPE_CHECKING:
|
||||
|
||||
def create_llm_config(*args, **kwargs) -> 'LLMConfigType':
|
||||
from .async_configs import LLMConfig
|
||||
return LLMConfig(*args, **kwargs)
|
||||
return LLMConfig(*args, **kwargs)
|
||||
@@ -6,6 +6,7 @@ import html
|
||||
import lxml
|
||||
import re
|
||||
import os
|
||||
import subprocess
|
||||
import platform
|
||||
from .prompts import PROMPT_EXTRACT_BLOCKS
|
||||
from array import array
|
||||
@@ -20,7 +21,6 @@ from urllib.parse import urljoin
|
||||
import requests
|
||||
from requests.exceptions import InvalidSchema
|
||||
import xxhash
|
||||
from colorama import Fore, Style, init
|
||||
import textwrap
|
||||
import cProfile
|
||||
import pstats
|
||||
@@ -32,7 +32,6 @@ import hashlib
|
||||
|
||||
from urllib.robotparser import RobotFileParser
|
||||
import aiohttp
|
||||
from urllib.parse import urlparse, urlunparse
|
||||
from functools import lru_cache
|
||||
|
||||
from packaging import version
|
||||
@@ -43,6 +42,37 @@ from itertools import chain
|
||||
from collections import deque
|
||||
from typing import Generator, Iterable
|
||||
|
||||
import numpy as np
|
||||
|
||||
from urllib.parse import (
|
||||
urljoin, urlparse, urlunparse,
|
||||
parse_qsl, urlencode, quote, unquote
|
||||
)
|
||||
|
||||
|
||||
# Monkey patch to fix wildcard handling in urllib.robotparser
|
||||
from urllib.robotparser import RuleLine
|
||||
import re
|
||||
|
||||
original_applies_to = RuleLine.applies_to
|
||||
|
||||
def patched_applies_to(self, filename):
|
||||
# Handle wildcards in paths
|
||||
if '*' in self.path or '%2A' in self.path or self.path in ("*", "%2A"):
|
||||
pattern = self.path.replace('%2A', '*')
|
||||
pattern = re.escape(pattern).replace('\\*', '.*')
|
||||
pattern = '^' + pattern
|
||||
if pattern.endswith('\\$'):
|
||||
pattern = pattern[:-2] + '$'
|
||||
try:
|
||||
return bool(re.match(pattern, filename))
|
||||
except re.error:
|
||||
return original_applies_to(self, filename)
|
||||
return original_applies_to(self, filename)
|
||||
|
||||
RuleLine.applies_to = patched_applies_to
|
||||
# Monkey patch ends
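def _example_wildcard_robots():
    # Illustrative sketch, not part of the original module: with the patch
    # above, a wildcard Disallow rule matches nested paths; with the stock
    # parser the '*' is percent-encoded and the rule effectively never applies.
    # Exact behaviour may vary across Python versions.
    rp = RobotFileParser()
    rp.parse("User-agent: *\nDisallow: /private/*.html".splitlines())
    print(rp.can_fetch("*", "https://example.com/private/page.html"))  # expected: False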
|
||||
|
||||
def chunk_documents(
|
||||
documents: Iterable[str],
|
||||
chunk_token_threshold: int,
|
||||
@@ -136,13 +166,20 @@ def merge_chunks(
|
||||
word_token_ratio: float = 1.0,
|
||||
splitter: Callable = None
|
||||
) -> List[str]:
|
||||
"""Merges documents into chunks of specified token size.
|
||||
"""
|
||||
Merges a sequence of documents into chunks based on a target token count, with optional overlap.
|
||||
|
||||
Each document is split into tokens using the provided splitter function (defaults to str.split). Tokens are distributed into chunks aiming for the specified target size, with optional overlapping tokens between consecutive chunks. Returns a list of non-empty merged chunks as strings.
|
||||
|
||||
Args:
|
||||
docs: Input documents
|
||||
target_size: Desired token count per chunk
|
||||
overlap: Number of tokens to overlap between chunks
|
||||
word_token_ratio: Multiplier for word->token conversion
|
||||
docs: Sequence of input document strings to be merged.
|
||||
target_size: Target number of tokens per chunk.
|
||||
overlap: Number of tokens to overlap between consecutive chunks.
|
||||
word_token_ratio: Multiplier to estimate token count from word count.
|
||||
splitter: Callable used to split each document into tokens.
|
||||
|
||||
Returns:
|
||||
List of merged document chunks as strings, each not exceeding the target token size.
|
||||
"""
|
||||
# Pre-tokenize all docs and store token counts
|
||||
splitter = splitter or str.split
|
||||
@@ -151,7 +188,7 @@ def merge_chunks(
|
||||
total_tokens = 0
|
||||
|
||||
for doc in docs:
|
||||
tokens = doc.split()
|
||||
tokens = splitter(doc)
|
||||
count = int(len(tokens) * word_token_ratio)
|
||||
if count: # Skip empty docs
|
||||
token_counts.append(count)
|
||||
@@ -304,7 +341,7 @@ class RobotsParser:
|
||||
robots_url = f"{scheme}://{domain}/robots.txt"
|
||||
|
||||
async with aiohttp.ClientSession() as session:
|
||||
async with session.get(robots_url, timeout=2) as response:
|
||||
async with session.get(robots_url, timeout=2, ssl=False) as response:
|
||||
if response.status == 200:
|
||||
rules = await response.text()
|
||||
self._cache_rules(domain, rules)
|
||||
@@ -441,14 +478,13 @@ def create_box_message(
|
||||
str: A formatted string containing the styled message box.
|
||||
"""
|
||||
|
||||
init()
|
||||
|
||||
# Define border and text colors for different types
|
||||
styles = {
|
||||
"warning": (Fore.YELLOW, Fore.LIGHTYELLOW_EX, "⚠"),
|
||||
"info": (Fore.BLUE, Fore.LIGHTBLUE_EX, "ℹ"),
|
||||
"success": (Fore.GREEN, Fore.LIGHTGREEN_EX, "✓"),
|
||||
"error": (Fore.RED, Fore.LIGHTRED_EX, "×"),
|
||||
"warning": ("yellow", "bright_yellow", "⚠"),
|
||||
"info": ("blue", "bright_blue", "ℹ"),
|
||||
"debug": ("lightblack", "bright_black", "⋯"),
|
||||
"success": ("green", "bright_green", "✓"),
|
||||
"error": ("red", "bright_red", "×"),
|
||||
}
|
||||
|
||||
border_color, text_color, prefix = styles.get(type.lower(), styles["info"])
|
||||
@@ -480,12 +516,12 @@ def create_box_message(
|
||||
# Create the box with colored borders and lighter text
|
||||
horizontal_line = h_line * (width - 1)
|
||||
box = [
|
||||
f"{border_color}{tl}{horizontal_line}{tr}",
|
||||
f"[{border_color}]{tl}{horizontal_line}{tr}[/{border_color}]",
|
||||
*[
|
||||
f"{border_color}{v_line}{text_color} {line:<{width-2}}{border_color}{v_line}"
|
||||
f"[{border_color}]{v_line}[{text_color}] {line:<{width-2}}[/{text_color}][{border_color}]{v_line}[/{border_color}]"
|
||||
for line in formatted_lines
|
||||
],
|
||||
f"{border_color}{bl}{horizontal_line}{br}{Style.RESET_ALL}",
|
||||
f"[{border_color}]{bl}{horizontal_line}{br}[/{border_color}]",
|
||||
]
|
||||
|
||||
result = "\n".join(box)
|
||||
@@ -1111,6 +1147,23 @@ def get_content_of_website_optimized(
|
||||
css_selector: str = None,
|
||||
**kwargs,
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Extracts and cleans content from website HTML, optimizing for useful media and contextual information.
|
||||
|
||||
Parses the provided HTML to extract internal and external links, filters and scores images for usefulness, gathers contextual descriptions for media, removes unwanted or low-value elements, and converts the cleaned HTML to Markdown. Also extracts metadata and returns all structured content in a dictionary.
|
||||
|
||||
Args:
|
||||
url: The URL of the website being processed.
|
||||
html: The raw HTML content to extract from.
|
||||
word_count_threshold: Minimum word count for elements to be retained.
|
||||
css_selector: Optional CSS selector to restrict extraction to specific elements.
|
||||
|
||||
Returns:
|
||||
A dictionary containing Markdown content, cleaned HTML, extraction success status, media and link lists, and metadata.
|
||||
|
||||
Raises:
|
||||
InvalidCSSSelectorError: If a provided CSS selector does not match any elements.
|
||||
"""
|
||||
if not html:
|
||||
return None
|
||||
|
||||
@@ -1153,6 +1206,20 @@ def get_content_of_website_optimized(
|
||||
|
||||
def process_image(img, url, index, total_images):
|
||||
# Check if an image has valid display and inside undesired html elements
|
||||
"""
|
||||
Processes an HTML image element to determine its relevance and extract metadata.
|
||||
|
||||
Evaluates an image's visibility, context, and usefulness based on its attributes and parent elements. If the image passes validation and exceeds a usefulness score threshold, returns a dictionary with its source, alt text, contextual description, score, and type. Otherwise, returns None.
|
||||
|
||||
Args:
|
||||
img: The BeautifulSoup image tag to process.
|
||||
url: The base URL of the page containing the image.
|
||||
index: The index of the image in the list of images on the page.
|
||||
total_images: The total number of images on the page.
|
||||
|
||||
Returns:
|
||||
A dictionary with image metadata if the image is considered useful, or None otherwise.
|
||||
"""
|
||||
def is_valid_image(img, parent, parent_classes):
|
||||
style = img.get("style", "")
|
||||
src = img.get("src", "")
|
||||
@@ -1174,6 +1241,20 @@ def get_content_of_website_optimized(
|
||||
# Score an image for it's usefulness
|
||||
def score_image_for_usefulness(img, base_url, index, images_count):
|
||||
# Function to parse image height/width value and units
|
||||
"""
|
||||
Scores an HTML image element for usefulness based on size, format, attributes, and position.
|
||||
|
||||
The function evaluates an image's dimensions, file format, alt text, and its position among all images on the page to assign a usefulness score. Higher scores indicate images that are likely more relevant or informative for content extraction or summarization.
|
||||
|
||||
Args:
|
||||
img: The HTML image element to score.
|
||||
base_url: The base URL used to resolve relative image sources.
|
||||
index: The position of the image in the list of images on the page (zero-based).
|
||||
images_count: The total number of images on the page.
|
||||
|
||||
Returns:
|
||||
An integer usefulness score for the image.
|
||||
"""
|
||||
def parse_dimension(dimension):
|
||||
if dimension:
|
||||
match = re.match(r"(\d+)(\D*)", dimension)
|
||||
@@ -1188,6 +1269,16 @@ def get_content_of_website_optimized(
|
||||
# Fetch image file metadata to extract size and extension
|
||||
def fetch_image_file_size(img, base_url):
|
||||
# If src is relative path construct full URL, if not it may be CDN URL
|
||||
"""
|
||||
Fetches the file size of an image by sending a HEAD request to its URL.
|
||||
|
||||
Args:
|
||||
img: A BeautifulSoup tag representing the image element.
|
||||
base_url: The base URL to resolve relative image sources.
|
||||
|
||||
Returns:
|
||||
The value of the "Content-Length" header as a string if available, otherwise None.
|
||||
"""
|
||||
img_url = urljoin(base_url, img.get("src"))
|
||||
try:
|
||||
response = requests.head(img_url)
|
||||
@@ -1198,8 +1289,6 @@ def get_content_of_website_optimized(
|
||||
return None
|
||||
except InvalidSchema:
|
||||
return None
|
||||
finally:
|
||||
return
|
||||
|
||||
image_height = img.get("height")
|
||||
height_value, height_unit = parse_dimension(image_height)
|
||||
@@ -1458,6 +1547,14 @@ def extract_metadata_using_lxml(html, doc=None):
|
||||
content = tag.get("content", "").strip()
|
||||
if property_name and content:
|
||||
metadata[property_name] = content
|
||||
|
||||
# Article metadata
|
||||
article_tags = head.xpath('.//meta[starts-with(@property, "article:")]')
|
||||
for tag in article_tags:
|
||||
property_name = tag.get("property", "").strip()
|
||||
content = tag.get("content", "").strip()
|
||||
if property_name and content:
|
||||
metadata[property_name] = content
|
||||
|
||||
return metadata
|
||||
|
||||
@@ -1533,7 +1630,15 @@ def extract_metadata(html, soup=None):
|
||||
content = tag.get("content", "").strip()
|
||||
if property_name and content:
|
||||
metadata[property_name] = content
|
||||
|
||||
|
||||
# Article metadata
|
||||
article_tags = head.find_all("meta", attrs={"property": re.compile(r"^article:")})
|
||||
for tag in article_tags:
|
||||
property_name = tag.get("property", "").strip()
|
||||
content = tag.get("content", "").strip()
|
||||
if property_name and content:
|
||||
metadata[property_name] = content
|
||||
|
||||
return metadata
|
||||
|
||||
|
||||
@@ -2002,9 +2107,101 @@ def normalize_url(href, base_url):
|
||||
parsed_base = urlparse(base_url)
|
||||
if not parsed_base.scheme or not parsed_base.netloc:
|
||||
raise ValueError(f"Invalid base URL format: {base_url}")
|
||||
|
||||
if parsed_base.scheme.lower() not in ["http", "https"]:
|
||||
# Handle special protocols
|
||||
raise ValueError(f"Invalid base URL format: {base_url}")
|
||||
cleaned_href = href.strip()
|
||||
|
||||
# Use urljoin to handle all cases
|
||||
normalized = urljoin(base_url, href.strip())
|
||||
return urljoin(base_url, cleaned_href)
|
||||
|
||||
|
||||
|
||||
|
||||
def normalize_url(
|
||||
href: str,
|
||||
base_url: str,
|
||||
*,
|
||||
drop_query_tracking=True,
|
||||
sort_query=True,
|
||||
keep_fragment=False,
|
||||
extra_drop_params=None
|
||||
):
|
||||
"""
|
||||
Extended URL normalizer
|
||||
|
||||
Parameters
|
||||
----------
|
||||
href : str
|
||||
The raw link extracted from a page.
|
||||
base_url : str
|
||||
The page’s canonical URL (used to resolve relative links).
|
||||
drop_query_tracking : bool (default True)
|
||||
Remove common tracking query parameters.
|
||||
sort_query : bool (default True)
|
||||
Alphabetically sort query keys for deterministic output.
|
||||
keep_fragment : bool (default False)
|
||||
Preserve the hash fragment (#section) if you need in-page links.
|
||||
extra_drop_params : Iterable[str] | None
|
||||
Additional query keys to strip (case-insensitive).
|
||||
|
||||
Returns
|
||||
-------
|
||||
str | None
|
||||
A clean, canonical URL or None if href is empty/None.
|
||||
"""
|
||||
if not href:
|
||||
return None
|
||||
|
||||
# Resolve relative paths first
|
||||
full_url = urljoin(base_url, href.strip())
|
||||
|
||||
# Parse once, edit parts, then rebuild
|
||||
parsed = urlparse(full_url)
|
||||
|
||||
# ── netloc ──
|
||||
netloc = parsed.netloc.lower()
|
||||
|
||||
# ── path ──
|
||||
# Strip duplicate slashes and trailing “/” (except root)
|
||||
path = quote(unquote(parsed.path))
|
||||
if path.endswith('/') and path != '/':
|
||||
path = path.rstrip('/')
|
||||
|
||||
# ── query ──
|
||||
query = parsed.query
|
||||
if query:
|
||||
# explode, mutate, then rebuild
|
||||
params = [(k.lower(), v) for k, v in parse_qsl(query, keep_blank_values=True)]
|
||||
|
||||
if drop_query_tracking:
|
||||
default_tracking = {
|
||||
'utm_source', 'utm_medium', 'utm_campaign', 'utm_term',
|
||||
'utm_content', 'gclid', 'fbclid', 'ref', 'ref_src'
|
||||
}
|
||||
if extra_drop_params:
|
||||
default_tracking |= {p.lower() for p in extra_drop_params}
|
||||
params = [(k, v) for k, v in params if k not in default_tracking]
|
||||
|
||||
if sort_query:
|
||||
params.sort(key=lambda kv: kv[0])
|
||||
|
||||
query = urlencode(params, doseq=True) if params else ''
|
||||
|
||||
# ── fragment ──
|
||||
fragment = parsed.fragment if keep_fragment else ''
|
||||
|
||||
# Re-assemble
|
||||
normalized = urlunparse((
|
||||
parsed.scheme,
|
||||
netloc,
|
||||
path,
|
||||
parsed.params,
|
||||
query,
|
||||
fragment
|
||||
))
|
||||
|
||||
return normalized
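def _example_normalize_url():
    # Illustrative sketch, not part of the original module: with the defaults
    # above, tracking parameters are dropped, the remaining query is sorted,
    # the host is lower-cased and the fragment is removed.
    url = normalize_url("/docs/intro/?utm_source=x&b=2&a=1#top", "https://Example.com/base/")
    print(url)  # expected to be roughly: https://example.com/docs/intro?a=1&b=2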
|
||||
|
||||
|
||||
@@ -2047,7 +2244,7 @@ def normalize_url_for_deep_crawl(href, base_url):
|
||||
normalized = urlunparse((
|
||||
parsed.scheme,
|
||||
netloc,
|
||||
parsed.path.rstrip('/') or '/', # Normalize trailing slash
|
||||
parsed.path.rstrip('/'), # Normalize trailing slash
|
||||
parsed.params,
|
||||
query,
|
||||
fragment
|
||||
@@ -2075,7 +2272,7 @@ def efficient_normalize_url_for_deep_crawl(href, base_url):
|
||||
normalized = urlunparse((
|
||||
parsed.scheme,
|
||||
parsed.netloc.lower(),
|
||||
parsed.path,
|
||||
parsed.path.rstrip('/'),
|
||||
parsed.params,
|
||||
parsed.query,
|
||||
'' # Remove fragment
|
||||
@@ -2733,33 +2930,67 @@ def preprocess_html_for_schema(html_content, text_threshold=100, attr_value_thre
|
||||
# Also truncate tail text if present
|
||||
if element.tail and len(element.tail.strip()) > text_threshold:
|
||||
element.tail = element.tail.strip()[:text_threshold] + '...'
|
||||
|
||||
# 4. Find repeated patterns and keep only a few examples
|
||||
# This is a simplistic approach - more sophisticated pattern detection could be implemented
|
||||
pattern_elements = {}
|
||||
for element in tree.xpath('//*[contains(@class, "")]'):
|
||||
parent = element.getparent()
|
||||
|
||||
# 4. Detect duplicates and drop them in a single pass
|
||||
seen: dict[tuple, None] = {}
|
||||
for el in list(tree.xpath('//*[@class]')): # snapshot once, XPath is fast
|
||||
parent = el.getparent()
|
||||
if parent is None:
|
||||
continue
|
||||
|
||||
# Create a signature based on tag and classes
|
||||
classes = element.get('class', '')
|
||||
if not classes:
|
||||
|
||||
cls = el.get('class')
|
||||
if not cls:
|
||||
continue
|
||||
signature = f"{element.tag}.{classes}"
|
||||
|
||||
if signature in pattern_elements:
|
||||
pattern_elements[signature].append(element)
|
||||
|
||||
# ── build signature ───────────────────────────────────────────
|
||||
h = xxhash.xxh64() # stream, no big join()
|
||||
for txt in el.itertext():
|
||||
h.update(txt)
|
||||
sig = (el.tag, cls, h.intdigest()) # tuple cheaper & hashable
|
||||
|
||||
# ── first seen? keep – else drop ─────────────
|
||||
if sig in seen and parent is not None:
|
||||
parent.remove(el) # duplicate
|
||||
else:
|
||||
pattern_elements[signature] = [element]
|
||||
seen[sig] = None
|
||||
|
||||
# Keep only 3 examples of each repeating pattern
|
||||
for signature, elements in pattern_elements.items():
|
||||
if len(elements) > 3:
|
||||
# Keep the first 2 and last elements
|
||||
for element in elements[2:-1]:
|
||||
if element.getparent() is not None:
|
||||
element.getparent().remove(element)
|
||||
# # 4. Find repeated patterns and keep only a few examples
|
||||
# # This is a simplistic approach - more sophisticated pattern detection could be implemented
|
||||
# pattern_elements = {}
|
||||
# for element in tree.xpath('//*[contains(@class, "")]'):
|
||||
# parent = element.getparent()
|
||||
# if parent is None:
|
||||
# continue
|
||||
|
||||
# # Create a signature based on tag and classes
|
||||
# classes = element.get('class', '')
|
||||
# if not classes:
|
||||
# continue
|
||||
# innert_text = ''.join(element.xpath('.//text()'))
|
||||
# innert_text_hash = xxhash.xxh64(innert_text.encode()).hexdigest()
|
||||
# signature = f"{element.tag}.{classes}.{innert_text_hash}"
|
||||
|
||||
# if signature in pattern_elements:
|
||||
# pattern_elements[signature].append(element)
|
||||
# else:
|
||||
# pattern_elements[signature] = [element]
|
||||
|
||||
# # Keep only first examples of each repeating pattern
|
||||
# for signature, elements in pattern_elements.items():
|
||||
# if len(elements) > 1:
|
||||
# # Keep the first element and remove the rest
|
||||
# for element in elements[1:]:
|
||||
# if element.getparent() is not None:
|
||||
# element.getparent().remove(element)
|
||||
|
||||
|
||||
# # Keep only 3 examples of each repeating pattern
|
||||
# for signature, elements in pattern_elements.items():
|
||||
# if len(elements) > 3:
|
||||
# # Keep the first 2 and last elements
|
||||
# for element in elements[2:-1]:
|
||||
# if element.getparent() is not None:
|
||||
# element.getparent().remove(element)
|
||||
|
||||
# 5. Convert back to string
|
||||
result = etree.tostring(tree, encoding='unicode', method='html')
|
||||
@@ -2772,6 +3003,393 @@ def preprocess_html_for_schema(html_content, text_threshold=100, attr_value_thre
|
||||
|
||||
except Exception as e:
|
||||
# Fallback for parsing errors
|
||||
return html_content[:max_size] if len(html_content) > max_size else html_content
|
||||
|
||||
return html_content[:max_size] if len(html_content) > max_size else html_content
|
||||
|
||||
def start_colab_display_server():
|
||||
"""
|
||||
Start virtual display server in Google Colab.
|
||||
Raises error if not running in Colab environment.
|
||||
"""
|
||||
# Check if running in Google Colab
|
||||
try:
|
||||
import google.colab
|
||||
from google.colab import output
|
||||
from IPython.display import IFrame, display
|
||||
except ImportError:
|
||||
raise RuntimeError("This function must be run in Google Colab environment.")
|
||||
|
||||
import os, time, subprocess
|
||||
|
||||
os.environ["DISPLAY"] = ":99"
|
||||
|
||||
# Xvfb
|
||||
xvfb = subprocess.Popen(["Xvfb", ":99", "-screen", "0", "1280x720x24"])
|
||||
time.sleep(2)
|
||||
|
||||
# minimal window manager
|
||||
fluxbox = subprocess.Popen(["fluxbox"])
|
||||
|
||||
# VNC → X
|
||||
x11vnc = subprocess.Popen(["x11vnc",
|
||||
"-display", ":99",
|
||||
"-nopw", "-forever", "-shared",
|
||||
"-rfbport", "5900", "-quiet"])
|
||||
|
||||
# websockify → VNC
|
||||
novnc = subprocess.Popen(["/opt/novnc/utils/websockify/run",
|
||||
"6080", "localhost:5900",
|
||||
"--web", "/opt/novnc"])
|
||||
|
||||
time.sleep(2) # give ports a moment
|
||||
|
||||
# Colab proxy url
|
||||
url = output.eval_js("google.colab.kernel.proxyPort(6080)")
|
||||
display(IFrame(f"{url}/vnc.html?autoconnect=true&resize=scale", width=1024, height=768))
|
||||
|
||||
|
||||
|
||||
def setup_colab_environment():
|
||||
"""
|
||||
Alternative setup using IPython magic commands
|
||||
"""
|
||||
from IPython import get_ipython
|
||||
ipython = get_ipython()
|
||||
|
||||
print("🚀 Setting up Crawl4AI environment in Google Colab...")
|
||||
|
||||
# Run the bash commands
|
||||
ipython.run_cell_magic('bash', '', '''
|
||||
set -e
|
||||
|
||||
echo "📦 Installing system dependencies..."
|
||||
apt-get update -y
|
||||
apt-get install -y xvfb x11vnc fluxbox websockify git
|
||||
|
||||
echo "📥 Setting up virtual display..."
|
||||
git clone https://github.com/novnc/noVNC /opt/novnc
|
||||
git clone https://github.com/novnc/websockify /opt/novnc/utils/websockify
|
||||
|
||||
pip install -q nest_asyncio google-colab
|
||||
echo "✅ Setup complete!"
|
||||
''')
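def _example_colab_bootstrap():
    # Illustrative sketch, not part of the original module: suggested call
    # order inside a Colab notebook cell.
    setup_colab_environment()       # install Xvfb / x11vnc / fluxbox / noVNC
    start_colab_display_server()    # start display :99 and embed the noVNC viewer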
|
||||
|
||||
|
||||
# Link Quality Scoring Functions
|
||||
def extract_page_context(page_title: str, headlines_text: str, meta_description: str, base_url: str) -> dict:
|
||||
"""
|
||||
Extract page context for link scoring - called ONCE per page for performance.
|
||||
Parser-agnostic function that takes pre-extracted data.
|
||||
|
||||
Args:
|
||||
page_title: Title of the page
|
||||
headlines_text: Combined text from h1, h2, h3 elements
|
||||
meta_description: Meta description content
|
||||
base_url: Base URL of the page
|
||||
|
||||
Returns:
|
||||
Dictionary containing page context data for fast link scoring
|
||||
"""
|
||||
context = {
|
||||
'terms': set(),
|
||||
'headlines': headlines_text or '',
|
||||
'meta_description': meta_description or '',
|
||||
'domain': '',
|
||||
'is_docs_site': False
|
||||
}
|
||||
|
||||
try:
|
||||
from urllib.parse import urlparse
|
||||
parsed = urlparse(base_url)
|
||||
context['domain'] = parsed.netloc.lower()
|
||||
|
||||
# Check if this is a documentation/reference site
|
||||
context['is_docs_site'] = any(indicator in context['domain']
|
||||
for indicator in ['docs.', 'api.', 'developer.', 'reference.'])
|
||||
|
||||
# Create term set for fast intersection (performance optimization)
|
||||
all_text = ((page_title or '') + ' ' + context['headlines'] + ' ' + context['meta_description']).lower()
|
||||
# Simple tokenization - fast and sufficient for scoring
|
||||
context['terms'] = set(word.strip('.,!?;:"()[]{}')
|
||||
for word in all_text.split()
|
||||
if len(word.strip('.,!?;:"()[]{}')) > 2)
|
||||
|
||||
except Exception:
|
||||
# Fail gracefully - return empty context
|
||||
pass
|
||||
|
||||
return context
|
||||
|
||||
|
||||
def calculate_link_intrinsic_score(
|
||||
link_text: str,
|
||||
url: str,
|
||||
title_attr: str,
|
||||
class_attr: str,
|
||||
rel_attr: str,
|
||||
page_context: dict
|
||||
) -> float:
|
||||
"""
|
||||
Ultra-fast link quality scoring using only provided data (no DOM access needed).
|
||||
Parser-agnostic function.
|
||||
|
||||
Args:
|
||||
link_text: Text content of the link
|
||||
url: Link URL
|
||||
title_attr: Title attribute of the link
|
||||
class_attr: Class attribute of the link
|
||||
rel_attr: Rel attribute of the link
|
||||
page_context: Pre-computed page context from extract_page_context()
|
||||
|
||||
Returns:
|
||||
Quality score (0.0 - 10.0), higher is better
|
||||
"""
|
||||
score = 0.0
|
||||
|
||||
try:
|
||||
# 1. ATTRIBUTE QUALITY (string analysis - very fast)
|
||||
if title_attr and len(title_attr.strip()) > 3:
|
||||
score += 1.0
|
||||
|
||||
class_str = (class_attr or '').lower()
|
||||
# Navigation/important classes boost score
|
||||
if any(nav_class in class_str for nav_class in ['nav', 'menu', 'primary', 'main', 'important']):
|
||||
score += 1.5
|
||||
# Marketing/ad classes reduce score
|
||||
if any(bad_class in class_str for bad_class in ['ad', 'sponsor', 'track', 'promo', 'banner']):
|
||||
score -= 1.0
|
||||
|
||||
rel_str = (rel_attr or '').lower()
|
||||
# Semantic rel values
|
||||
if any(good_rel in rel_str for good_rel in ['canonical', 'next', 'prev', 'chapter']):
|
||||
score += 1.0
|
||||
if any(bad_rel in rel_str for bad_rel in ['nofollow', 'sponsored', 'ugc']):
|
||||
score -= 0.5
|
||||
|
||||
# 2. URL STRUCTURE QUALITY (string operations - very fast)
|
||||
url_lower = url.lower()
|
||||
|
||||
# High-value path patterns
|
||||
if any(good_path in url_lower for good_path in ['/docs/', '/api/', '/guide/', '/tutorial/', '/reference/', '/manual/']):
|
||||
score += 2.0
|
||||
elif any(medium_path in url_lower for medium_path in ['/blog/', '/article/', '/post/', '/news/']):
|
||||
score += 1.0
|
||||
|
||||
# Penalize certain patterns
|
||||
if any(bad_path in url_lower for bad_path in ['/admin/', '/login/', '/cart/', '/checkout/', '/track/', '/click/']):
|
||||
score -= 1.5
|
||||
|
||||
# URL depth (shallow URLs often more important)
|
||||
url_depth = url.count('/') - 2 # Subtract protocol and domain
|
||||
if url_depth <= 2:
|
||||
score += 1.0
|
||||
elif url_depth > 5:
|
||||
score -= 0.5
|
||||
|
||||
# HTTPS bonus
|
||||
if url.startswith('https://'):
|
||||
score += 0.5
|
||||
|
||||
# 3. TEXT QUALITY (string analysis - very fast)
|
||||
if link_text:
|
||||
text_clean = link_text.strip()
|
||||
if len(text_clean) > 3:
|
||||
score += 1.0
|
||||
|
||||
# Multi-word links are usually more descriptive
|
||||
word_count = len(text_clean.split())
|
||||
if word_count >= 2:
|
||||
score += 0.5
|
||||
if word_count >= 4:
|
||||
score += 0.5
|
||||
|
||||
# Avoid generic link text
|
||||
generic_texts = ['click here', 'read more', 'more info', 'link', 'here']
|
||||
if text_clean.lower() in generic_texts:
|
||||
score -= 1.0
|
||||
|
||||
# 4. CONTEXTUAL RELEVANCE (pre-computed page terms - very fast)
|
||||
if page_context.get('terms') and link_text:
|
||||
link_words = set(word.strip('.,!?;:"()[]{}').lower()
|
||||
for word in link_text.split()
|
||||
if len(word.strip('.,!?;:"()[]{}')) > 2)
|
||||
|
||||
if link_words:
|
||||
# Calculate word overlap ratio
|
||||
overlap = len(link_words & page_context['terms'])
|
||||
if overlap > 0:
|
||||
relevance_ratio = overlap / min(len(link_words), 10) # Cap to avoid over-weighting
|
||||
score += relevance_ratio * 2.0 # Up to 2 points for relevance
|
||||
|
||||
# 5. DOMAIN CONTEXT BONUSES (very fast string checks)
|
||||
if page_context.get('is_docs_site', False):
|
||||
# Documentation sites: prioritize internal navigation
|
||||
if link_text and any(doc_keyword in link_text.lower()
|
||||
for doc_keyword in ['api', 'reference', 'guide', 'tutorial', 'example']):
|
||||
score += 1.0
|
||||
|
||||
except Exception:
|
||||
# Fail gracefully - return minimal score
|
||||
score = 0.5
|
||||
|
||||
# Ensure score is within reasonable bounds
|
||||
return max(0.0, min(score, 10.0))
|
||||
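# Illustrative example (not part of the original module): scoring a documentation
# link against a pre-computed page context. All values are hypothetical.
#
#   ctx = extract_page_context(
#       page_title="Crawl4AI Docs",
#       headlines_text="Installation Guide",
#       meta_description="How to install",
#       base_url="https://docs.crawl4ai.com/",
#   )
#   score = calculate_link_intrinsic_score(
#       link_text="Installation guide",
#       url="https://docs.crawl4ai.com/core/installation/",
#       title_attr="Installation guide",
#       class_attr="nav-link",
#       rel_attr="",
#       page_context=ctx,
#   )
#   # Title attribute (+1.0), nav class (+1.5), HTTPS (+0.5), descriptive two-word
#   # text (+1.0, +0.5), full term overlap (+2.0) and the docs-site keyword bonus
#   # (+1.0) add up to 7.5 for this link.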
|
||||
|
||||
def calculate_total_score(
|
||||
intrinsic_score: Optional[float] = None,
|
||||
contextual_score: Optional[float] = None,
|
||||
score_links_enabled: bool = False,
|
||||
query_provided: bool = False
|
||||
) -> float:
|
||||
"""
|
||||
Calculate combined total score from intrinsic and contextual scores with smart fallbacks.
|
||||
|
||||
Args:
|
||||
intrinsic_score: Quality score based on URL structure, text, and context (0-10)
|
||||
contextual_score: BM25 relevance score based on query and head content (0-1 typically)
|
||||
score_links_enabled: Whether link scoring is enabled
|
||||
query_provided: Whether a query was provided for contextual scoring
|
||||
|
||||
Returns:
|
||||
Combined total score (0-10 scale)
|
||||
|
||||
Scoring Logic:
|
||||
- No scoring: return 5.0 (neutral score)
|
||||
- Only intrinsic: return normalized intrinsic score
|
||||
- Only contextual: return contextual score scaled to 10
|
||||
- Both: weighted combination (70% intrinsic, 30% contextual scaled)
|
||||
"""
|
||||
# Case 1: No scoring enabled at all
|
||||
if not score_links_enabled:
|
||||
return 5.0 # Neutral score - all links treated equally
|
||||
|
||||
# Normalize scores to handle None values
|
||||
intrinsic = intrinsic_score if intrinsic_score is not None else 0.0
|
||||
contextual = contextual_score if contextual_score is not None else 0.0
|
||||
|
||||
# Case 2: Only intrinsic scoring (no query provided or no head extraction)
|
||||
if not query_provided or contextual_score is None:
|
||||
# Use intrinsic score directly (already 0-10 scale)
|
||||
return max(0.0, min(intrinsic, 10.0))
|
||||
|
||||
# Case 3: Both intrinsic and contextual scores available
|
||||
# Scale contextual score (typically 0-1) to 0-10 range
|
||||
contextual_scaled = min(contextual * 10.0, 10.0)
|
||||
|
||||
# Weighted combination: 70% intrinsic (structure/content quality) + 30% contextual (query relevance)
|
||||
# This gives more weight to link quality while still considering relevance
|
||||
total = (intrinsic * 0.7) + (contextual_scaled * 0.3)
|
||||
|
||||
return max(0.0, min(total, 10.0))
|
||||
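# Illustrative example (not part of the original module): with link scoring enabled
# and a query provided, intrinsic_score=6.0 and contextual_score=0.8 combine as
# 6.0 * 0.7 + (0.8 * 10) * 0.3 = 6.6.
#
#   calculate_total_score(
#       intrinsic_score=6.0,
#       contextual_score=0.8,
#       score_links_enabled=True,
#       query_provided=True,
#   )  # -> 6.6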
|
||||
|
||||
# Embedding utilities
|
||||
async def get_text_embeddings(
|
||||
texts: List[str],
|
||||
llm_config: Optional[Dict] = None,
|
||||
model_name: str = "sentence-transformers/all-MiniLM-L6-v2",
|
||||
batch_size: int = 32
|
||||
) -> np.ndarray:
|
||||
"""
|
||||
Compute embeddings for a list of texts using specified model.
|
||||
|
||||
Args:
|
||||
texts: List of texts to embed
|
||||
llm_config: Optional LLM configuration for API-based embeddings
|
||||
model_name: Model name (used when llm_config is None)
|
||||
batch_size: Batch size for processing
|
||||
|
||||
Returns:
|
||||
numpy array of embeddings
|
||||
"""
|
||||
import numpy as np
|
||||
|
||||
if not texts:
|
||||
return np.array([])
|
||||
|
||||
# If LLMConfig provided, use litellm for embeddings
|
||||
if llm_config is not None:
|
||||
from litellm import aembedding
|
||||
|
||||
# Get embedding model from config or use default
|
||||
embedding_model = llm_config.get('provider', 'text-embedding-3-small')
|
||||
api_base = llm_config.get('base_url', llm_config.get('api_base'))
|
||||
|
||||
# Prepare kwargs
|
||||
kwargs = {
|
||||
'model': embedding_model,
|
||||
'input': texts,
|
||||
'api_key': llm_config.get('api_token', llm_config.get('api_key'))
|
||||
}
|
||||
|
||||
if api_base:
|
||||
kwargs['api_base'] = api_base
|
||||
|
||||
# Handle OpenAI-compatible endpoints
|
||||
if api_base and 'openai/' not in embedding_model:
|
||||
kwargs['model'] = f"openai/{embedding_model}"
|
||||
|
||||
# Get embeddings
|
||||
response = await aembedding(**kwargs)
|
||||
|
||||
# Extract embeddings from response
|
||||
embeddings = []
|
||||
for item in response.data:
|
||||
embeddings.append(item['embedding'])
|
||||
|
||||
return np.array(embeddings)
|
||||
|
||||
# Default: use sentence-transformers
|
||||
else:
|
||||
# Lazy load to avoid importing heavy libraries unless needed
|
||||
try:
|
||||
from sentence_transformers import SentenceTransformer
|
||||
except ImportError:
|
||||
raise ImportError(
|
||||
"sentence-transformers is required for local embeddings. "
|
||||
"Install it with: pip install 'crawl4ai[transformer]' or pip install sentence-transformers"
|
||||
)
|
||||
|
||||
# Cache the model in function attribute to avoid reloading
|
||||
if not hasattr(get_text_embeddings, '_models'):
|
||||
get_text_embeddings._models = {}
|
||||
|
||||
if model_name not in get_text_embeddings._models:
|
||||
get_text_embeddings._models[model_name] = SentenceTransformer(model_name)
|
||||
|
||||
encoder = get_text_embeddings._models[model_name]
|
||||
|
||||
# Batch encode for efficiency
|
||||
embeddings = encoder.encode(
|
||||
texts,
|
||||
batch_size=batch_size,
|
||||
show_progress_bar=False,
|
||||
convert_to_numpy=True
|
||||
)
|
||||
|
||||
return embeddings
|
||||
|
||||
|
||||
def get_text_embeddings_sync(
|
||||
texts: List[str],
|
||||
llm_config: Optional[Dict] = None,
|
||||
model_name: str = "sentence-transformers/all-MiniLM-L6-v2",
|
||||
batch_size: int = 32
|
||||
) -> np.ndarray:
|
||||
"""Synchronous wrapper for get_text_embeddings"""
|
||||
import numpy as np
|
||||
return asyncio.run(get_text_embeddings(texts, llm_config, model_name, batch_size))
|
||||
|
||||
|
||||
def cosine_similarity(vec1: np.ndarray, vec2: np.ndarray) -> float:
|
||||
"""Calculate cosine similarity between two vectors"""
|
||||
import numpy as np
|
||||
dot_product = np.dot(vec1, vec2)
|
||||
norm_product = np.linalg.norm(vec1) * np.linalg.norm(vec2)
|
||||
return float(dot_product / norm_product) if norm_product != 0 else 0.0
|
||||
|
||||
|
||||
def cosine_distance(vec1: np.ndarray, vec2: np.ndarray) -> float:
|
||||
"""Calculate cosine distance (1 - similarity) between two vectors"""
|
||||
return 1 - cosine_similarity(vec1, vec2)
|
||||
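# Illustrative usage sketch (not part of the original module). With llm_config=None
# this path needs the optional sentence-transformers install; the texts are arbitrary.
#
#   vecs = get_text_embeddings_sync(["install crawl4ai", "docker deployment guide"])
#   sim = cosine_similarity(vecs[0], vecs[1])
#   dist = cosine_distance(vecs[0], vecs[1])  # always equals 1 - sim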
|
||||
|
||||
@@ -1,644 +0,0 @@
|
||||
# Crawl4AI Docker Guide 🐳
|
||||
|
||||
## Table of Contents
|
||||
- [Prerequisites](#prerequisites)
|
||||
- [Installation](#installation)
|
||||
- [Option 1: Using Docker Compose (Recommended)](#option-1-using-docker-compose-recommended)
|
||||
- [Option 2: Manual Local Build & Run](#option-2-manual-local-build--run)
|
||||
- [Option 3: Using Pre-built Docker Hub Images](#option-3-using-pre-built-docker-hub-images)
|
||||
- [Dockerfile Parameters](#dockerfile-parameters)
|
||||
- [Using the API](#using-the-api)
|
||||
- [Understanding Request Schema](#understanding-request-schema)
|
||||
- [REST API Examples](#rest-api-examples)
|
||||
- [Python SDK](#python-sdk)
|
||||
- [Metrics & Monitoring](#metrics--monitoring)
|
||||
- [Deployment Scenarios](#deployment-scenarios)
|
||||
- [Complete Examples](#complete-examples)
|
||||
- [Server Configuration](#server-configuration)
|
||||
- [Understanding config.yml](#understanding-configyml)
|
||||
- [JWT Authentication](#jwt-authentication)
|
||||
- [Configuration Tips and Best Practices](#configuration-tips-and-best-practices)
|
||||
- [Customizing Your Configuration](#customizing-your-configuration)
|
||||
- [Configuration Recommendations](#configuration-recommendations)
|
||||
- [Getting Help](#getting-help)
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Before we dive in, make sure you have:
|
||||
- Docker installed and running (version 20.10.0 or higher), including `docker compose` (usually bundled with Docker Desktop).
|
||||
- `git` for cloning the repository.
|
||||
- At least 4GB of RAM available for the container (more recommended for heavy use).
|
||||
- Python 3.10+ (if using the Python SDK).
|
||||
- Node.js 16+ (if using the Node.js examples).
|
||||
|
||||
> 💡 **Pro tip**: Run `docker info` to check your Docker installation and available resources.
|
||||
|
||||
## Installation
|
||||
|
||||
We offer several ways to get the Crawl4AI server running. Docker Compose is the easiest way to manage local builds and runs.
|
||||
|
||||
### Option 1: Using Docker Compose (Recommended)
|
||||
|
||||
Docker Compose simplifies building and running the service, especially for local development and testing across different platforms.
|
||||
|
||||
#### 1. Clone Repository
|
||||
|
||||
```bash
|
||||
git clone https://github.com/unclecode/crawl4ai.git
|
||||
cd crawl4ai
|
||||
```
|
||||
|
||||
#### 2. Environment Setup (API Keys)
|
||||
|
||||
If you plan to use LLMs, copy the example environment file and add your API keys. This file should be in the **project root directory**.
|
||||
|
||||
```bash
|
||||
# Make sure you are in the 'crawl4ai' root directory
|
||||
cp deploy/docker/.llm.env.example .llm.env
|
||||
|
||||
# Now edit .llm.env and add your API keys
|
||||
# Example content:
|
||||
# OPENAI_API_KEY=sk-your-key
|
||||
# ANTHROPIC_API_KEY=your-anthropic-key
|
||||
# ...
|
||||
```
|
||||
> 🔑 **Note**: Keep your API keys secure! Never commit `.llm.env` to version control.
|
||||
|
||||
#### 3. Build and Run with Compose
|
||||
|
||||
The `docker-compose.yml` file in the project root defines services for different scenarios using **profiles**.
|
||||
|
||||
* **Build and Run Locally (AMD64):**
|
||||
```bash
|
||||
# Builds the image locally using Dockerfile and runs it
|
||||
docker compose --profile local-amd64 up --build -d
|
||||
```
|
||||
|
||||
* **Build and Run Locally (ARM64):**
|
||||
```bash
|
||||
# Builds the image locally using Dockerfile and runs it
|
||||
docker compose --profile local-arm64 up --build -d
|
||||
```
|
||||
|
||||
* **Run Pre-built Image from Docker Hub (AMD64):**
|
||||
```bash
|
||||
# Pulls and runs the specified AMD64 image from Docker Hub
|
||||
# (Set VERSION env var for specific tags, e.g., VERSION=0.5.1-d1)
|
||||
docker compose --profile hub-amd64 up -d
|
||||
```
|
||||
|
||||
* **Run Pre-built Image from Docker Hub (ARM64):**
|
||||
```bash
|
||||
# Pulls and runs the specified ARM64 image from Docker Hub
|
||||
docker compose --profile hub-arm64 up -d
|
||||
```
|
||||
|
||||
> The server will be available at `http://localhost:11235`.
|
||||
|
||||
#### 4. Stopping Compose Services
|
||||
|
||||
```bash
|
||||
# Stop the service(s) associated with a profile (e.g., local-amd64)
|
||||
docker compose --profile local-amd64 down
|
||||
```
|
||||
|
||||
### Option 2: Manual Local Build & Run
|
||||
|
||||
Use these steps if you prefer not to use Docker Compose for local builds.
|
||||
|
||||
#### 1. Clone Repository & Setup Environment
|
||||
|
||||
Follow steps 1 and 2 from the Docker Compose section above (clone repo, `cd crawl4ai`, create `.llm.env` in the root).
|
||||
|
||||
#### 2. Build the Image (Multi-Arch)
|
||||
|
||||
Use `docker buildx` to build the image. This example builds for multiple platforms and loads the image matching your host architecture into the local Docker daemon.
|
||||
|
||||
```bash
|
||||
# Make sure you are in the 'crawl4ai' root directory
|
||||
docker buildx build --platform linux/amd64,linux/arm64 -t crawl4ai-local:latest --load .
|
||||
```
|
||||
|
||||
#### 3. Run the Container
|
||||
|
||||
* **Basic run (no LLM support):**
|
||||
```bash
|
||||
# Replace --platform if your host is ARM64
|
||||
docker run -d \
|
||||
-p 11235:11235 \
|
||||
--name crawl4ai-standalone \
|
||||
--shm-size=1g \
|
||||
--platform linux/amd64 \
|
||||
crawl4ai-local:latest
|
||||
```
|
||||
|
||||
* **With LLM support:**
|
||||
```bash
|
||||
# Make sure .llm.env is in the current directory (project root)
|
||||
# Replace --platform if your host is ARM64
|
||||
docker run -d \
|
||||
-p 11235:11235 \
|
||||
--name crawl4ai-standalone \
|
||||
--env-file .llm.env \
|
||||
--shm-size=1g \
|
||||
--platform linux/amd64 \
|
||||
crawl4ai-local:latest
|
||||
```
|
||||
|
||||
> The server will be available at `http://localhost:11235`.
|
||||
|
||||
#### 4. Stopping the Manual Container
|
||||
|
||||
```bash
|
||||
docker stop crawl4ai-standalone && docker rm crawl4ai-standalone
|
||||
```
|
||||
|
||||
### Option 3: Using Pre-built Docker Hub Images
|
||||
|
||||
Pull and run images directly from Docker Hub without building locally.
|
||||
|
||||
#### 1. Pull the Image
|
||||
|
||||
We use a versioning scheme like `LIBRARY_VERSION-dREVISION` (e.g., `0.5.1-d1`). The `latest` tag points to the most recent stable release. Images are built with multi-arch manifests, so Docker usually pulls the correct version for your system automatically.
|
||||
|
||||
```bash
|
||||
# Pull a specific version (recommended for stability)
|
||||
docker pull unclecode/crawl4ai:0.5.1-d1
|
||||
|
||||
# Or pull the latest stable version
|
||||
docker pull unclecode/crawl4ai:latest
|
||||
```
|
||||
|
||||
#### 2. Setup Environment (API Keys)
|
||||
|
||||
If using LLMs, create the `.llm.env` file in a directory of your choice, similar to Step 2 in the Compose section.
|
||||
|
||||
#### 3. Run the Container
|
||||
|
||||
* **Basic run:**
|
||||
```bash
|
||||
docker run -d \
|
||||
-p 11235:11235 \
|
||||
--name crawl4ai-hub \
|
||||
--shm-size=1g \
|
||||
unclecode/crawl4ai:0.5.1-d1 # Or use :latest
|
||||
```
|
||||
|
||||
* **With LLM support:**
|
||||
```bash
|
||||
# Make sure .llm.env is in the current directory you are running docker from
|
||||
docker run -d \
|
||||
-p 11235:11235 \
|
||||
--name crawl4ai-hub \
|
||||
--env-file .llm.env \
|
||||
--shm-size=1g \
|
||||
unclecode/crawl4ai:0.5.1-d1 # Or use :latest
|
||||
```
|
||||
|
||||
> The server will be available at `http://localhost:11235`.
|
||||
|
||||
#### 4. Stopping the Hub Container
|
||||
|
||||
```bash
|
||||
docker stop crawl4ai-hub && docker rm crawl4ai-hub
|
||||
```
|
||||
|
||||
#### Docker Hub Versioning Explained
|
||||
|
||||
* **Image Name:** `unclecode/crawl4ai`
|
||||
* **Tag Format:** `LIBRARY_VERSION-dREVISION`
|
||||
* `LIBRARY_VERSION`: The Semantic Version of the core `crawl4ai` Python library included (e.g., `0.5.1`).
|
||||
* `dREVISION`: An incrementing number (starting at `d1`) for Docker build changes made *without* changing the library version (e.g., base image updates, dependency fixes). Resets to `d1` for each new `LIBRARY_VERSION`.
|
||||
* **Example:** `unclecode/crawl4ai:0.5.1-d1`
|
||||
* **`latest` Tag:** Points to the most recent stable `LIBRARY_VERSION-dREVISION`.
|
||||
* **Multi-Arch:** Images support `linux/amd64` and `linux/arm64`. Docker automatically selects the correct architecture.
|
||||
|
||||
---
|
||||
|
||||
*(Rest of the document remains largely the same, but with key updates below)*
|
||||
|
||||
---
|
||||
|
||||
## Dockerfile Parameters
|
||||
|
||||
You can customize the image build process using build arguments (`--build-arg`). These are typically used via `docker buildx build` or within the `docker-compose.yml` file.
|
||||
|
||||
```bash
|
||||
# Example: Build with 'all' features using buildx
|
||||
docker buildx build \
|
||||
--platform linux/amd64,linux/arm64 \
|
||||
--build-arg INSTALL_TYPE=all \
|
||||
-t yourname/crawl4ai-all:latest \
|
||||
--load \
|
||||
. # Build from root context
|
||||
```
|
||||
|
||||
### Build Arguments Explained
|
||||
|
||||
| Argument | Description | Default | Options |
|
||||
| :----------- | :--------------------------------------- | :-------- | :--------------------------------- |
|
||||
| INSTALL_TYPE | Feature set | `default` | `default`, `all`, `torch`, `transformer` |
|
||||
| ENABLE_GPU | GPU support (CUDA for AMD64) | `false` | `true`, `false` |
|
||||
| APP_HOME | Install path inside container (advanced) | `/app` | any valid path |
|
||||
| USE_LOCAL | Install library from local source | `true` | `true`, `false` |
|
||||
| GITHUB_REPO | Git repo to clone if USE_LOCAL=false | *(see Dockerfile)* | any git URL |
|
||||
| GITHUB_BRANCH| Git branch to clone if USE_LOCAL=false | `main` | any branch name |
|
||||
|
||||
*(Note: PYTHON_VERSION is fixed by the `FROM` instruction in the Dockerfile)*
|
||||
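If you drive local builds through Docker Compose instead of calling `buildx` directly, the same arguments can be passed under the service's `build` section. A minimal sketch (the service name and layout here are illustrative, not copied from the repository's `docker-compose.yml`):

```yaml
services:
  crawl4ai:
    build:
      context: .
      args:
        INSTALL_TYPE: all
        ENABLE_GPU: "false"
```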
|
||||
### Build Best Practices
|
||||
|
||||
1. **Choose the Right Install Type**
|
||||
* `default`: Basic installation, smallest image size. Suitable for most standard web scraping and markdown generation.
|
||||
* `all`: Full features including `torch` and `transformers` for advanced extraction strategies (e.g., CosineStrategy, certain LLM filters). Significantly larger image. Ensure you need these extras.
|
||||
2. **Platform Considerations**
|
||||
* Use `buildx` for building multi-architecture images, especially for pushing to registries.
|
||||
* Use `docker compose` profiles (`local-amd64`, `local-arm64`) for easy platform-specific local builds.
|
||||
3. **Performance Optimization**
|
||||
* The image automatically includes platform-specific optimizations (OpenMP for AMD64, OpenBLAS for ARM64).
|
||||
|
||||
---
|
||||
|
||||
## Using the API
|
||||
|
||||
Communicate with the running Docker server via its REST API (defaulting to `http://localhost:11235`). You can use the Python SDK or make direct HTTP requests.
|
||||
|
||||
### Python SDK
|
||||
|
||||
Install the SDK: `pip install crawl4ai`
|
||||
|
||||
```python
|
||||
import asyncio
|
||||
from crawl4ai.docker_client import Crawl4aiDockerClient
|
||||
from crawl4ai import BrowserConfig, CrawlerRunConfig, CacheMode # Assuming you have crawl4ai installed
|
||||
|
||||
async def main():
|
||||
# Point to the correct server port
|
||||
async with Crawl4aiDockerClient(base_url="http://localhost:11235", verbose=True) as client:
|
||||
# If JWT is enabled on the server, authenticate first:
|
||||
# await client.authenticate("user@example.com") # See Server Configuration section
|
||||
|
||||
# Example Non-streaming crawl
|
||||
print("--- Running Non-Streaming Crawl ---")
|
||||
results = await client.crawl(
|
||||
["https://httpbin.org/html"],
|
||||
browser_config=BrowserConfig(headless=True), # Use library classes for config aid
|
||||
crawler_config=CrawlerRunConfig(cache_mode=CacheMode.BYPASS)
|
||||
)
|
||||
if results: # client.crawl returns None on failure
|
||||
print(f"Non-streaming results success: {results.success}")
|
||||
if results.success:
|
||||
for result in results: # Iterate through the CrawlResultContainer
|
||||
print(f"URL: {result.url}, Success: {result.success}")
|
||||
else:
|
||||
print("Non-streaming crawl failed.")
|
||||
|
||||
|
||||
# Example Streaming crawl
|
||||
print("\n--- Running Streaming Crawl ---")
|
||||
stream_config = CrawlerRunConfig(stream=True, cache_mode=CacheMode.BYPASS)
|
||||
try:
|
||||
async for result in await client.crawl( # client.crawl returns an async generator for streaming
|
||||
["https://httpbin.org/html", "https://httpbin.org/links/5/0"],
|
||||
browser_config=BrowserConfig(headless=True),
|
||||
crawler_config=stream_config
|
||||
):
|
||||
print(f"Streamed result: URL: {result.url}, Success: {result.success}")
|
||||
except Exception as e:
|
||||
print(f"Streaming crawl failed: {e}")
|
||||
|
||||
|
||||
# Example Get schema
|
||||
print("\n--- Getting Schema ---")
|
||||
schema = await client.get_schema()
|
||||
print(f"Schema received: {bool(schema)}") # Print whether schema was received
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
```
|
||||
|
||||
*(SDK parameters like timeout, verify_ssl etc. remain the same)*
|
||||
|
||||
### Second Approach: Direct API Calls
|
||||
|
||||
Crucially, when sending configurations directly via JSON, they **must** follow the `{"type": "ClassName", "params": {...}}` structure for any non-primitive value (like config objects or strategies). Dictionaries must be wrapped as `{"type": "dict", "value": {...}}`.
|
||||
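For example, a browser configuration with a nested dictionary value is sent like this (a minimal illustration; the viewport values are arbitrary):

```json
{
  "browser_config": {
    "type": "BrowserConfig",
    "params": {
      "headless": true,
      "viewport": {"type": "dict", "value": {"width": 1280, "height": 720}}
    }
  }
}
```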
|
||||
*(Keep the detailed explanation of Configuration Structure, Basic Pattern, Simple vs Complex, Strategy Pattern, Complex Nested Example, Quick Grammar Overview, Important Rules, Pro Tip)*
|
||||
|
||||
#### More Examples *(Ensure Schema example uses type/value wrapper)*
|
||||
|
||||
**Advanced Crawler Configuration**
|
||||
*(Keep example, ensure cache_mode uses valid enum value like "bypass")*
|
||||
|
||||
**Extraction Strategy**
|
||||
```json
|
||||
{
|
||||
"crawler_config": {
|
||||
"type": "CrawlerRunConfig",
|
||||
"params": {
|
||||
"extraction_strategy": {
|
||||
"type": "JsonCssExtractionStrategy",
|
||||
"params": {
|
||||
"schema": {
|
||||
"type": "dict",
|
||||
"value": {
|
||||
"baseSelector": "article.post",
|
||||
"fields": [
|
||||
{"name": "title", "selector": "h1", "type": "text"},
|
||||
{"name": "content", "selector": ".content", "type": "html"}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**LLM Extraction Strategy** *(Keep example, ensure schema uses type/value wrapper)*
|
||||
*(Keep Deep Crawler Example)*
|
||||
|
||||
### REST API Examples
|
||||
|
||||
Update URLs to use port `11235`.
|
||||
|
||||
#### Simple Crawl
|
||||
|
||||
```python
|
||||
import requests
|
||||
|
||||
# Configuration objects converted to the required JSON structure
|
||||
browser_config_payload = {
|
||||
"type": "BrowserConfig",
|
||||
"params": {"headless": True}
|
||||
}
|
||||
crawler_config_payload = {
|
||||
"type": "CrawlerRunConfig",
|
||||
"params": {"stream": False, "cache_mode": "bypass"} # Use string value of enum
|
||||
}
|
||||
|
||||
crawl_payload = {
|
||||
"urls": ["https://httpbin.org/html"],
|
||||
"browser_config": browser_config_payload,
|
||||
"crawler_config": crawler_config_payload
|
||||
}
|
||||
response = requests.post(
|
||||
"http://localhost:11235/crawl", # Updated port
|
||||
# headers={"Authorization": f"Bearer {token}"}, # If JWT is enabled
|
||||
json=crawl_payload
|
||||
)
|
||||
print(f"Status Code: {response.status_code}")
|
||||
if response.ok:
|
||||
print(response.json())
|
||||
else:
|
||||
print(f"Error: {response.text}")
|
||||
|
||||
```
|
||||
|
||||
#### Streaming Results
|
||||
|
||||
```python
|
||||
import json
|
||||
import httpx # Use httpx for async streaming example
|
||||
|
||||
async def test_stream_crawl(token: str = None): # Made token optional
|
||||
"""Test the /crawl/stream endpoint with multiple URLs."""
|
||||
url = "http://localhost:11235/crawl/stream" # Updated port
|
||||
payload = {
|
||||
"urls": [
|
||||
"https://httpbin.org/html",
|
||||
"https://httpbin.org/links/5/0",
|
||||
],
|
||||
"browser_config": {
|
||||
"type": "BrowserConfig",
|
||||
"params": {"headless": True, "viewport": {"type": "dict", "value": {"width": 1200, "height": 800}}} # Viewport needs type:dict
|
||||
},
|
||||
"crawler_config": {
|
||||
"type": "CrawlerRunConfig",
|
||||
"params": {"stream": True, "cache_mode": "bypass"}
|
||||
}
|
||||
}
|
||||
|
||||
headers = {}
|
||||
# if token:
|
||||
# headers = {"Authorization": f"Bearer {token}"} # If JWT is enabled
|
||||
|
||||
try:
|
||||
async with httpx.AsyncClient() as client:
|
||||
async with client.stream("POST", url, json=payload, headers=headers, timeout=120.0) as response:
|
||||
print(f"Status: {response.status_code} (Expected: 200)")
|
||||
response.raise_for_status() # Raise exception for bad status codes
|
||||
|
||||
# Read streaming response line-by-line (NDJSON)
|
||||
async for line in response.aiter_lines():
|
||||
if line:
|
||||
try:
|
||||
data = json.loads(line)
|
||||
# Check for completion marker
|
||||
if data.get("status") == "completed":
|
||||
print("Stream completed.")
|
||||
break
|
||||
print(f"Streamed Result: {json.dumps(data, indent=2)}")
|
||||
except json.JSONDecodeError:
|
||||
print(f"Warning: Could not decode JSON line: {line}")
|
||||
|
||||
except httpx.HTTPStatusError as e:
|
||||
print(f"HTTP error occurred: {e.response.status_code} - {e.response.text}")
|
||||
except Exception as e:
|
||||
print(f"Error in streaming crawl test: {str(e)}")
|
||||
|
||||
# To run this example:
|
||||
# import asyncio
|
||||
# asyncio.run(test_stream_crawl())
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Metrics & Monitoring
|
||||
|
||||
Keep an eye on your crawler with these endpoints:
|
||||
|
||||
- `/health` - Quick health check
|
||||
- `/metrics` - Detailed Prometheus metrics (see the scrape sketch below)
|
||||
- `/schema` - Full API schema
|
||||
|
||||
Example health check:
|
||||
```bash
|
||||
curl http://localhost:11235/health
|
||||
```
|
||||
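If you run Prometheus alongside the container, a minimal scrape job for the `/metrics` endpoint looks like this (a sketch assuming Prometheus can reach the server on `localhost:11235`; adjust the target for your network):

```yaml
scrape_configs:
  - job_name: "crawl4ai"
    metrics_path: /metrics
    static_configs:
      - targets: ["localhost:11235"]
```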
|
||||
---
|
||||
|
||||
*(Deployment Scenarios and Complete Examples sections remain the same, maybe update links if examples moved)*
|
||||
|
||||
---
|
||||
|
||||
## Server Configuration
|
||||
|
||||
The server's behavior can be customized through the `config.yml` file.
|
||||
|
||||
### Understanding config.yml
|
||||
|
||||
The configuration file is loaded from `/app/config.yml` inside the container. By default, the file from `deploy/docker/config.yml` in the repository is copied there during the build.
|
||||
|
||||
Here's a detailed breakdown of the configuration options (using defaults from `deploy/docker/config.yml`):
|
||||
|
||||
```yaml
|
||||
# Application Configuration
|
||||
app:
|
||||
title: "Crawl4AI API"
|
||||
version: "1.0.0" # Consider setting this to match library version, e.g., "0.5.1"
|
||||
host: "0.0.0.0"
|
||||
port: 8020 # NOTE: This port is used ONLY when running server.py directly. Gunicorn overrides this (see supervisord.conf).
|
||||
reload: False # Default set to False - suitable for production
|
||||
timeout_keep_alive: 300
|
||||
|
||||
# Default LLM Configuration
|
||||
llm:
|
||||
provider: "openai/gpt-4o-mini"
|
||||
api_key_env: "OPENAI_API_KEY"
|
||||
# api_key: sk-... # If you pass the API key directly then api_key_env will be ignored
|
||||
|
||||
# Redis Configuration (Used by internal Redis server managed by supervisord)
|
||||
redis:
|
||||
host: "localhost"
|
||||
port: 6379
|
||||
db: 0
|
||||
password: ""
|
||||
# ... other redis options ...
|
||||
|
||||
# Rate Limiting Configuration
|
||||
rate_limiting:
|
||||
enabled: True
|
||||
default_limit: "1000/minute"
|
||||
trusted_proxies: []
|
||||
storage_uri: "memory://" # Use "redis://localhost:6379" if you need persistent/shared limits
|
||||
|
||||
# Security Configuration
|
||||
security:
|
||||
enabled: false # Master toggle for security features
|
||||
jwt_enabled: false # Enable JWT authentication (requires security.enabled=true)
|
||||
https_redirect: false # Force HTTPS (requires security.enabled=true)
|
||||
trusted_hosts: ["*"] # Allowed hosts (use specific domains in production)
|
||||
headers: # Security headers (applied if security.enabled=true)
|
||||
x_content_type_options: "nosniff"
|
||||
x_frame_options: "DENY"
|
||||
content_security_policy: "default-src 'self'"
|
||||
strict_transport_security: "max-age=63072000; includeSubDomains"
|
||||
|
||||
# Crawler Configuration
|
||||
crawler:
|
||||
memory_threshold_percent: 95.0
|
||||
rate_limiter:
|
||||
base_delay: [1.0, 2.0] # Min/max delay between requests in seconds for dispatcher
|
||||
timeouts:
|
||||
stream_init: 30.0 # Timeout for stream initialization
|
||||
batch_process: 300.0 # Timeout for non-streaming /crawl processing
|
||||
|
||||
# Logging Configuration
|
||||
logging:
|
||||
level: "INFO"
|
||||
format: "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
|
||||
|
||||
# Observability Configuration
|
||||
observability:
|
||||
prometheus:
|
||||
enabled: True
|
||||
endpoint: "/metrics"
|
||||
health_check:
|
||||
endpoint: "/health"
|
||||
```
|
||||
|
||||
*(JWT Authentication section remains the same, just note the default port is now 11235 for requests)*
|
||||
|
||||
*(Configuration Tips and Best Practices remain the same)*
|
||||
|
||||
### Customizing Your Configuration
|
||||
|
||||
You can override the default `config.yml`.
|
||||
|
||||
#### Method 1: Modify Before Build
|
||||
|
||||
1. Edit the `deploy/docker/config.yml` file in your local repository clone.
|
||||
2. Build the image using `docker buildx` or `docker compose --profile local-... up --build`. The modified file will be copied into the image.
|
||||
|
||||
#### Method 2: Runtime Mount (Recommended for Custom Deploys)
|
||||
|
||||
1. Create your custom configuration file, e.g., `my-custom-config.yml` locally. Ensure it contains all necessary sections.
|
||||
2. Mount it when running the container:
|
||||
|
||||
* **Using `docker run`:**
|
||||
```bash
|
||||
# Assumes my-custom-config.yml is in the current directory
|
||||
docker run -d -p 11235:11235 \
|
||||
--name crawl4ai-custom-config \
|
||||
--env-file .llm.env \
|
||||
--shm-size=1g \
|
||||
-v $(pwd)/my-custom-config.yml:/app/config.yml \
|
||||
unclecode/crawl4ai:latest # Or your specific tag
|
||||
```
|
||||
|
||||
* **Using `docker-compose.yml`:** Add a `volumes` section to the service definition:
|
||||
```yaml
|
||||
services:
|
||||
crawl4ai-hub-amd64: # Or your chosen service
|
||||
image: unclecode/crawl4ai:latest
|
||||
profiles: ["hub-amd64"]
|
||||
<<: *base-config
|
||||
volumes:
|
||||
# Mount local custom config over the default one in the container
|
||||
- ./my-custom-config.yml:/app/config.yml
|
||||
# Keep the shared memory volume from base-config
|
||||
- /dev/shm:/dev/shm
|
||||
```
|
||||
*(Note: Ensure `my-custom-config.yml` is in the same directory as `docker-compose.yml`)*
|
||||
|
||||
> 💡 When mounting, your custom file *completely replaces* the default one. Ensure it's a valid and complete configuration.
|
||||
|
||||
### Configuration Recommendations
|
||||
|
||||
1. **Security First** 🔒
|
||||
- Always enable security in production
|
||||
- Use specific trusted_hosts instead of wildcards
|
||||
- Set up proper rate limiting to protect your server
|
||||
- Consider your environment before enabling HTTPS redirect
|
||||
|
||||
2. **Resource Management** 💻
|
||||
- Adjust memory_threshold_percent based on available RAM
|
||||
- Set timeouts according to your content size and network conditions
|
||||
- Use Redis for rate limiting in multi-container setups (see the snippet after this list)
|
||||
|
||||
3. **Monitoring** 📊
|
||||
- Enable Prometheus if you need metrics
|
||||
- Set DEBUG logging in development, INFO in production
|
||||
- Regular health check monitoring is crucial
|
||||
|
||||
4. **Performance Tuning** ⚡
|
||||
- Start with conservative rate limiter delays
|
||||
- Increase batch_process timeout for large content
|
||||
- Adjust stream_init timeout based on initial response times
|
||||
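As a concrete starting point, the snippet below shows how a hardened multi-container setup might adjust the defaults shown earlier (illustrative values; merge them into your full `config.yml` rather than using this fragment on its own):

```yaml
security:
  enabled: true
  jwt_enabled: true
  trusted_hosts: ["api.example.com"]   # replace with your real domains

rate_limiting:
  enabled: true
  default_limit: "500/minute"
  storage_uri: "redis://localhost:6379"  # shared, persistent limits across workers

logging:
  level: "INFO"
```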
|
||||
## Getting Help
|
||||
|
||||
We're here to help you succeed with Crawl4AI! Here's how to get support:
|
||||
|
||||
- 📖 Check our [full documentation](https://docs.crawl4ai.com)
|
||||
- 🐛 Found a bug? [Open an issue](https://github.com/unclecode/crawl4ai/issues)
|
||||
- 💬 Join our [Discord community](https://discord.gg/crawl4ai)
|
||||
- ⭐ Star us on GitHub to show support!
|
||||
|
||||
## Summary
|
||||
|
||||
In this guide, we've covered everything you need to get started with Crawl4AI's Docker deployment:
|
||||
- Building and running the Docker container
|
||||
- Configuring the environment
|
||||
- Making API requests with proper typing
|
||||
- Using the Python SDK
|
||||
- Monitoring your deployment
|
||||
|
||||
Remember, the examples in the `examples` folder are your friends - they show real-world usage patterns that you can adapt for your needs.
|
||||
|
||||
Keep exploring, and don't hesitate to reach out if you need help! We're building something amazing together. 🚀
|
||||
|
||||
Happy crawling! 🕷️
|
||||
@@ -1,8 +1,11 @@
|
||||
import os
|
||||
import json
|
||||
import asyncio
|
||||
from typing import List, Tuple
|
||||
from typing import List, Tuple, Dict
|
||||
from functools import partial
|
||||
from uuid import uuid4
|
||||
from datetime import datetime
|
||||
from base64 import b64encode
|
||||
|
||||
import logging
|
||||
from typing import Optional, AsyncGenerator
|
||||
@@ -40,8 +43,19 @@ from utils import (
|
||||
decode_redis_hash
|
||||
)
|
||||
|
||||
import psutil, time
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# --- Helper to get memory ---
|
||||
def _get_memory_mb():
|
||||
try:
|
||||
return psutil.Process().memory_info().rss / (1024 * 1024)
|
||||
except Exception as e:
|
||||
logger.warning(f"Could not get memory info: {e}")
|
||||
return None
|
||||
|
||||
|
||||
async def handle_llm_qa(
|
||||
url: str,
|
||||
query: str,
|
||||
@@ -49,6 +63,8 @@ async def handle_llm_qa(
|
||||
) -> str:
|
||||
"""Process QA using LLM with crawled content as context."""
|
||||
try:
|
||||
if not url.startswith(('http://', 'https://')):
|
||||
url = 'https://' + url
|
||||
# Extract base URL by finding last '?q=' occurrence
|
||||
last_q_index = url.rfind('?q=')
|
||||
if last_q_index != -1:
|
||||
@@ -62,7 +78,7 @@ async def handle_llm_qa(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail=result.error_message
|
||||
)
|
||||
content = result.markdown.fit_markdown
|
||||
content = result.markdown.fit_markdown or result.markdown.raw_markdown
|
||||
|
||||
# Create prompt and get LLM response
|
||||
prompt = f"""Use the following content as context to answer the question.
|
||||
@@ -259,7 +275,9 @@ async def handle_llm_request(
|
||||
async def handle_task_status(
|
||||
redis: aioredis.Redis,
|
||||
task_id: str,
|
||||
base_url: str
|
||||
base_url: str,
|
||||
*,
|
||||
keep: bool = False
|
||||
) -> JSONResponse:
|
||||
"""Handle task status check requests."""
|
||||
task = await redis.hgetall(f"task:{task_id}")
|
||||
@@ -273,7 +291,7 @@ async def handle_task_status(
|
||||
response = create_task_response(task, task_id, base_url)
|
||||
|
||||
if task["status"] in [TaskStatus.COMPLETED, TaskStatus.FAILED]:
|
||||
if should_cleanup_task(task["created_at"]):
|
||||
if not keep and should_cleanup_task(task["created_at"]):
|
||||
await redis.delete(f"task:{task_id}")
|
||||
|
||||
return JSONResponse(response)
|
||||
@@ -351,7 +369,12 @@ async def stream_results(crawler: AsyncWebCrawler, results_gen: AsyncGenerator)
|
||||
try:
|
||||
async for result in results_gen:
|
||||
try:
|
||||
server_memory_mb = _get_memory_mb()
|
||||
result_dict = result.model_dump()
|
||||
result_dict['server_memory_mb'] = server_memory_mb
|
||||
# If PDF exists, encode it to base64
|
||||
if result_dict.get('pdf') is not None:
|
||||
result_dict['pdf'] = b64encode(result_dict['pdf']).decode('utf-8')
|
||||
logger.info(f"Streaming result for {result_dict.get('url', 'unknown')}")
|
||||
data = json.dumps(result_dict, default=datetime_handler) + "\n"
|
||||
yield data.encode('utf-8')
|
||||
@@ -365,10 +388,11 @@ async def stream_results(crawler: AsyncWebCrawler, results_gen: AsyncGenerator)
|
||||
except asyncio.CancelledError:
|
||||
logger.warning("Client disconnected during streaming")
|
||||
finally:
|
||||
try:
|
||||
await crawler.close()
|
||||
except Exception as e:
|
||||
logger.error(f"Crawler cleanup error: {e}")
|
||||
# try:
|
||||
# await crawler.close()
|
||||
# except Exception as e:
|
||||
# logger.error(f"Crawler cleanup error: {e}")
|
||||
pass
|
||||
|
||||
async def handle_crawl_request(
|
||||
urls: List[str],
|
||||
@@ -377,7 +401,13 @@ async def handle_crawl_request(
|
||||
config: dict
|
||||
) -> dict:
|
||||
"""Handle non-streaming crawl requests."""
|
||||
start_mem_mb = _get_memory_mb() # <--- Get memory before
|
||||
start_time = time.time()
|
||||
mem_delta_mb = None
|
||||
peak_mem_mb = start_mem_mb
|
||||
|
||||
try:
|
||||
urls = [('https://' + url) if not url.startswith(('http://', 'https://')) else url for url in urls]
|
||||
browser_config = BrowserConfig.load(browser_config)
|
||||
crawler_config = CrawlerRunConfig.load(crawler_config)
|
||||
|
||||
@@ -385,11 +415,21 @@ async def handle_crawl_request(
|
||||
memory_threshold_percent=config["crawler"]["memory_threshold_percent"],
|
||||
rate_limiter=RateLimiter(
|
||||
base_delay=tuple(config["crawler"]["rate_limiter"]["base_delay"])
|
||||
)
|
||||
) if config["crawler"]["rate_limiter"]["enabled"] else None
|
||||
)
|
||||
|
||||
from crawler_pool import get_crawler
|
||||
crawler = await get_crawler(browser_config)
|
||||
|
||||
# crawler: AsyncWebCrawler = AsyncWebCrawler(config=browser_config)
|
||||
# await crawler.start()
|
||||
|
||||
base_config = config["crawler"]["base_config"]
|
||||
# Iterate over the key-value pairs in base_config, then use hasattr to set them on crawler_config
|
||||
for key, value in base_config.items():
|
||||
if hasattr(crawler_config, key):
|
||||
setattr(crawler_config, key, value)
|
||||
|
||||
crawler: AsyncWebCrawler = AsyncWebCrawler(config=browser_config)
|
||||
await crawler.start()
|
||||
results = []
|
||||
func = getattr(crawler, "arun" if len(urls) == 1 else "arun_many")
|
||||
partial_func = partial(func,
|
||||
@@ -397,19 +437,55 @@ async def handle_crawl_request(
|
||||
config=crawler_config,
|
||||
dispatcher=dispatcher)
|
||||
results = await partial_func()
|
||||
await crawler.close()
|
||||
|
||||
# await crawler.close()
|
||||
|
||||
end_mem_mb = _get_memory_mb() # <--- Get memory after
|
||||
end_time = time.time()
|
||||
|
||||
if start_mem_mb is not None and end_mem_mb is not None:
|
||||
mem_delta_mb = end_mem_mb - start_mem_mb # <--- Calculate delta
|
||||
peak_mem_mb = max(peak_mem_mb if peak_mem_mb else 0, end_mem_mb) # <--- Get peak memory
|
||||
logger.info(f"Memory usage: Start: {start_mem_mb} MB, End: {end_mem_mb} MB, Delta: {mem_delta_mb} MB, Peak: {peak_mem_mb} MB")
|
||||
|
||||
# Process results to handle PDF bytes
|
||||
processed_results = []
|
||||
for result in results:
|
||||
result_dict = result.model_dump()
|
||||
# If PDF exists, encode it to base64
|
||||
if result_dict.get('pdf') is not None:
|
||||
result_dict['pdf'] = b64encode(result_dict['pdf']).decode('utf-8')
|
||||
processed_results.append(result_dict)
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
"results": [result.model_dump() for result in results]
|
||||
"results": processed_results,
|
||||
"server_processing_time_s": end_time - start_time,
|
||||
"server_memory_delta_mb": mem_delta_mb,
|
||||
"server_peak_memory_mb": peak_mem_mb
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Crawl error: {str(e)}", exc_info=True)
|
||||
if 'crawler' in locals():
|
||||
await crawler.close()
|
||||
if 'crawler' in locals() and crawler.ready: # Check if crawler was initialized and started
|
||||
# try:
|
||||
# await crawler.close()
|
||||
# except Exception as close_e:
|
||||
# logger.error(f"Error closing crawler during exception handling: {close_e}")
|
||||
logger.error(f"Error closing crawler during exception handling: {str(e)}")
|
||||
|
||||
# Measure memory even on error if possible
|
||||
end_mem_mb_error = _get_memory_mb()
|
||||
if start_mem_mb is not None and end_mem_mb_error is not None:
|
||||
mem_delta_mb = end_mem_mb_error - start_mem_mb
|
||||
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail=str(e)
|
||||
detail=json.dumps({ # Send structured error
|
||||
"error": str(e),
|
||||
"server_memory_delta_mb": mem_delta_mb,
|
||||
"server_peak_memory_mb": max(peak_mem_mb if peak_mem_mb else 0, end_mem_mb_error or 0)
|
||||
})
|
||||
)
|
||||
|
||||
async def handle_stream_crawl_request(
|
||||
@@ -421,9 +497,11 @@ async def handle_stream_crawl_request(
|
||||
"""Handle streaming crawl requests."""
|
||||
try:
|
||||
browser_config = BrowserConfig.load(browser_config)
|
||||
browser_config.verbose = True
|
||||
# browser_config.verbose = True # Set to False or remove for production stress testing
|
||||
browser_config.verbose = False
|
||||
crawler_config = CrawlerRunConfig.load(crawler_config)
|
||||
crawler_config.scraping_strategy = LXMLWebScrapingStrategy()
|
||||
crawler_config.stream = True
|
||||
|
||||
dispatcher = MemoryAdaptiveDispatcher(
|
||||
memory_threshold_percent=config["crawler"]["memory_threshold_percent"],
|
||||
@@ -432,8 +510,11 @@ async def handle_stream_crawl_request(
|
||||
)
|
||||
)
|
||||
|
||||
crawler = AsyncWebCrawler(config=browser_config)
|
||||
await crawler.start()
|
||||
from crawler_pool import get_crawler
|
||||
crawler = await get_crawler(browser_config)
|
||||
|
||||
# crawler = AsyncWebCrawler(config=browser_config)
|
||||
# await crawler.start()
|
||||
|
||||
results_gen = await crawler.arun_many(
|
||||
urls=urls,
|
||||
@@ -444,10 +525,60 @@ async def handle_stream_crawl_request(
|
||||
return crawler, results_gen
|
||||
|
||||
except Exception as e:
|
||||
if 'crawler' in locals():
|
||||
await crawler.close()
|
||||
# Make sure to close crawler if started during an error here
|
||||
if 'crawler' in locals() and crawler.ready:
|
||||
# try:
|
||||
# await crawler.close()
|
||||
# except Exception as close_e:
|
||||
# logger.error(f"Error closing crawler during stream setup exception: {close_e}")
|
||||
logger.error(f"Error closing crawler during stream setup exception: {str(e)}")
|
||||
logger.error(f"Stream crawl error: {str(e)}", exc_info=True)
|
||||
# Raising HTTPException here will prevent streaming response
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail=str(e)
|
||||
)
|
||||
)
|
||||
|
||||
async def handle_crawl_job(
|
||||
redis,
|
||||
background_tasks: BackgroundTasks,
|
||||
urls: List[str],
|
||||
browser_config: Dict,
|
||||
crawler_config: Dict,
|
||||
config: Dict,
|
||||
) -> Dict:
|
||||
"""
|
||||
Fire-and-forget version of handle_crawl_request.
|
||||
Creates a task in Redis, runs the heavy work in a background task,
|
||||
lets /crawl/job/{task_id} polling fetch the result.
|
||||
"""
|
||||
task_id = f"crawl_{uuid4().hex[:8]}"
|
||||
await redis.hset(f"task:{task_id}", mapping={
|
||||
"status": TaskStatus.PROCESSING, # <-- keep enum values consistent
|
||||
"created_at": datetime.utcnow().isoformat(),
|
||||
"url": json.dumps(urls), # store list as JSON string
|
||||
"result": "",
|
||||
"error": "",
|
||||
})
|
||||
|
||||
async def _runner():
|
||||
try:
|
||||
result = await handle_crawl_request(
|
||||
urls=urls,
|
||||
browser_config=browser_config,
|
||||
crawler_config=crawler_config,
|
||||
config=config,
|
||||
)
|
||||
await redis.hset(f"task:{task_id}", mapping={
|
||||
"status": TaskStatus.COMPLETED,
|
||||
"result": json.dumps(result),
|
||||
})
|
||||
await asyncio.sleep(5) # Give Redis time to process the update
|
||||
except Exception as exc:
|
||||
await redis.hset(f"task:{task_id}", mapping={
|
||||
"status": TaskStatus.FAILED,
|
||||
"error": str(exc),
|
||||
})
|
||||
|
||||
background_tasks.add_task(_runner)
|
||||
return {"task_id": task_id}
|
||||
deploy/docker/c4ai-code-context.md: 11631 lines (diff suppressed because it is too large)
deploy/docker/c4ai-doc-context.md: 8913 lines (diff suppressed because it is too large)
@@ -3,8 +3,9 @@ app:
|
||||
title: "Crawl4AI API"
|
||||
version: "1.0.0"
|
||||
host: "0.0.0.0"
|
||||
port: 8020
|
||||
port: 11234
|
||||
reload: False
|
||||
workers: 1
|
||||
timeout_keep_alive: 300
|
||||
|
||||
# Default LLM Configuration
|
||||
@@ -50,12 +51,31 @@ security:
|
||||
|
||||
# Crawler Configuration
|
||||
crawler:
|
||||
base_config:
|
||||
simulate_user: true
|
||||
memory_threshold_percent: 95.0
|
||||
rate_limiter:
|
||||
enabled: true
|
||||
base_delay: [1.0, 2.0]
|
||||
timeouts:
|
||||
stream_init: 30.0 # Timeout for stream initialization
|
||||
batch_process: 300.0 # Timeout for batch processing
|
||||
pool:
|
||||
max_pages: 40 # ← GLOBAL_SEM permits
|
||||
idle_ttl_sec: 1800 # ← 30 min janitor cutoff
|
||||
browser:
|
||||
kwargs:
|
||||
headless: true
|
||||
text_mode: true
|
||||
extra_args:
|
||||
# - "--single-process"
|
||||
- "--no-sandbox"
|
||||
- "--disable-dev-shm-usage"
|
||||
- "--disable-gpu"
|
||||
- "--disable-software-rasterizer"
|
||||
- "--disable-web-security"
|
||||
- "--allow-insecure-localhost"
|
||||
- "--ignore-certificate-errors"
|
||||
|
||||
# Logging Configuration
|
||||
logging:
|
||||
@@ -68,4 +88,4 @@ observability:
|
||||
enabled: True
|
||||
endpoint: "/metrics"
|
||||
health_check:
|
||||
endpoint: "/health"
|
||||
endpoint: "/health"
|
||||
deploy/docker/crawler_pool.py (new file, 60 lines)
@@ -0,0 +1,60 @@
|
||||
# crawler_pool.py (new file)
|
||||
import asyncio, json, hashlib, time, psutil
|
||||
from contextlib import suppress
|
||||
from typing import Dict
|
||||
from crawl4ai import AsyncWebCrawler, BrowserConfig
|
||||
from typing import Dict
|
||||
from utils import load_config
|
||||
|
||||
CONFIG = load_config()
|
||||
|
||||
POOL: Dict[str, AsyncWebCrawler] = {}
|
||||
LAST_USED: Dict[str, float] = {}
|
||||
LOCK = asyncio.Lock()
|
||||
|
||||
MEM_LIMIT = CONFIG.get("crawler", {}).get("memory_threshold_percent", 95.0) # % RAM – refuse new browsers above this
|
||||
IDLE_TTL = CONFIG.get("crawler", {}).get("pool", {}).get("idle_ttl_sec", 1800) # close if unused for 30 min
|
||||
|
||||
def _sig(cfg: BrowserConfig) -> str:
|
||||
payload = json.dumps(cfg.to_dict(), sort_keys=True, separators=(",",":"))
|
||||
return hashlib.sha1(payload.encode()).hexdigest()
|
||||
|
||||
async def get_crawler(cfg: BrowserConfig) -> AsyncWebCrawler:
|
||||
try:
|
||||
sig = _sig(cfg)
|
||||
async with LOCK:
|
||||
if sig in POOL:
|
||||
LAST_USED[sig] = time.time();
|
||||
return POOL[sig]
|
||||
if psutil.virtual_memory().percent >= MEM_LIMIT:
|
||||
raise MemoryError("RAM pressure – new browser denied")
|
||||
crawler = AsyncWebCrawler(config=cfg, thread_safe=False)
|
||||
await crawler.start()
|
||||
POOL[sig] = crawler; LAST_USED[sig] = time.time()
|
||||
return crawler
|
||||
except MemoryError as e:
|
||||
raise MemoryError(f"RAM pressure – new browser denied: {e}")
|
||||
except Exception as e:
|
||||
raise RuntimeError(f"Failed to start browser: {e}")
|
||||
finally:
|
||||
if sig in POOL:
|
||||
LAST_USED[sig] = time.time()
|
||||
else:
|
||||
# If we failed to start the browser, we should remove it from the pool
|
||||
POOL.pop(sig, None)
|
||||
LAST_USED.pop(sig, None)
|
||||
# If we failed to start the browser, we should remove it from the pool
|
||||
async def close_all():
|
||||
async with LOCK:
|
||||
await asyncio.gather(*(c.close() for c in POOL.values()), return_exceptions=True)
|
||||
POOL.clear(); LAST_USED.clear()
|
||||
|
||||
async def janitor():
|
||||
while True:
|
||||
await asyncio.sleep(60)
|
||||
now = time.time()
|
||||
async with LOCK:
|
||||
for sig, crawler in list(POOL.items()):
|
||||
if now - LAST_USED[sig] > IDLE_TTL:
|
||||
with suppress(Exception): await crawler.close()
|
||||
POOL.pop(sig, None); LAST_USED.pop(sig, None)
|
||||
deploy/docker/job.py (new file, 99 lines)
@@ -0,0 +1,99 @@
|
||||
"""
|
||||
Job endpoints (enqueue + poll) for long-running LLM extraction and raw crawl.
|
||||
Relies on the existing Redis task helpers in api.py
|
||||
"""
|
||||
|
||||
from typing import Dict, Optional, Callable
|
||||
from fastapi import APIRouter, BackgroundTasks, Depends, Request
|
||||
from pydantic import BaseModel, HttpUrl
|
||||
|
||||
from api import (
|
||||
handle_llm_request,
|
||||
handle_crawl_job,
|
||||
handle_task_status,
|
||||
)
|
||||
|
||||
# ------------- dependency placeholders -------------
|
||||
_redis = None # will be injected from server.py
|
||||
_config = None
|
||||
_token_dep: Callable = lambda: None # dummy until injected
|
||||
|
||||
# public router
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
# === init hook called by server.py =========================================
|
||||
def init_job_router(redis, config, token_dep) -> APIRouter:
|
||||
"""Inject shared singletons and return the router for mounting."""
|
||||
global _redis, _config, _token_dep
|
||||
_redis, _config, _token_dep = redis, config, token_dep
|
||||
return router
|
||||
|
||||
|
||||
# ---------- payload models --------------------------------------------------
|
||||
class LlmJobPayload(BaseModel):
|
||||
url: HttpUrl
|
||||
q: str
|
||||
schema: Optional[str] = None
|
||||
cache: bool = False
|
||||
|
||||
|
||||
class CrawlJobPayload(BaseModel):
|
||||
urls: list[HttpUrl]
|
||||
browser_config: Dict = {}
|
||||
crawler_config: Dict = {}
|
||||
|
||||
|
||||
# ---------- LLM job ---------------------------------------------------------
|
||||
@router.post("/llm/job", status_code=202)
|
||||
async def llm_job_enqueue(
|
||||
payload: LlmJobPayload,
|
||||
background_tasks: BackgroundTasks,
|
||||
request: Request,
|
||||
_td: Dict = Depends(lambda: _token_dep()), # late-bound dep
|
||||
):
|
||||
return await handle_llm_request(
|
||||
_redis,
|
||||
background_tasks,
|
||||
request,
|
||||
str(payload.url),
|
||||
query=payload.q,
|
||||
schema=payload.schema,
|
||||
cache=payload.cache,
|
||||
config=_config,
|
||||
)
|
||||
|
||||
|
||||
@router.get("/llm/job/{task_id}")
|
||||
async def llm_job_status(
|
||||
request: Request,
|
||||
task_id: str,
|
||||
_td: Dict = Depends(lambda: _token_dep())
|
||||
):
|
||||
return await handle_task_status(_redis, task_id)
|
||||
|
||||
|
||||
# ---------- CRAWL job -------------------------------------------------------
|
||||
@router.post("/crawl/job", status_code=202)
|
||||
async def crawl_job_enqueue(
|
||||
payload: CrawlJobPayload,
|
||||
background_tasks: BackgroundTasks,
|
||||
_td: Dict = Depends(lambda: _token_dep()),
|
||||
):
|
||||
return await handle_crawl_job(
|
||||
_redis,
|
||||
background_tasks,
|
||||
[str(u) for u in payload.urls],
|
||||
payload.browser_config,
|
||||
payload.crawler_config,
|
||||
config=_config,
|
||||
)
|
||||
|
||||
|
||||
@router.get("/crawl/job/{task_id}")
|
||||
async def crawl_job_status(
|
||||
request: Request,
|
||||
task_id: str,
|
||||
_td: Dict = Depends(lambda: _token_dep())
|
||||
):
|
||||
return await handle_task_status(_redis, task_id, base_url=str(request.base_url))
|
||||
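# Illustrative usage sketch (not part of the original file): enqueue a crawl job and
# poll for its result. Assumes the server from this deployment listens on
# http://localhost:11235 and JWT auth is disabled; the exact response shape of the
# status call comes from create_task_response()/TaskStatus in api.py.
#
#   import httpx
#   payload = {"urls": ["https://httpbin.org/html"]}
#   task_id = httpx.post("http://localhost:11235/crawl/job", json=payload).json()["task_id"]
#   status = httpx.get(f"http://localhost:11235/crawl/job/{task_id}").json()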
deploy/docker/mcp_bridge.py (new file, 252 lines)
@@ -0,0 +1,252 @@
|
||||
# deploy/docker/mcp_bridge.py
|
||||
|
||||
from __future__ import annotations
|
||||
import inspect, json, re, anyio
|
||||
from contextlib import suppress
|
||||
from typing import Any, Callable, Dict, List, Tuple
|
||||
import httpx
|
||||
|
||||
from fastapi import FastAPI, WebSocket, WebSocketDisconnect, HTTPException
|
||||
from fastapi.responses import JSONResponse
|
||||
from fastapi import Request
|
||||
from sse_starlette.sse import EventSourceResponse
|
||||
from pydantic import BaseModel
|
||||
from mcp.server.sse import SseServerTransport
|
||||
|
||||
import mcp.types as t
|
||||
from mcp.server.lowlevel.server import Server, NotificationOptions
|
||||
from mcp.server.models import InitializationOptions
|
||||
|
||||
# ── opt‑in decorators ───────────────────────────────────────────
|
||||
def mcp_resource(name: str | None = None):
|
||||
def deco(fn):
|
||||
fn.__mcp_kind__, fn.__mcp_name__ = "resource", name
|
||||
return fn
|
||||
return deco
|
||||
|
||||
def mcp_template(name: str | None = None):
|
||||
def deco(fn):
|
||||
fn.__mcp_kind__, fn.__mcp_name__ = "template", name
|
||||
return fn
|
||||
return deco
|
||||
|
||||
def mcp_tool(name: str | None = None):
|
||||
def deco(fn):
|
||||
fn.__mcp_kind__, fn.__mcp_name__ = "tool", name
|
||||
return fn
|
||||
return deco
|
||||
|
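# Illustrative usage sketch (not part of the original file): the decorators above are
# opt-in markers. An endpoint is exposed over MCP only when it is decorated and
# attach_mcp() (defined below) is called once after all routes are declared. The route
# path and tool name here are hypothetical.
#
#   app = FastAPI(title="Crawl4AI API")
#
#   @app.post("/md")
#   @mcp_tool("get_markdown")      # name is optional; defaults to a path-derived key
#   async def get_markdown(...): ...
#
#   attach_mcp(app, base_url="http://127.0.0.1:8020")  # mounts /mcp/ws, /mcp/sse, /mcp/schema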
||||
# ── HTTP‑proxy helper for FastAPI endpoints ─────────────────────
|
||||
def _make_http_proxy(base_url: str, route):
|
||||
method = list(route.methods - {"HEAD", "OPTIONS"})[0]
|
||||
async def proxy(**kwargs):
|
||||
# replace `/items/{id}` style params first
|
||||
path = route.path
|
||||
for k, v in list(kwargs.items()):
|
||||
placeholder = "{" + k + "}"
|
||||
if placeholder in path:
|
||||
path = path.replace(placeholder, str(v))
|
||||
kwargs.pop(k)
|
||||
url = base_url.rstrip("/") + path
|
||||
|
||||
async with httpx.AsyncClient() as client:
|
||||
try:
|
||||
r = (
|
||||
await client.get(url, params=kwargs)
|
||||
if method == "GET"
|
||||
else await client.request(method, url, json=kwargs)
|
||||
)
|
||||
r.raise_for_status()
|
||||
return r.text if method == "GET" else r.json()
|
||||
except httpx.HTTPStatusError as e:
|
||||
# surface FastAPI error details instead of plain 500
|
||||
raise HTTPException(e.response.status_code, e.response.text)
|
||||
return proxy
|
||||
|
||||
# ── main entry point ────────────────────────────────────────────
|
||||
def attach_mcp(
|
||||
app: FastAPI,
|
||||
*, # keyword‑only
|
||||
base: str = "/mcp",
|
||||
name: str | None = None,
|
||||
base_url: str, # eg. "http://127.0.0.1:8020"
|
||||
) -> None:
|
||||
"""Call once after all routes are declared to expose WS+SSE MCP endpoints."""
|
||||
server_name = name or app.title or "FastAPI-MCP"
|
||||
mcp = Server(server_name)
|
||||
|
||||
# tools: Dict[str, Callable] = {}
|
||||
tools: Dict[str, Tuple[Callable, Callable]] = {}
|
||||
resources: Dict[str, Callable] = {}
|
||||
templates: Dict[str, Callable] = {}
|
||||
|
||||
# register decorated FastAPI routes
|
||||
for route in app.routes:
|
||||
fn = getattr(route, "endpoint", None)
|
||||
kind = getattr(fn, "__mcp_kind__", None)
|
||||
if not kind:
|
||||
continue
|
||||
|
||||
key = fn.__mcp_name__ or re.sub(r"[/{}}]", "_", route.path).strip("_")
|
||||
|
||||
# if kind == "tool":
|
||||
# tools[key] = _make_http_proxy(base_url, route)
|
||||
if kind == "tool":
|
||||
proxy = _make_http_proxy(base_url, route)
|
||||
tools[key] = (proxy, fn)
|
||||
continue
|
||||
if kind == "resource":
|
||||
resources[key] = fn
|
||||
if kind == "template":
|
||||
templates[key] = fn
|
||||
|
||||
# helpers for JSON‑Schema
|
||||
def _schema(model: type[BaseModel] | None) -> dict:
|
||||
return {"type": "object"} if model is None else model.model_json_schema()
|
||||
|
||||
def _body_model(fn: Callable) -> type[BaseModel] | None:
|
||||
for p in inspect.signature(fn).parameters.values():
|
||||
a = p.annotation
|
||||
if inspect.isclass(a) and issubclass(a, BaseModel):
|
||||
return a
|
||||
return None
|
||||
|
||||
    # MCP handlers
    @mcp.list_tools()
    async def _list_tools() -> List[t.Tool]:
        out = []
        for k, (proxy, orig_fn) in tools.items():
            desc = getattr(orig_fn, "__mcp_description__", None) or inspect.getdoc(orig_fn) or ""
            schema = getattr(orig_fn, "__mcp_schema__", None) or _schema(_body_model(orig_fn))
            out.append(
                t.Tool(name=k, description=desc, inputSchema=schema)
            )
        return out

    @mcp.call_tool()
    async def _call_tool(name: str, arguments: Dict | None) -> List[t.TextContent]:
        if name not in tools:
            raise HTTPException(404, "tool not found")

        proxy, _ = tools[name]
        try:
            res = await proxy(**(arguments or {}))
        except HTTPException as exc:
            # map server‑side errors into MCP "text/error" payloads
            err = {"error": exc.status_code, "detail": exc.detail}
            return [t.TextContent(type="text", text=json.dumps(err))]
        return [t.TextContent(type="text", text=json.dumps(res, default=str))]

    @mcp.list_resources()
    async def _list_resources() -> List[t.Resource]:
        return [
            t.Resource(name=k, description=inspect.getdoc(f) or "", mime_type="application/json")
            for k, f in resources.items()
        ]

    @mcp.read_resource()
    async def _read_resource(name: str) -> List[t.TextContent]:
        if name not in resources:
            raise HTTPException(404, "resource not found")
        res = resources[name]()
        return [t.TextContent(type="text", text=json.dumps(res, default=str))]

    @mcp.list_resource_templates()
    async def _list_templates() -> List[t.ResourceTemplate]:
        return [
            t.ResourceTemplate(
                name=k,
                description=inspect.getdoc(f) or "",
                parameters={
                    p: {"type": "string"} for p in _path_params(app, f)
                },
            )
            for k, f in templates.items()
        ]

    init_opts = InitializationOptions(
        server_name=server_name,
        server_version="0.1.0",
        capabilities=mcp.get_capabilities(
            notification_options=NotificationOptions(),
            experimental_capabilities={},
        ),
    )

    # ── WebSocket transport ────────────────────────────────────
    @app.websocket_route(f"{base}/ws")
    async def _ws(ws: WebSocket):
        await ws.accept()
        c2s_send, c2s_recv = anyio.create_memory_object_stream(100)
        s2c_send, s2c_recv = anyio.create_memory_object_stream(100)

        from pydantic import TypeAdapter
        from mcp.types import JSONRPCMessage
        adapter = TypeAdapter(JSONRPCMessage)

        init_done = anyio.Event()

        async def srv_to_ws():
            first = True
            try:
                async for msg in s2c_recv:
                    await ws.send_json(msg.model_dump())
                    if first:
                        init_done.set()
                        first = False
            finally:
                # make sure cleanup survives TaskGroup cancellation
                with anyio.CancelScope(shield=True):
                    with suppress(RuntimeError):  # idempotent close
                        await ws.close()

        async def ws_to_srv():
            try:
                # 1st frame is always "initialize"
                first = adapter.validate_python(await ws.receive_json())
                await c2s_send.send(first)
                await init_done.wait()  # block until server ready
                while True:
                    data = await ws.receive_json()
                    await c2s_send.send(adapter.validate_python(data))
            except WebSocketDisconnect:
                await c2s_send.aclose()

        async with anyio.create_task_group() as tg:
            tg.start_soon(mcp.run, c2s_recv, s2c_send, init_opts)
            tg.start_soon(ws_to_srv)
            tg.start_soon(srv_to_ws)

    # ── SSE transport (official) ─────────────────────────────
    sse = SseServerTransport(f"{base}/messages/")

    @app.get(f"{base}/sse")
    async def _mcp_sse(request: Request):
        async with sse.connect_sse(
            request.scope, request.receive, request._send  # starlette ASGI primitives
        ) as (read_stream, write_stream):
            await mcp.run(read_stream, write_stream, init_opts)

    # client → server frames are POSTed here
    app.mount(f"{base}/messages", app=sse.handle_post_message)

    # ── schema endpoint ───────────────────────────────────────
    @app.get(f"{base}/schema")
    async def _schema_endpoint():
        return JSONResponse({
            "tools": [x.model_dump() for x in await _list_tools()],
            "resources": [x.model_dump() for x in await _list_resources()],
            "resource_templates": [x.model_dump() for x in await _list_templates()],
        })


# ── helpers ────────────────────────────────────────────────────
def _route_name(path: str) -> str:
    return re.sub(r"[/{}]", "_", path).strip("_")


def _path_params(app: FastAPI, fn: Callable) -> List[str]:
    for r in app.routes:
        if r.endpoint is fn:
            return list(r.param_convertors.keys())
    return []
@@ -1,9 +1,17 @@
fastapi
uvicorn
fastapi>=0.115.12
uvicorn>=0.34.2
gunicorn>=23.0.0
slowapi>=0.1.9
prometheus-fastapi-instrumentator>=7.0.2
slowapi==0.1.9
prometheus-fastapi-instrumentator>=7.1.0
redis>=5.2.1
jwt>=1.3.1
dnspython>=2.7.0
email-validator>=2.2.0
email-validator==2.2.0
sse-starlette==2.2.1
pydantic>=2.11
rank-bm25==0.2.2
anyio==4.9.0
PyJWT==2.10.1
mcp>=1.6.0
websockets>=15.0.1
httpx[http2]>=0.27.2

41 deploy/docker/schemas.py Normal file
@@ -0,0 +1,41 @@
from typing import List, Optional, Dict
from enum import Enum
from pydantic import BaseModel, Field
from utils import FilterType


class CrawlRequest(BaseModel):
    urls: List[str] = Field(min_length=1, max_length=100)
    browser_config: Optional[Dict] = Field(default_factory=dict)
    crawler_config: Optional[Dict] = Field(default_factory=dict)


class MarkdownRequest(BaseModel):
    """Request body for the /md endpoint."""
    url: str = Field(..., description="Absolute http/https URL to fetch")
    f: FilterType = Field(FilterType.FIT, description="Content‑filter strategy: fit, raw, bm25, or llm")
    q: Optional[str] = Field(None, description="Query string used by BM25/LLM filters")
    c: Optional[str] = Field("0", description="Cache‑bust / revision counter")


class RawCode(BaseModel):
    code: str


class HTMLRequest(BaseModel):
    url: str


class ScreenshotRequest(BaseModel):
    url: str
    screenshot_wait_for: Optional[float] = 2
    output_path: Optional[str] = None


class PDFRequest(BaseModel):
    url: str
    output_path: Optional[str] = None


class JSEndpointRequest(BaseModel):
    url: str
    scripts: List[str] = Field(
        ...,
        description="List of separated JavaScript snippets to execute"
    )
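
For illustration, a short sketch of how these request models behave under pydantic v2 validation; the values and the local `schemas` import are assumptions made for the example.

```python
from pydantic import ValidationError
from schemas import CrawlRequest

# valid: at least one URL, optional configs default to {}
ok = CrawlRequest(urls=["https://example.com"])

# invalid: an empty list violates min_length=1
try:
    CrawlRequest(urls=[])
except ValidationError as e:
    print(e.errors()[0]["type"])
```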
@@ -1,150 +1,449 @@
# ───────────────────────── server.py ─────────────────────────
"""
Crawl4AI FastAPI entry‑point
• Browser pool + global page cap
• Rate‑limiting, security, metrics
• /crawl, /crawl/stream, /md, /llm endpoints
"""

# ── stdlib & 3rd‑party imports ───────────────────────────────
from crawler_pool import get_crawler, close_all, janitor
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig
from auth import create_access_token, get_token_dependency, TokenRequest
from pydantic import BaseModel
from typing import Optional, List, Dict
from fastapi import Request, Depends
from fastapi.responses import FileResponse
import base64
import re
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig
from api import (
    handle_markdown_request, handle_llm_qa,
    handle_stream_crawl_request, handle_crawl_request,
    stream_results
)
from schemas import (
    CrawlRequest,
    MarkdownRequest,
    RawCode,
    HTMLRequest,
    ScreenshotRequest,
    PDFRequest,
    JSEndpointRequest,
)

from utils import (
    FilterType, load_config, setup_logging, verify_email_domain
)
import os
import sys
import time
from typing import List, Optional, Dict
from fastapi import FastAPI, HTTPException, Request, Query, Path, Depends
from fastapi.responses import StreamingResponse, RedirectResponse, PlainTextResponse, JSONResponse
import asyncio
from typing import List
from contextlib import asynccontextmanager
import pathlib

from fastapi import (
    FastAPI, HTTPException, Request, Path, Query, Depends
)
from rank_bm25 import BM25Okapi
from fastapi.responses import (
    StreamingResponse, RedirectResponse, PlainTextResponse, JSONResponse
)
from fastapi.middleware.httpsredirect import HTTPSRedirectMiddleware
from fastapi.middleware.trustedhost import TrustedHostMiddleware
from fastapi.staticfiles import StaticFiles
from job import init_job_router

from mcp_bridge import attach_mcp, mcp_resource, mcp_template, mcp_tool

import ast
import crawl4ai as _c4
from pydantic import BaseModel, Field
from slowapi import Limiter
from slowapi.util import get_remote_address
from prometheus_fastapi_instrumentator import Instrumentator
from redis import asyncio as aioredis

# ── internal imports (after sys.path append) ─────────────────
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
from utils import FilterType, load_config, setup_logging, verify_email_domain
from api import (
    handle_markdown_request,
    handle_llm_qa,
    handle_stream_crawl_request,
    handle_crawl_request,
    stream_results
)
from auth import create_access_token, get_token_dependency, TokenRequest  # Import from auth.py

__version__ = "0.2.6"

class CrawlRequest(BaseModel):
    urls: List[str] = Field(min_length=1, max_length=100)
    browser_config: Optional[Dict] = Field(default_factory=dict)
    crawler_config: Optional[Dict] = Field(default_factory=dict)

# Load configuration and setup
# ────────────────── configuration / logging ──────────────────
config = load_config()
setup_logging(config)

# Initialize Redis
__version__ = "0.5.1-d1"

# ── global page semaphore (hard cap) ─────────────────────────
MAX_PAGES = config["crawler"]["pool"].get("max_pages", 30)
GLOBAL_SEM = asyncio.Semaphore(MAX_PAGES)

# import logging
# page_log = logging.getLogger("page_cap")
# orig_arun = AsyncWebCrawler.arun
# async def capped_arun(self, *a, **kw):
#     await GLOBAL_SEM.acquire()                      # ← take slot
#     try:
#         in_flight = MAX_PAGES - GLOBAL_SEM._value   # used permits
#         page_log.info("🕸️ pages_in_flight=%s / %s", in_flight, MAX_PAGES)
#         return await orig_arun(self, *a, **kw)
#     finally:
#         GLOBAL_SEM.release()                        # ← free slot

orig_arun = AsyncWebCrawler.arun


async def capped_arun(self, *a, **kw):
    async with GLOBAL_SEM:
        return await orig_arun(self, *a, **kw)
AsyncWebCrawler.arun = capped_arun

# ───────────────────── FastAPI lifespan ──────────────────────


@asynccontextmanager
async def lifespan(_: FastAPI):
    await get_crawler(BrowserConfig(
        extra_args=config["crawler"]["browser"].get("extra_args", []),
        **config["crawler"]["browser"].get("kwargs", {}),
    ))  # warm‑up
    app.state.janitor = asyncio.create_task(janitor())  # idle GC
    yield
    app.state.janitor.cancel()
    await close_all()

# ───────────────────── FastAPI instance ──────────────────────
app = FastAPI(
    title=config["app"]["title"],
    version=config["app"]["version"],
    lifespan=lifespan,
)

# ── static playground ──────────────────────────────────────
STATIC_DIR = pathlib.Path(__file__).parent / "static" / "playground"
if not STATIC_DIR.exists():
    raise RuntimeError(f"Playground assets not found at {STATIC_DIR}")
app.mount(
    "/playground",
    StaticFiles(directory=STATIC_DIR, html=True),
    name="play",
)


@app.get("/")
async def root():
    return RedirectResponse("/playground")

# ─────────────────── infra / middleware ─────────────────────
redis = aioredis.from_url(config["redis"].get("uri", "redis://localhost"))

# Initialize rate limiter
limiter = Limiter(
    key_func=get_remote_address,
    default_limits=[config["rate_limiting"]["default_limit"]],
    storage_uri=config["rate_limiting"]["storage_uri"]
    storage_uri=config["rate_limiting"]["storage_uri"],
)

app = FastAPI(
    title=config["app"]["title"],
    version=config["app"]["version"]
)

# Configure middleware
def setup_security_middleware(app, config):
    sec_config = config.get("security", {})
    if sec_config.get("enabled", False):
        if sec_config.get("https_redirect", False):
            app.add_middleware(HTTPSRedirectMiddleware)
        if sec_config.get("trusted_hosts", []) != ["*"]:
            app.add_middleware(TrustedHostMiddleware, allowed_hosts=sec_config["trusted_hosts"])
def _setup_security(app_: FastAPI):
    sec = config["security"]
    if not sec["enabled"]:
        return
    if sec.get("https_redirect"):
        app_.add_middleware(HTTPSRedirectMiddleware)
    if sec.get("trusted_hosts", []) != ["*"]:
        app_.add_middleware(
            TrustedHostMiddleware, allowed_hosts=sec["trusted_hosts"]
        )

setup_security_middleware(app, config)

# Prometheus instrumentation
_setup_security(app)

if config["observability"]["prometheus"]["enabled"]:
    Instrumentator().instrument(app).expose(app)

# Get token dependency based on config
token_dependency = get_token_dependency(config)
token_dep = get_token_dependency(config)


# Middleware for security headers
@app.middleware("http")
async def add_security_headers(request: Request, call_next):
    response = await call_next(request)
    resp = await call_next(request)
    if config["security"]["enabled"]:
        response.headers.update(config["security"]["headers"])
    return response
        resp.headers.update(config["security"]["headers"])
    return resp

# Token endpoint (always available, but usage depends on config)
# ───────────────── safe config‑dump helper ─────────────────
ALLOWED_TYPES = {
    "CrawlerRunConfig": CrawlerRunConfig,
    "BrowserConfig": BrowserConfig,
}


def _safe_eval_config(expr: str) -> dict:
    """
    Accept exactly one top‑level call to CrawlerRunConfig(...) or BrowserConfig(...).
    Whatever is inside the parentheses is fine *except* further function calls
    (so no __import__('os') stuff). All public names from crawl4ai are available
    when we eval.
    """
    tree = ast.parse(expr, mode="eval")

    # must be a single call
    if not isinstance(tree.body, ast.Call):
        raise ValueError("Expression must be a single constructor call")

    call = tree.body
    if not (isinstance(call.func, ast.Name) and call.func.id in {"CrawlerRunConfig", "BrowserConfig"}):
        raise ValueError(
            "Only CrawlerRunConfig(...) or BrowserConfig(...) are allowed")

    # forbid nested calls to keep the surface tiny
    for node in ast.walk(call):
        if isinstance(node, ast.Call) and node is not call:
            raise ValueError("Nested function calls are not permitted")

    # expose everything that crawl4ai exports, nothing else
    safe_env = {name: getattr(_c4, name)
                for name in dir(_c4) if not name.startswith("_")}
    obj = eval(compile(tree, "<config>", "eval"),
               {"__builtins__": {}}, safe_env)
    return obj.dump()
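
A brief sketch of what this guard accepts and rejects, assuming `CacheMode` is among the public names exported by crawl4ai (as the playground template later in this diff suggests); the inputs are illustrative only.

```python
# accepted: a single top-level constructor call, attribute access is fine
print(_safe_eval_config("CrawlerRunConfig(stream=True, cache_mode=CacheMode.BYPASS)"))
print(_safe_eval_config("BrowserConfig(headless=True)"))

# rejected: anything that is not a bare CrawlerRunConfig/BrowserConfig call,
# or that contains nested calls
for bad in ("__import__('os').system('id')",
            "CrawlerRunConfig(x=open('/etc/passwd'))"):
    try:
        _safe_eval_config(bad)
    except ValueError as e:
        print("rejected:", e)
```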


# ── job router ──────────────────────────────────────────────
app.include_router(init_job_router(redis, config, token_dep))

# ──────────────────────── Endpoints ──────────────────────────
@app.post("/token")
async def get_token(request_data: TokenRequest):
    if not verify_email_domain(request_data.email):
        raise HTTPException(status_code=400, detail="Invalid email domain")
    token = create_access_token({"sub": request_data.email})
    return {"email": request_data.email, "access_token": token, "token_type": "bearer"}
async def get_token(req: TokenRequest):
    if not verify_email_domain(req.email):
        raise HTTPException(400, "Invalid email domain")
    token = create_access_token({"sub": req.email})
    return {"email": req.email, "access_token": token, "token_type": "bearer"}

# Endpoints with conditional auth
@app.get("/md/{url:path}")

@app.post("/config/dump")
async def config_dump(raw: RawCode):
    try:
        return JSONResponse(_safe_eval_config(raw.code.strip()))
    except Exception as e:
        raise HTTPException(400, str(e))


@app.post("/md")
@limiter.limit(config["rate_limiting"]["default_limit"])
@mcp_tool("md")
async def get_markdown(
    request: Request,
    url: str,
    f: FilterType = FilterType.FIT,
    q: Optional[str] = None,
    c: Optional[str] = "0",
    token_data: Optional[Dict] = Depends(token_dependency)
    body: MarkdownRequest,
    _td: Dict = Depends(token_dep),
):
    result = await handle_markdown_request(url, f, q, c, config)
    return PlainTextResponse(result)
    if not body.url.startswith(("http://", "https://")):
        raise HTTPException(
            400, "URL must be absolute and start with http/https")
    markdown = await handle_markdown_request(
        body.url, body.f, body.q, body.c, config
    )
    return JSONResponse({
        "url": body.url,
        "filter": body.f,
        "query": body.q,
        "cache": body.c,
        "markdown": markdown,
        "success": True
    })
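
A hedged usage sketch for the new POST /md contract above; the host and port are placeholders.

```python
import httpx

resp = httpx.post(
    "http://127.0.0.1:8020/md",   # placeholder host/port
    json={"url": "https://example.com", "f": "fit", "q": None, "c": "0"},
    timeout=120,
)
resp.raise_for_status()
print(resp.json()["markdown"][:500])
```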


@app.get("/llm/{url:path}", description="URL should be without http/https prefix")

@app.post("/html")
@limiter.limit(config["rate_limiting"]["default_limit"])
@mcp_tool("html")
async def generate_html(
    request: Request,
    body: HTMLRequest,
    _td: Dict = Depends(token_dep),
):
    """
    Crawls the URL, preprocesses the raw HTML for schema extraction, and returns the processed HTML.
    Use when you need sanitized HTML structures for building schemas or further processing.
    """
    cfg = CrawlerRunConfig()
    async with AsyncWebCrawler(config=BrowserConfig()) as crawler:
        results = await crawler.arun(url=body.url, config=cfg)
        raw_html = results[0].html
    from crawl4ai.utils import preprocess_html_for_schema
    processed_html = preprocess_html_for_schema(raw_html)
    return JSONResponse({"html": processed_html, "url": body.url, "success": True})

# Screenshot endpoint


@app.post("/screenshot")
@limiter.limit(config["rate_limiting"]["default_limit"])
@mcp_tool("screenshot")
async def generate_screenshot(
    request: Request,
    body: ScreenshotRequest,
    _td: Dict = Depends(token_dep),
):
    """
    Capture a full-page PNG screenshot of the specified URL, waiting an optional delay before capture.
    Use when you need an image snapshot of the rendered page. It is recommended to provide an output
    path to save the screenshot; the result will then contain the path to the saved file instead of
    the screenshot data.
    """
    cfg = CrawlerRunConfig(
        screenshot=True, screenshot_wait_for=body.screenshot_wait_for)
    async with AsyncWebCrawler(config=BrowserConfig()) as crawler:
        results = await crawler.arun(url=body.url, config=cfg)
        screenshot_data = results[0].screenshot
        if body.output_path:
            abs_path = os.path.abspath(body.output_path)
            os.makedirs(os.path.dirname(abs_path), exist_ok=True)
            with open(abs_path, "wb") as f:
                f.write(base64.b64decode(screenshot_data))
            return {"success": True, "path": abs_path}
        return {"success": True, "screenshot": screenshot_data}
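
An illustrative call to the screenshot endpoint above; the paths, host and port are placeholders.

```python
import httpx

resp = httpx.post(
    "http://127.0.0.1:8020/screenshot",   # placeholder host/port
    json={
        "url": "https://example.com",
        "screenshot_wait_for": 2,
        "output_path": "./shots/example.png",   # omit to get base64 data back instead
    },
    timeout=120,
)
print(resp.json())   # e.g. {"success": true, "path": "/abs/path/shots/example.png"}
```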


# PDF endpoint


@app.post("/pdf")
@limiter.limit(config["rate_limiting"]["default_limit"])
@mcp_tool("pdf")
async def generate_pdf(
    request: Request,
    body: PDFRequest,
    _td: Dict = Depends(token_dep),
):
    """
    Generate a PDF document of the specified URL.
    Use when you need a printable or archivable snapshot of the page. It is recommended to provide
    an output path to save the PDF; the result will then contain the path to the saved file instead
    of the PDF bytes.
    """
    cfg = CrawlerRunConfig(pdf=True)
    async with AsyncWebCrawler(config=BrowserConfig()) as crawler:
        results = await crawler.arun(url=body.url, config=cfg)
        pdf_data = results[0].pdf
        if body.output_path:
            abs_path = os.path.abspath(body.output_path)
            os.makedirs(os.path.dirname(abs_path), exist_ok=True)
            with open(abs_path, "wb") as f:
                f.write(pdf_data)
            return {"success": True, "path": abs_path}
        return {"success": True, "pdf": base64.b64encode(pdf_data).decode()}


@app.post("/execute_js")
@limiter.limit(config["rate_limiting"]["default_limit"])
@mcp_tool("execute_js")
async def execute_js(
    request: Request,
    body: JSEndpointRequest,
    _td: Dict = Depends(token_dep),
):
    """
    Execute a sequence of JavaScript snippets on the specified URL.
    Returns the full CrawlResult JSON (first result).
    Use this when you need to interact with dynamic pages using JS.
    REMEMBER: `scripts` accepts a list of separate JS snippets and executes them in order.
    IMPORTANT: Each script should be an expression that returns a value; it can be an IIFE or an
    async function. Your script will replace '{script}' and execute in the browser context, so
    provide either an IIFE or a sync/async function that returns a value.
    Return format:
    - The result is an instance of CrawlResult, so you have access to markdown, links, and the other
      fields below. If that is enough, you don't need to call the other endpoints again.

    ```python
    class CrawlResult(BaseModel):
        url: str
        html: str
        success: bool
        cleaned_html: Optional[str] = None
        media: Dict[str, List[Dict]] = {}
        links: Dict[str, List[Dict]] = {}
        downloaded_files: Optional[List[str]] = None
        js_execution_result: Optional[Dict[str, Any]] = None
        screenshot: Optional[str] = None
        pdf: Optional[bytes] = None
        mhtml: Optional[str] = None
        _markdown: Optional[MarkdownGenerationResult] = PrivateAttr(default=None)
        extracted_content: Optional[str] = None
        metadata: Optional[dict] = None
        error_message: Optional[str] = None
        session_id: Optional[str] = None
        response_headers: Optional[dict] = None
        status_code: Optional[int] = None
        ssl_certificate: Optional[SSLCertificate] = None
        dispatch_result: Optional[DispatchResult] = None
        redirected_url: Optional[str] = None
        network_requests: Optional[List[Dict[str, Any]]] = None
        console_messages: Optional[List[Dict[str, Any]]] = None

    class MarkdownGenerationResult(BaseModel):
        raw_markdown: str
        markdown_with_citations: str
        references_markdown: str
        fit_markdown: Optional[str] = None
        fit_html: Optional[str] = None
    ```
    """
    cfg = CrawlerRunConfig(js_code=body.scripts)
    async with AsyncWebCrawler(config=BrowserConfig()) as crawler:
        results = await crawler.arun(url=body.url, config=cfg)
        # Return a JSON-serializable dict of the first CrawlResult
        data = results[0].model_dump()
    return JSONResponse(data)
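
A sketch of a request against /execute_js following the docstring above; the snippets and the host/port are illustrative only.

```python
import httpx

payload = {
    "url": "https://example.com",
    "scripts": [
        # each snippet is an expression that returns a value
        "(() => document.title)()",
        "(async () => { window.scrollTo(0, document.body.scrollHeight); return true; })()",
    ],
}
resp = httpx.post("http://127.0.0.1:8020/execute_js", json=payload, timeout=120)
result = resp.json()                      # full CrawlResult dump
print(result["js_execution_result"])
```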


@app.get("/llm/{url:path}")
async def llm_endpoint(
    request: Request,
    url: str = Path(...),
    q: Optional[str] = Query(None),
    token_data: Optional[Dict] = Depends(token_dependency)
    q: str = Query(...),
    _td: Dict = Depends(token_dep),
):
    if not q:
        raise HTTPException(status_code=400, detail="Query parameter 'q' is required")
    if not url.startswith(('http://', 'https://')):
        url = 'https://' + url
    try:
        answer = await handle_llm_qa(url, q, config)
        return JSONResponse({"answer": answer})
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
        raise HTTPException(400, "Query parameter 'q' is required")
    if not url.startswith(("http://", "https://")):
        url = "https://" + url
    answer = await handle_llm_qa(url, q, config)
    return JSONResponse({"answer": answer})


@app.get("/schema")
async def get_schema():
    from crawl4ai import BrowserConfig, CrawlerRunConfig
    return {"browser": BrowserConfig().dump(), "crawler": CrawlerRunConfig().dump()}
    return {"browser": BrowserConfig().dump(),
            "crawler": CrawlerRunConfig().dump()}


@app.get(config["observability"]["health_check"]["endpoint"])
async def health():
    return {"status": "ok", "timestamp": time.time(), "version": __version__}


@app.get(config["observability"]["prometheus"]["endpoint"])
async def metrics():
    return RedirectResponse(url=config["observability"]["prometheus"]["endpoint"])
    return RedirectResponse(config["observability"]["prometheus"]["endpoint"])


@app.post("/crawl")
@limiter.limit(config["rate_limiting"]["default_limit"])
@mcp_tool("crawl")
async def crawl(
    request: Request,
    crawl_request: CrawlRequest,
    token_data: Optional[Dict] = Depends(token_dependency)
    _td: Dict = Depends(token_dep),
):
    """
    Crawl a list of URLs and return the results as JSON.
    """
    if not crawl_request.urls:
        raise HTTPException(status_code=400, detail="At least one URL required")

    results = await handle_crawl_request(
        raise HTTPException(400, "At least one URL required")
    res = await handle_crawl_request(
        urls=crawl_request.urls,
        browser_config=crawl_request.browser_config,
        crawler_config=crawl_request.crawler_config,
        config=config
        config=config,
    )

    return JSONResponse(results)
    return JSONResponse(res)


@app.post("/crawl/stream")
@@ -152,24 +451,161 @@ async def crawl
async def crawl_stream(
    request: Request,
    crawl_request: CrawlRequest,
    token_data: Optional[Dict] = Depends(token_dependency)
    _td: Dict = Depends(token_dep),
):
    if not crawl_request.urls:
        raise HTTPException(status_code=400, detail="At least one URL required")

    crawler, results_gen = await handle_stream_crawl_request(
        raise HTTPException(400, "At least one URL required")
    crawler, gen = await handle_stream_crawl_request(
        urls=crawl_request.urls,
        browser_config=crawl_request.browser_config,
        crawler_config=crawl_request.crawler_config,
        config=config
        config=config,
    )

    return StreamingResponse(
        stream_results(crawler, results_gen),
        media_type='application/x-ndjson',
        headers={'Cache-Control': 'no-cache', 'Connection': 'keep-alive', 'X-Stream-Status': 'active'}
        stream_results(crawler, gen),
        media_type="application/x-ndjson",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "X-Stream-Status": "active",
        },
    )
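
Since /crawl/stream emits newline-delimited JSON (application/x-ndjson), a client is expected to read it line by line; a minimal sketch with httpx, where the host and port are placeholders.

```python
import json
import httpx

with httpx.stream(
    "POST",
    "http://127.0.0.1:8020/crawl/stream",   # placeholder host/port
    json={"urls": ["https://example.com", "https://example.org"]},
    timeout=None,
) as resp:
    for line in resp.iter_lines():
        if line:                 # each non-empty line is one JSON result
            print(json.loads(line).get("url"))
```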


def chunk_code_functions(code_md: str) -> List[str]:
    """Extract each function/class from markdown code blocks per file."""
    pattern = re.compile(
        # match "## File: <path>" then a ```py fence, then capture until the closing ```
        r'##\s*File:\s*(?P<path>.+?)\s*?\r?\n'   # file header
        r'```py\s*?\r?\n'                        # opening fence
        r'(?P<code>.*?)(?=\r?\n```)',            # code block
        re.DOTALL
    )
    chunks: List[str] = []
    for m in pattern.finditer(code_md):
        file_path = m.group("path").strip()
        code_blk = m.group("code")
        tree = ast.parse(code_blk)
        lines = code_blk.splitlines()
        for node in tree.body:
            if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
                start = node.lineno - 1
                end = getattr(node, "end_lineno", start + 1)
                snippet = "\n".join(lines[start:end])
                chunks.append(f"# File: {file_path}\n{snippet}")
    return chunks


def chunk_doc_sections(doc: str) -> List[str]:
    lines = doc.splitlines(keepends=True)
    sections = []
    current: List[str] = []
    for line in lines:
        if re.match(r"^#{1,6}\s", line):
            if current:
                sections.append("".join(current))
            current = [line]
        else:
            current.append(line)
    if current:
        sections.append("".join(current))
    return sections


@app.get("/ask")
@limiter.limit(config["rate_limiting"]["default_limit"])
@mcp_tool("ask")
async def get_context(
    request: Request,
    _td: Dict = Depends(token_dep),
    context_type: str = Query("all", regex="^(code|doc|all)$"),
    query: Optional[str] = Query(
        None, description="search query to filter chunks"),
    score_ratio: float = Query(
        0.5, ge=0.0, le=1.0, description="min score as fraction of max_score"),
    max_results: int = Query(
        20, ge=1, description="absolute cap on returned chunks"),
):
    """
    This endpoint is designed for questions about the Crawl4ai library. It returns plain-text markdown
    with extensive information about Crawl4ai, which you can use as context for any AI assistant.
    Use this endpoint for AI assistants to retrieve library context for decision making or code
    generation tasks.
    It is always best practice to provide a query to filter the context; otherwise the response will
    be very long.

    Parameters:
    - context_type: Specify "code" for code context, "doc" for documentation context, or "all" for both.
    - query: RECOMMENDED search query to filter paragraphs using BM25. You can leave this empty to get all the context.
    - score_ratio: Minimum score as a fraction of the maximum score for filtering results.
    - max_results: Maximum number of results to return. Default is 20.

    Returns:
    - JSON response with the requested context.
    - If "code" is specified, returns the code context.
    - If "doc" is specified, returns the documentation context.
    - If "all" is specified, returns both code and documentation contexts.
    """
    # load contexts
    base = os.path.dirname(__file__)
    code_path = os.path.join(base, "c4ai-code-context.md")
    doc_path = os.path.join(base, "c4ai-doc-context.md")
    if not os.path.exists(code_path) or not os.path.exists(doc_path):
        raise HTTPException(404, "Context files not found")

    with open(code_path, "r") as f:
        code_content = f.read()
    with open(doc_path, "r") as f:
        doc_content = f.read()

    # if no query, just return raw contexts
    if not query:
        if context_type == "code":
            return JSONResponse({"code_context": code_content})
        if context_type == "doc":
            return JSONResponse({"doc_context": doc_content})
        return JSONResponse({
            "code_context": code_content,
            "doc_context": doc_content,
        })

    tokens = query.split()
    results: Dict[str, List[Dict[str, float]]] = {}

    # code BM25 over functions/classes
    if context_type in ("code", "all"):
        code_chunks = chunk_code_functions(code_content)
        bm25 = BM25Okapi([c.split() for c in code_chunks])
        scores = bm25.get_scores(tokens)
        max_sc = float(scores.max()) if scores.size > 0 else 0.0
        cutoff = max_sc * score_ratio
        picked = [(c, s) for c, s in zip(code_chunks, scores) if s >= cutoff]
        picked = sorted(picked, key=lambda x: x[1], reverse=True)[:max_results]
        results["code_results"] = [{"text": c, "score": s} for c, s in picked]

    # doc BM25 over markdown sections
    if context_type in ("doc", "all"):
        sections = chunk_doc_sections(doc_content)
        bm25d = BM25Okapi([sec.split() for sec in sections])
        scores_d = bm25d.get_scores(tokens)
        max_sd = float(scores_d.max()) if scores_d.size > 0 else 0.0
        cutoff_d = max_sd * score_ratio
        idxs = [i for i, s in enumerate(scores_d) if s >= cutoff_d]
        neighbors = set(i for idx in idxs for i in (idx-1, idx, idx+1))
        valid = [i for i in sorted(neighbors) if 0 <= i < len(sections)]
        valid = valid[:max_results]
        results["doc_results"] = [
            {"text": sections[i], "score": scores_d[i]} for i in valid
        ]

    return JSONResponse(results)
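
An illustrative query against /ask using the BM25 filtering parameters documented above; the host, port, and query string are placeholders.

```python
import httpx

resp = httpx.get(
    "http://127.0.0.1:8020/ask",   # placeholder host/port
    params={
        "context_type": "doc",
        "query": "deep crawl strategy",
        "score_ratio": 0.5,
        "max_results": 10,
    },
    timeout=60,
)
for hit in resp.json().get("doc_results", []):
    print(round(hit["score"], 2), hit["text"][:80])
```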


# attach MCP layer (adds /mcp/ws, /mcp/sse, /mcp/schema)
print(f"MCP server running on {config['app']['host']}:{config['app']['port']}")
attach_mcp(
    app,
    base_url=f"http://{config['app']['host']}:{config['app']['port']}"
)

# ────────────────────────── cli ──────────────────────────────
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(
@@ -177,5 +613,6 @@ if __name__ == "__main__":
        host=config["app"]["host"],
        port=config["app"]["port"],
        reload=config["app"]["reload"],
        timeout_keep_alive=config["app"]["timeout_keep_alive"]
    )
        timeout_keep_alive=config["app"]["timeout_keep_alive"],
    )
# ─────────────────────────────────────────────────────────────

965 deploy/docker/static/playground/index.html Normal file
@@ -0,0 +1,965 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>Crawl4AI Playground</title>
|
||||
<script src="https://cdn.tailwindcss.com"></script>
|
||||
<script>
|
||||
tailwind.config = {
|
||||
theme: {
|
||||
extend: {
|
||||
colors: {
|
||||
primary: '#4EFFFF',
|
||||
primarydim: '#09b5a5',
|
||||
accent: '#F380F5',
|
||||
dark: '#070708',
|
||||
light: '#E8E9ED',
|
||||
secondary: '#D5CEBF',
|
||||
codebg: '#1E1E1E',
|
||||
surface: '#202020',
|
||||
border: '#3F3F44',
|
||||
},
|
||||
fontFamily: {
|
||||
mono: ['Fira Code', 'monospace'],
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
</script>
|
||||
<link href="https://fonts.googleapis.com/css2?family=Fira+Code:wght@400;500&display=swap" rel="stylesheet">
|
||||
<!-- Highlight.js -->
|
||||
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.9.0/styles/github-dark.min.css">
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.9.0/highlight.min.js"></script>
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/clipboard.js/2.0.11/clipboard.min.js"></script>
|
||||
<!-- CodeMirror (python mode) -->
|
||||
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.65.16/codemirror.min.css">
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.65.16/codemirror.min.js"></script>
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.65.16/mode/python/python.min.js"></script>
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.65.16/addon/edit/matchbrackets.min.js"></script>
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.65.16/addon/selection/active-line.min.js"></script>
|
||||
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.65.16/theme/darcula.min.css">
|
||||
<!-- <script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.9.0/languages/python.min.js"></script>
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.9.0/languages/bash.min.js"></script>
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.9.0/languages/json.min.js"></script> -->
|
||||
<style>
|
||||
/* Custom CodeMirror styling to match theme */
|
||||
.CodeMirror {
|
||||
background-color: #1E1E1E !important;
|
||||
color: #E8E9ED !important;
|
||||
border-radius: 4px;
|
||||
font-family: 'Fira Code', monospace;
|
||||
font-size: 0.9rem;
|
||||
}
|
||||
|
||||
.CodeMirror-gutters {
|
||||
background-color: #1E1E1E !important;
|
||||
border-right: 1px solid #3F3F44 !important;
|
||||
}
|
||||
|
||||
.CodeMirror-linenumber {
|
||||
color: #3F3F44 !important;
|
||||
}
|
||||
|
||||
.cm-s-darcula .cm-keyword {
|
||||
color: #4EFFFF !important;
|
||||
}
|
||||
|
||||
.cm-s-darcula .cm-string {
|
||||
color: #F380F5 !important;
|
||||
}
|
||||
|
||||
.cm-s-darcula .cm-number {
|
||||
color: #D5CEBF !important;
|
||||
}
|
||||
|
||||
/* Add to your <style> section or Tailwind config */
|
||||
.hljs {
|
||||
background: #1E1E1E !important;
|
||||
border-radius: 4px;
|
||||
padding: 1rem !important;
|
||||
}
|
||||
|
||||
pre code.hljs {
|
||||
display: block;
|
||||
overflow-x: auto;
|
||||
}
|
||||
|
||||
/* Language-specific colors */
|
||||
.hljs-attr {
|
||||
color: #4EFFFF;
|
||||
}
|
||||
|
||||
/* JSON keys */
|
||||
.hljs-string {
|
||||
color: #F380F5;
|
||||
}
|
||||
|
||||
/* Strings */
|
||||
.hljs-number {
|
||||
color: #D5CEBF;
|
||||
}
|
||||
|
||||
/* Numbers */
|
||||
.hljs-keyword {
|
||||
color: #4EFFFF;
|
||||
}
|
||||
|
||||
pre code {
|
||||
white-space: pre-wrap;
|
||||
word-break: break-word;
|
||||
}
|
||||
|
||||
.copy-btn {
|
||||
transition: all 0.2s ease;
|
||||
opacity: 0.7;
|
||||
}
|
||||
|
||||
.copy-btn:hover {
|
||||
opacity: 1;
|
||||
}
|
||||
|
||||
.tab-content:hover .copy-btn {
|
||||
opacity: 0.7;
|
||||
}
|
||||
|
||||
.tab-content:hover .copy-btn:hover {
|
||||
opacity: 1;
|
||||
}
|
||||
|
||||
/* copied text highlighted */
|
||||
.highlighted {
|
||||
background-color: rgba(78, 255, 255, 0.2) !important;
|
||||
transition: background-color 0.5s ease;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
|
||||
<body class="bg-dark text-light font-mono min-h-screen flex flex-col" style="font-feature-settings: 'calt' 0;">
|
||||
<!-- Header -->
|
||||
<header class="border-b border-border px-4 py-2 flex items-center">
|
||||
<h1 class="text-lg font-medium flex items-center space-x-4">
|
||||
<span>🚀🤖 <span class="text-primary">Crawl4AI</span> Playground</span>
|
||||
|
||||
<!-- GitHub badges -->
|
||||
<a href="https://github.com/unclecode/crawl4ai" target="_blank" class="flex space-x-1">
|
||||
<img src="https://img.shields.io/github/stars/unclecode/crawl4ai?style=social"
|
||||
alt="GitHub stars" class="h-5">
|
||||
<img src="https://img.shields.io/github/forks/unclecode/crawl4ai?style=social"
|
||||
alt="GitHub forks" class="h-5">
|
||||
</a>
|
||||
|
||||
<!-- Docs -->
|
||||
<a href="https://docs.crawl4ai.com" target="_blank"
|
||||
class="text-xs text-secondary hover:text-primary underline flex items-center">
|
||||
Docs
|
||||
</a>
|
||||
|
||||
<!-- X (Twitter) follow -->
|
||||
<a href="https://x.com/unclecode" target="_blank"
|
||||
class="hover:text-primary flex items-center" title="Follow @unclecode on X">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"
|
||||
class="w-4 h-4 fill-current mr-1">
|
||||
<path d="M22.46 6c-.77.35-1.6.58-2.46.69a4.27 4.27 0 001.88-2.35 8.53 8.53 0 01-2.71 1.04 4.24 4.24 0 00-7.23 3.87A12.05 12.05 0 013 4.62a4.24 4.24 0 001.31 5.65 4.2 4.2 0 01-1.92-.53v.05a4.24 4.24 0 003.4 4.16 4.31 4.31 0 01-1.91.07 4.25 4.25 0 003.96 2.95A8.5 8.5 0 012 19.55a12.04 12.04 0 006.53 1.92c7.84 0 12.13-6.49 12.13-12.13 0-.18-.01-.36-.02-.54A8.63 8.63 0 0024 5.1a8.45 8.45 0 01-2.54.7z"/>
|
||||
</svg>
|
||||
<span class="text-xs">@unclecode</span>
|
||||
</a>
|
||||
</h1>
|
||||
|
||||
<div class="ml-auto flex space-x-2">
|
||||
<button id="play-tab"
|
||||
class="px-3 py-1 rounded-t bg-surface border border-b-0 border-border text-primary">Playground</button>
|
||||
<button id="stress-tab" class="px-3 py-1 rounded-t border border-border hover:bg-surface">Stress
|
||||
Test</button>
|
||||
</div>
|
||||
</header>
|
||||
|
||||
<!-- Main Playground -->
|
||||
<main id="playground" class="flex-1 flex flex-col p-4 space-y-4 max-w-5xl w-full mx-auto">
|
||||
<!-- Request Builder -->
|
||||
<section class="bg-surface rounded-lg border border-border overflow-hidden">
|
||||
<div class="px-4 py-2 border-b border-border flex items-center">
|
||||
<h2 class="font-medium">Request Builder</h2>
|
||||
<select id="endpoint" class="ml-auto bg-dark border border-border rounded px-2 py-1 text-sm">
|
||||
<option value="crawl">/crawl (batch)</option>
|
||||
<option value="crawl_stream">/crawl/stream</option>
|
||||
<option value="md">/md</option>
|
||||
<option value="llm">/llm</option>
|
||||
</select>
|
||||
</div>
|
||||
<div class="p-4">
|
||||
<label class="block mb-2 text-sm">URL(s) - one per line</label>
|
||||
<textarea id="urls" class="w-full bg-dark border border-border rounded p-2 h-32 text-sm mb-4"
|
||||
spellcheck="false">https://example.com</textarea>
|
||||
|
||||
<!-- Specific options for /md endpoint -->
|
||||
<details id="md-options" class="mb-4 hidden">
|
||||
<summary class="text-sm text-secondary cursor-pointer">/md Options</summary>
|
||||
<div class="mt-2 space-y-3 p-2 border border-border rounded">
|
||||
<div>
|
||||
<label for="md-filter" class="block text-xs text-secondary mb-1">Filter Type</label>
|
||||
<select id="md-filter" class="bg-dark border border-border rounded px-2 py-1 text-sm w-full">
|
||||
<option value="fit">fit - Adaptive content filtering</option>
|
||||
<option value="raw">raw - No filtering</option>
|
||||
<option value="bm25">bm25 - BM25 keyword relevance</option>
|
||||
<option value="llm">llm - LLM-based filtering</option>
|
||||
</select>
|
||||
</div>
|
||||
<div>
|
||||
<label for="md-query" class="block text-xs text-secondary mb-1">Query (for BM25/LLM filters)</label>
|
||||
<input id="md-query" type="text" placeholder="Enter search terms or instructions"
|
||||
class="bg-dark border border-border rounded px-2 py-1 text-sm w-full">
|
||||
</div>
|
||||
<div>
|
||||
<label for="md-cache" class="block text-xs text-secondary mb-1">Cache Mode</label>
|
||||
<select id="md-cache" class="bg-dark border border-border rounded px-2 py-1 text-sm w-full">
|
||||
<option value="0">Write-Only (0)</option>
|
||||
<option value="1">Enabled (1)</option>
|
||||
</select>
|
||||
</div>
|
||||
</div>
|
||||
</details>
|
||||
|
||||
<!-- Specific options for /llm endpoint -->
|
||||
<details id="llm-options" class="mb-4 hidden">
|
||||
<summary class="text-sm text-secondary cursor-pointer">/llm Options</summary>
|
||||
<div class="mt-2 space-y-3 p-2 border border-border rounded">
|
||||
<div>
|
||||
<label for="llm-question" class="block text-xs text-secondary mb-1">Question</label>
|
||||
<input id="llm-question" type="text" value="What is this page about?"
|
||||
class="bg-dark border border-border rounded px-2 py-1 text-sm w-full">
|
||||
</div>
|
||||
</div>
|
||||
</details>
|
||||
|
||||
<!-- Advanced config for /crawl endpoints -->
|
||||
<details id="adv-config" class="mb-4">
|
||||
<summary class="text-sm text-secondary cursor-pointer">Advanced Config <span
|
||||
class="text-xs text-primary">(Python → auto‑JSON)</span></summary>
|
||||
|
||||
<!-- Toolbar -->
|
||||
<div class="flex items-center justify-end space-x-3 mt-2">
|
||||
<label for="cfg-type" class="text-xs text-secondary">Type:</label>
|
||||
<select id="cfg-type"
|
||||
class="bg-dark border border-border rounded px-1 py-0.5 text-xs">
|
||||
<option value="CrawlerRunConfig">CrawlerRunConfig</option>
|
||||
<option value="BrowserConfig">BrowserConfig</option>
|
||||
</select>
|
||||
|
||||
<!-- help link -->
|
||||
<a href="https://docs.crawl4ai.com/api/parameters/"
|
||||
target="_blank"
|
||||
class="text-xs text-primary hover:underline flex items-center space-x-1"
|
||||
title="Open parameter reference in new tab">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"
|
||||
class="w-4 h-4 fill-current">
|
||||
<path d="M13 3h8v8h-2V6.41l-9.29 9.3-1.42-1.42 9.3-9.29H13V3z"/>
|
||||
<path d="M5 5h4V3H3v6h2V5zm0 14v-4H3v6h6v-2H5z"/>
|
||||
</svg>
|
||||
<span>Docs</span>
|
||||
</a>
|
||||
|
||||
<span id="cfg-status" class="text-xs text-secondary ml-2"></span>
|
||||
</div>
|
||||
|
||||
<!-- CodeMirror host -->
|
||||
<div id="adv-editor" class="mt-2 border border-border rounded overflow-hidden h-40"></div>
|
||||
</details>
|
||||
|
||||
<div class="flex space-x-2">
|
||||
<button id="run-btn" class="bg-primary text-dark px-4 py-2 rounded hover:bg-primarydim font-medium">
|
||||
Run (⌘/Ctrl+Enter)
|
||||
</button>
|
||||
<button id="export-btn" class="border border-border px-4 py-2 rounded hover:bg-surface hidden">
|
||||
Export Python Code
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</section>
|
||||
|
||||
<!-- Execution Status -->
|
||||
<section id="execution-status" class="hidden bg-surface rounded-lg border border-border p-3 text-sm">
|
||||
<div class="flex space-x-4">
|
||||
<div id="status-badge" class="flex items-center">
|
||||
<span class="w-3 h-3 rounded-full mr-2"></span>
|
||||
<span>Ready</span>
|
||||
</div>
|
||||
<div>
|
||||
<span class="text-secondary">Time:</span>
|
||||
<span id="exec-time" class="text-light">-</span>
|
||||
</div>
|
||||
<div>
|
||||
<span class="text-secondary">Memory:</span>
|
||||
<span id="exec-mem" class="text-light">-</span>
|
||||
</div>
|
||||
</div>
|
||||
</section>
|
||||
|
||||
<!-- Response Viewer -->
|
||||
<!-- Update the Response Viewer section -->
|
||||
<section class="bg-surface rounded-lg border border-border overflow-hidden flex-1 flex flex-col">
|
||||
<div class="border-b border-border flex">
|
||||
<button data-tab="response" class="tab-btn active px-4 py-2 border-r border-border">Response</button>
|
||||
<button data-tab="python" class="tab-btn px-4 py-2 border-r border-border">Python</button>
|
||||
<button data-tab="curl" class="tab-btn px-4 py-2">cURL</button>
|
||||
</div>
|
||||
<div class="flex-1 overflow-auto relative">
|
||||
<!-- Response Tab -->
|
||||
<div class="tab-content active h-full">
|
||||
<div class="absolute right-2 top-2">
|
||||
<button class="copy-btn bg-surface border border-border rounded px-2 py-1 text-xs hover:bg-dark"
|
||||
data-target="#response-content code">
|
||||
Copy
|
||||
</button>
|
||||
</div>
|
||||
<pre id="response-content" class="p-4 text-sm h-full"><code class="json hljs">{}</code></pre>
|
||||
</div>
|
||||
|
||||
<!-- Python Tab -->
|
||||
<div class="tab-content hidden h-full">
|
||||
<div class="absolute right-2 top-2">
|
||||
<button class="copy-btn bg-surface border border-border rounded px-2 py-1 text-xs hover:bg-dark"
|
||||
data-target="#python-content code">
|
||||
Copy
|
||||
</button>
|
||||
</div>
|
||||
<pre id="python-content" class="p-4 text-sm h-full"><code class="python hljs"></code></pre>
|
||||
</div>
|
||||
|
||||
<!-- cURL Tab -->
|
||||
<div class="tab-content hidden h-full">
|
||||
<div class="absolute right-2 top-2">
|
||||
<button class="copy-btn bg-surface border border-border rounded px-2 py-1 text-xs hover:bg-dark"
|
||||
data-target="#curl-content code">
|
||||
Copy
|
||||
</button>
|
||||
</div>
|
||||
<pre id="curl-content" class="p-4 text-sm h-full"><code class="bash hljs"></code></pre>
|
||||
</div>
|
||||
</div>
|
||||
</section>
|
||||
</main>
|
||||
|
||||
<!-- Stress Test Modal -->
|
||||
<div id="stress-modal"
|
||||
class="hidden fixed inset-0 bg-black bg-opacity-70 z-50 flex items-center justify-center p-4">
|
||||
<div class="bg-surface rounded-lg border border-accent w-full max-w-3xl max-h-[90vh] flex flex-col">
|
||||
<div class="px-4 py-2 border-b border-border flex items-center">
|
||||
<h2 class="font-medium text-accent">🔥 Stress Test</h2>
|
||||
<button id="close-stress" class="ml-auto text-secondary hover:text-light">×</button>
|
||||
</div>
|
||||
|
||||
<div class="p-4 space-y-4 flex-1 overflow-auto">
|
||||
<div class="grid grid-cols-3 gap-4">
|
||||
<div>
|
||||
<label class="block text-sm mb-1">Total URLs</label>
|
||||
<input id="st-total" type="number" value="20"
|
||||
class="w-full bg-dark border border-border rounded px-3 py-1">
|
||||
</div>
|
||||
<div>
|
||||
<label class="block text-sm mb-1">Chunk Size</label>
|
||||
<input id="st-chunk" type="number" value="5"
|
||||
class="w-full bg-dark border border-border rounded px-3 py-1">
|
||||
</div>
|
||||
<div>
|
||||
<label class="block text-sm mb-1">Concurrency</label>
|
||||
<input id="st-conc" type="number" value="2"
|
||||
class="w-full bg-dark border border-border rounded px-3 py-1">
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="flex items-center">
|
||||
<input id="st-stream" type="checkbox" class="mr-2">
|
||||
<label for="st-stream" class="text-sm">Use /crawl/stream</label>
|
||||
<button id="st-run"
|
||||
class="ml-auto bg-accent text-dark px-4 py-2 rounded hover:bg-opacity-90 font-medium">
|
||||
Run Stress Test
|
||||
</button>
|
||||
</div>
|
||||
|
||||
<div class="mt-4">
|
||||
<div class="bg-dark rounded border border-border p-3 h-64 overflow-auto text-sm whitespace-break-spaces"
|
||||
id="stress-log"></div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="px-4 py-2 border-t border-border text-sm text-secondary">
|
||||
<div class="flex justify-between">
|
||||
<span>Completed: <span id="stress-completed">0</span>/<span id="stress-total">0</span></span>
|
||||
<span>Avg. Time: <span id="stress-avg-time">0</span>ms</span>
|
||||
<span>Peak Memory: <span id="stress-peak-mem">0</span>MB</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<script>
|
||||
// Tab switching
|
||||
document.querySelectorAll('.tab-btn').forEach(btn => {
|
||||
btn.addEventListener('click', () => {
|
||||
document.querySelectorAll('.tab-btn').forEach(b => b.classList.remove('active'));
|
||||
document.querySelectorAll('.tab-content').forEach(c => c.classList.add('hidden'));
|
||||
|
||||
btn.classList.add('active');
|
||||
const tabName = btn.dataset.tab;
|
||||
document.querySelector(`#${tabName}-content`).parentElement.classList.remove('hidden');
|
||||
|
||||
// Re-highlight content when switching tabs
|
||||
const activeCode = document.querySelector(`#${tabName}-content code`);
|
||||
if (activeCode) {
|
||||
forceHighlightElement(activeCode);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
// View switching
|
||||
document.getElementById('play-tab').addEventListener('click', () => {
|
||||
document.getElementById('playground').classList.remove('hidden');
|
||||
document.getElementById('stress-modal').classList.add('hidden');
|
||||
document.getElementById('play-tab').classList.add('bg-surface', 'border-b-0');
|
||||
document.getElementById('stress-tab').classList.remove('bg-surface', 'border-b-0');
|
||||
});
|
||||
|
||||
document.getElementById('stress-tab').addEventListener('click', () => {
|
||||
document.getElementById('stress-modal').classList.remove('hidden');
|
||||
document.getElementById('stress-tab').classList.add('bg-surface', 'border-b-0');
|
||||
document.getElementById('play-tab').classList.remove('bg-surface', 'border-b-0');
|
||||
});
|
||||
|
||||
document.getElementById('close-stress').addEventListener('click', () => {
|
||||
document.getElementById('stress-modal').classList.add('hidden');
|
||||
document.getElementById('play-tab').classList.add('bg-surface', 'border-b-0');
|
||||
document.getElementById('stress-tab').classList.remove('bg-surface', 'border-b-0');
|
||||
});
|
||||
|
||||
// Initialize clipboard and highlight.js
|
||||
new ClipboardJS('#export-btn');
|
||||
hljs.highlightAll();
|
||||
|
||||
// Keyboard shortcut
|
||||
window.addEventListener('keydown', e => {
|
||||
if ((e.ctrlKey || e.metaKey) && e.key === 'Enter') {
|
||||
document.getElementById('run-btn').click();
|
||||
}
|
||||
});
|
||||
|
||||
// ================ ADVANCED CONFIG EDITOR ================
|
||||
const cm = CodeMirror(document.getElementById('adv-editor'), {
|
||||
value: `CrawlerRunConfig(
|
||||
stream=True,
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
)`,
|
||||
mode: 'python',
|
||||
lineNumbers: true,
|
||||
theme: 'darcula',
|
||||
tabSize: 4,
|
||||
styleActiveLine: true,
|
||||
matchBrackets: true,
|
||||
gutters: ["CodeMirror-linenumbers"],
|
||||
lineWrapping: true,
|
||||
});
|
||||
|
||||
const TEMPLATES = {
|
||||
CrawlerRunConfig: `CrawlerRunConfig(
|
||||
stream=True,
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
)`,
|
||||
BrowserConfig: `BrowserConfig(
|
||||
headless=True,
|
||||
extra_args=[
|
||||
"--no-sandbox",
|
||||
"--disable-gpu",
|
||||
],
|
||||
)`,
|
||||
};
|
||||
|
||||
document.getElementById('cfg-type').addEventListener('change', (e) => {
|
||||
cm.setValue(TEMPLATES[e.target.value]);
|
||||
document.getElementById('cfg-status').textContent = '';
|
||||
});
|
||||
|
||||
// Handle endpoint selection change to show appropriate options
|
||||
document.getElementById('endpoint').addEventListener('change', function(e) {
|
||||
const endpoint = e.target.value;
|
||||
const mdOptions = document.getElementById('md-options');
|
||||
const llmOptions = document.getElementById('llm-options');
|
||||
const advConfig = document.getElementById('adv-config');
|
||||
|
||||
// Hide all option sections first
|
||||
mdOptions.classList.add('hidden');
|
||||
llmOptions.classList.add('hidden');
|
||||
advConfig.classList.add('hidden');
|
||||
|
||||
// Show the appropriate section based on endpoint
|
||||
if (endpoint === 'md') {
|
||||
mdOptions.classList.remove('hidden');
|
||||
// Auto-open the /md options
|
||||
mdOptions.setAttribute('open', '');
|
||||
} else if (endpoint === 'llm') {
|
||||
llmOptions.classList.remove('hidden');
|
||||
// Auto-open the /llm options
|
||||
llmOptions.setAttribute('open', '');
|
||||
} else {
|
||||
// For /crawl endpoints, show the advanced config
|
||||
advConfig.classList.remove('hidden');
|
||||
}
|
||||
});
|
||||
|
||||
async function pyConfigToJson() {
|
||||
const code = cm.getValue().trim();
|
||||
if (!code) return {};
|
||||
|
||||
const res = await fetch('/config/dump', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ code }),
|
||||
});
|
||||
|
||||
const statusEl = document.getElementById('cfg-status');
|
||||
if (!res.ok) {
|
||||
const msg = await res.text();
|
||||
statusEl.textContent = '✖ config error';
|
||||
statusEl.className = 'text-xs text-red-400';
|
||||
throw new Error(msg || 'Invalid config');
|
||||
}
|
||||
|
||||
statusEl.textContent = '✓ parsed';
|
||||
statusEl.className = 'text-xs text-green-400';
|
||||
|
||||
return await res.json();
|
||||
}
|
||||
|
||||
// ================ SERVER COMMUNICATION ================
|
||||
|
||||
// Update status UI
|
||||
function updateStatus(status, time, memory, peakMemory) {
|
||||
const statusEl = document.getElementById('execution-status');
|
||||
const badgeEl = document.querySelector('#status-badge span:first-child');
|
||||
const textEl = document.querySelector('#status-badge span:last-child');
|
||||
|
||||
statusEl.classList.remove('hidden');
|
||||
badgeEl.className = 'w-3 h-3 rounded-full mr-2';
|
||||
|
||||
if (status === 'success') {
|
||||
badgeEl.classList.add('bg-green-500');
|
||||
textEl.textContent = 'Success';
|
||||
} else if (status === 'error') {
|
||||
badgeEl.classList.add('bg-red-500');
|
||||
textEl.textContent = 'Error';
|
||||
} else {
|
||||
badgeEl.classList.add('bg-yellow-500');
|
||||
textEl.textContent = 'Processing...';
|
||||
}
|
||||
|
||||
if (time) {
|
||||
document.getElementById('exec-time').textContent = `${time}ms`;
|
||||
}
|
||||
|
||||
if (memory !== undefined && peakMemory !== undefined) {
|
||||
document.getElementById('exec-mem').textContent = `Δ${memory >= 0 ? '+' : ''}${memory}MB (Peak: ${peakMemory}MB)`;
|
||||
}
|
||||
}
|
||||
|
||||
// Generate code snippets
|
||||
function generateSnippets(api, payload, method = 'POST') {
|
||||
// Python snippet
|
||||
const pyCodeEl = document.querySelector('#python-content code');
|
||||
let pySnippet;
|
||||
|
||||
if (method === 'GET') {
|
||||
// GET request (for /llm endpoint)
|
||||
pySnippet = `import httpx\n\nasync def crawl():\n async with httpx.AsyncClient() as client:\n response = await client.get(\n "${window.location.origin}${api}"\n )\n return response.json()`;
|
||||
} else {
|
||||
// POST request (for /crawl and /md endpoints)
|
||||
pySnippet = `import httpx\n\nasync def crawl():\n async with httpx.AsyncClient() as client:\n response = await client.post(\n "${window.location.origin}${api}",\n json=${JSON.stringify(payload, null, 4).replace(/\n/g, '\n ')}\n )\n return response.json()`;
|
||||
}
|
||||
|
||||
pyCodeEl.textContent = pySnippet;
|
||||
pyCodeEl.className = 'python hljs'; // Reset classes
|
||||
forceHighlightElement(pyCodeEl);
|
||||
|
||||
// cURL snippet
|
||||
const curlCodeEl = document.querySelector('#curl-content code');
|
||||
let curlSnippet;
|
||||
|
||||
if (method === 'GET') {
|
||||
// GET request (for /llm endpoint)
|
||||
curlSnippet = `curl -X GET "${window.location.origin}${api}"`;
|
||||
} else {
|
||||
// POST request (for /crawl and /md endpoints)
|
||||
curlSnippet = `curl -X POST ${window.location.origin}${api} \\\n -H "Content-Type: application/json" \\\n -d '${JSON.stringify(payload)}'`;
|
||||
}
|
||||
|
||||
curlCodeEl.textContent = curlSnippet;
|
||||
curlCodeEl.className = 'bash hljs'; // Reset classes
|
||||
forceHighlightElement(curlCodeEl);
|
||||
}
|
||||
|
||||
// Main run function
|
||||
async function runCrawl() {
|
||||
const endpoint = document.getElementById('endpoint').value;
|
||||
const urls = document.getElementById('urls').value.trim().split(/\n/).filter(u => u);
|
||||
// 1) grab python from CodeMirror, validate via /config/dump
|
||||
let advConfig = {};
|
||||
try {
|
||||
const cfgJson = await pyConfigToJson(); // may throw
|
||||
if (Object.keys(cfgJson).length) {
|
||||
const cfgType = document.getElementById('cfg-type').value;
|
||||
advConfig = cfgType === 'CrawlerRunConfig'
|
||||
? { crawler_config: cfgJson }
|
||||
: { browser_config: cfgJson };
|
||||
}
|
||||
} catch (err) {
|
||||
updateStatus('error');
|
||||
document.querySelector('#response-content code').textContent =
|
||||
JSON.stringify({ error: err.message }, null, 2);
|
||||
forceHighlightElement(document.querySelector('#response-content code'));
|
||||
return; // stop run
|
||||
}
|
||||
|
||||
const endpointMap = {
|
||||
crawl: '/crawl',
|
||||
// crawl_stream: '/crawl/stream',
|
||||
md: '/md',
|
||||
llm: '/llm'
|
||||
};
|
||||
|
||||
const api = endpointMap[endpoint];
|
||||
let payload;
|
||||
|
||||
// Create appropriate payload based on endpoint type
|
||||
if (endpoint === 'md') {
|
||||
// Get values from the /md specific inputs
|
||||
const filterType = document.getElementById('md-filter').value;
|
||||
const query = document.getElementById('md-query').value.trim();
|
||||
const cache = document.getElementById('md-cache').value;
|
||||
|
||||
// MD endpoint expects: { url, f, q, c }
|
||||
payload = {
|
||||
url: urls[0], // Take first URL
|
||||
f: filterType, // Lowercase filter type as required by server
|
||||
q: query || null, // Use the query if provided, otherwise null
|
||||
c: cache
|
||||
};
|
||||
} else if (endpoint === 'llm') {
|
||||
// LLM endpoint has a different URL pattern and uses query params
|
||||
// This will be handled directly in the fetch below
|
||||
payload = null;
|
||||
} else {
|
||||
// Default payload for /crawl and /crawl/stream
|
||||
payload = {
|
||||
urls,
|
||||
...advConfig
|
||||
};
|
||||
}
|
||||
|
||||
updateStatus('processing');
|
||||
|
||||
try {
|
||||
const startTime = performance.now();
|
||||
let response, responseData;
|
||||
|
||||
if (endpoint === 'llm') {
|
||||
// Special handling for LLM endpoint which uses URL pattern: /llm/{encoded_url}?q={query}
|
||||
const url = urls[0];
|
||||
const encodedUrl = encodeURIComponent(url);
|
||||
// Get the question from the LLM-specific input
|
||||
const question = document.getElementById('llm-question').value.trim() || "What is this page about?";
|
||||
|
||||
response = await fetch(`${api}/${encodedUrl}?q=${encodeURIComponent(question)}`, {
|
||||
method: 'GET',
|
||||
headers: { 'Accept': 'application/json' }
|
||||
});
|
||||
responseData = await response.json();
|
||||
const time = Math.round(performance.now() - startTime);
|
||||
if (!response.ok) {
|
||||
updateStatus('error', time);
|
||||
throw new Error(responseData.error || 'Request failed');
|
||||
}
|
||||
updateStatus('success', time);
|
||||
document.querySelector('#response-content code').textContent = JSON.stringify(responseData, null, 2);
|
||||
document.querySelector('#response-content code').className = 'json hljs';
|
||||
forceHighlightElement(document.querySelector('#response-content code'));
|
||||
} else if (endpoint === 'crawl_stream') {
|
||||
// Stream processing
|
||||
response = await fetch(api, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify(payload)
|
||||
});
|
||||
|
||||
const reader = response.body.getReader();
|
||||
let text = '';
|
||||
let maxMemory = 0;
|
||||
|
||||
while (true) {
|
||||
const { value, done } = await reader.read();
|
||||
if (done) break;
|
||||
|
||||
const chunk = new TextDecoder().decode(value);
|
||||
text += chunk;
|
||||
|
||||
// Process each line for memory updates
|
||||
chunk.trim().split('\n').forEach(line => {
|
||||
if (!line) return;
|
||||
try {
|
||||
const obj = JSON.parse(line);
|
||||
if (obj.server_memory_mb) {
|
||||
maxMemory = Math.max(maxMemory, obj.server_memory_mb);
|
||||
}
|
||||
} catch (e) {
|
||||
console.error('Error parsing stream line:', e);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
responseData = { stream: text };
|
||||
const time = Math.round(performance.now() - startTime);
|
||||
updateStatus('success', time, null, maxMemory);
|
||||
document.querySelector('#response-content code').textContent = text;
|
||||
document.querySelector('#response-content code').className = 'json hljs'; // Reset classes
|
||||
forceHighlightElement(document.querySelector('#response-content code'));
|
||||
} else {
|
||||
// Regular request (handles /crawl and /md)
|
||||
response = await fetch(api, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify(payload)
|
||||
});
|
||||
|
||||
responseData = await response.json();
|
||||
const time = Math.round(performance.now() - startTime);
|
||||
|
||||
if (!response.ok) {
|
||||
updateStatus('error', time);
|
||||
throw new Error(responseData.error || 'Request failed');
|
||||
}
|
||||
|
||||
updateStatus(
|
||||
'success',
|
||||
time,
|
||||
responseData.server_memory_delta_mb,
|
||||
responseData.server_peak_memory_mb
|
||||
);
|
||||
|
||||
document.querySelector('#response-content code').textContent = JSON.stringify(responseData, null, 2);
|
||||
document.querySelector('#response-content code').className = 'json hljs'; // Ensure class is set
|
||||
forceHighlightElement(document.querySelector('#response-content code'));
|
||||
}
|
||||
|
||||
|
||||
|
||||
// For generateSnippets, handle the LLM case specially
|
||||
if (endpoint === 'llm') {
|
||||
const url = urls[0];
|
||||
const encodedUrl = encodeURIComponent(url);
|
||||
const question = document.getElementById('llm-question').value.trim() || "What is this page about?";
|
||||
generateSnippets(`${api}/${encodedUrl}?q=${encodeURIComponent(question)}`, null, 'GET');
|
||||
} else {
|
||||
generateSnippets(api, payload);
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('Error:', error);
|
||||
updateStatus('error');
|
||||
document.querySelector('#response-content code').textContent = JSON.stringify(
|
||||
{ error: error.message },
|
||||
null,
|
||||
2
|
||||
);
|
||||
forceHighlightElement(document.querySelector('#response-content code'));
|
||||
}
|
||||
}
|
||||
|
||||
// Stress test function
|
||||
async function runStressTest() {
|
||||
const total = parseInt(document.getElementById('st-total').value);
|
||||
const chunkSize = parseInt(document.getElementById('st-chunk').value);
|
||||
const concurrency = parseInt(document.getElementById('st-conc').value);
|
||||
const useStream = document.getElementById('st-stream').checked;
|
||||
|
||||
const logEl = document.getElementById('stress-log');
|
||||
logEl.textContent = '';
|
||||
|
||||
document.getElementById('stress-completed').textContent = '0';
|
||||
document.getElementById('stress-total').textContent = total;
|
||||
document.getElementById('stress-avg-time').textContent = '0';
|
||||
document.getElementById('stress-peak-mem').textContent = '0';
|
||||
|
||||
const api = useStream ? '/crawl/stream' : '/crawl';
|
||||
const urls = Array.from({ length: total }, (_, i) => `https://httpbin.org/anything/stress-${i}-${Date.now()}`);
|
||||
const chunks = [];
|
||||
|
||||
for (let i = 0; i < urls.length; i += chunkSize) {
|
||||
chunks.push(urls.slice(i, i + chunkSize));
|
||||
}
|
||||
|
||||
let completed = 0;
|
||||
let totalTime = 0;
|
||||
let peakMemory = 0;
|
||||
|
||||
const processBatch = async (batch, index) => {
|
||||
const payload = {
|
||||
urls: batch,
|
||||
browser_config: {},
|
||||
crawler_config: { cache_mode: 'BYPASS', stream: useStream }
|
||||
};
|
||||
|
||||
const start = performance.now();
|
||||
let time, memory;
|
||||
|
||||
try {
|
||||
if (useStream) {
|
||||
const response = await fetch(api, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify(payload)
|
||||
});
|
||||
|
||||
const reader = response.body.getReader();
|
||||
let maxMem = 0;
|
||||
while (true) {
|
||||
const { value, done } = await reader.read();
|
||||
if (done) break;
|
||||
const text = new TextDecoder().decode(value);
|
||||
text.split('\n').forEach(line => {
|
||||
try {
|
||||
const obj = JSON.parse(line);
|
||||
if (obj.server_memory_mb) {
|
||||
maxMem = Math.max(maxMem, obj.server_memory_mb);
|
||||
}
|
||||
} catch { }
|
||||
});
|
||||
}
|
||||
|
||||
memory = maxMem;
|
||||
} else {
|
||||
const response = await fetch(api, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify(payload)
|
||||
});
|
||||
|
||||
const data = await response.json();
|
||||
memory = data.server_peak_memory_mb;
|
||||
}
|
||||
|
||||
time = Math.round(performance.now() - start);
|
||||
peakMemory = Math.max(peakMemory, memory || 0);
|
||||
totalTime += time;
|
||||
|
||||
logEl.textContent += `[${index + 1}/${chunks.length}] ✔ ${time}ms | Peak ${memory}MB\n`;
|
||||
} catch (error) {
|
||||
time = Math.round(performance.now() - start);
|
||||
logEl.textContent += `[${index + 1}/${chunks.length}] ✖ ${time}ms | ${error.message}\n`;
|
||||
}
|
||||
|
||||
completed += batch.length;
|
||||
document.getElementById('stress-completed').textContent = completed;
|
||||
document.getElementById('stress-peak-mem').textContent = peakMemory;
|
||||
document.getElementById('stress-avg-time').textContent = Math.round(totalTime / (index + 1));
|
||||
|
||||
logEl.scrollTop = logEl.scrollHeight;
|
||||
};
|
||||
|
||||
// Run with concurrency control
|
||||
let active = 0;
|
||||
let index = 0;
|
||||
|
||||
return new Promise(resolve => {
|
||||
const runNext = () => {
|
||||
while (active < concurrency && index < chunks.length) {
|
||||
processBatch(chunks[index], index)
|
||||
.finally(() => {
|
||||
active--;
|
||||
runNext();
|
||||
});
|
||||
active++;
|
||||
index++;
|
||||
}
|
||||
|
||||
if (active === 0 && index >= chunks.length) {
|
||||
logEl.textContent += '\n✅ Stress test completed\n';
|
||||
resolve();
|
||||
}
|
||||
};
|
||||
|
||||
runNext();
|
||||
});
|
||||
}
|
||||
|
||||
// Event listeners
|
||||
document.getElementById('run-btn').addEventListener('click', runCrawl);
|
||||
document.getElementById('st-run').addEventListener('click', runStressTest);
|
||||
|
||||
function forceHighlightElement(element) {
|
||||
if (!element) return;
|
||||
|
||||
// Save current scroll position (important for large code blocks)
|
||||
const scrollTop = element.parentElement.scrollTop;
|
||||
|
||||
// Reset the element
|
||||
const text = element.textContent;
|
||||
element.textContent = text; // plain-text assignment so code is not re-parsed as HTML
|
||||
element.removeAttribute('data-highlighted');
|
||||
|
||||
// Reapply highlighting
|
||||
hljs.highlightElement(element);
|
||||
|
||||
// Restore scroll position
|
||||
element.parentElement.scrollTop = scrollTop;
|
||||
}
|
||||
|
||||
// Initialize clipboard for all copy buttons
|
||||
function initCopyButtons() {
|
||||
document.querySelectorAll('.copy-btn').forEach(btn => {
|
||||
new ClipboardJS(btn, {
|
||||
text: () => {
|
||||
const target = document.querySelector(btn.dataset.target);
|
||||
return target ? target.textContent : '';
|
||||
}
|
||||
}).on('success', e => {
|
||||
e.clearSelection();
|
||||
// make button text "copied" for 1 second
|
||||
const originalText = e.trigger.textContent;
|
||||
e.trigger.textContent = 'Copied!';
|
||||
setTimeout(() => {
|
||||
e.trigger.textContent = originalText;
|
||||
}, 1000);
|
||||
// Highlight the copied code
|
||||
const target = document.querySelector(btn.dataset.target);
|
||||
if (target) {
|
||||
target.classList.add('highlighted');
|
||||
setTimeout(() => {
|
||||
target.classList.remove('highlighted');
|
||||
}, 1000);
|
||||
}
|
||||
|
||||
}).on('error', e => {
|
||||
console.error('Error copying:', e);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
// Function to initialize UI based on selected endpoint
|
||||
function initUI() {
|
||||
// Trigger the endpoint change handler to set initial UI state
|
||||
const endpointSelect = document.getElementById('endpoint');
|
||||
const event = new Event('change');
|
||||
endpointSelect.dispatchEvent(event);
|
||||
|
||||
// Initialize copy buttons
|
||||
initCopyButtons();
|
||||
}
|
||||
|
||||
// Initialize on page load
|
||||
document.addEventListener('DOMContentLoaded', initUI);
|
||||
// Also call it immediately in case the script runs after DOM is already loaded
|
||||
if (document.readyState !== 'loading') {
|
||||
initUI();
|
||||
}
|
||||
|
||||
</script>
|
||||
</body>
|
||||
|
||||
</html>
|
||||
@@ -14,7 +14,7 @@ stderr_logfile=/dev/stderr ; Redirect redis stderr to container stderr
|
||||
stderr_logfile_maxbytes=0
|
||||
|
||||
[program:gunicorn]
|
||||
command=/usr/local/bin/gunicorn --bind 0.0.0.0:11235 --workers 2 --threads 2 --timeout 120 --graceful-timeout 30 --keep-alive 60 --log-level info --worker-class uvicorn.workers.UvicornWorker server:app
|
||||
command=/usr/local/bin/gunicorn --bind 0.0.0.0:11235 --workers 1 --threads 4 --timeout 1800 --graceful-timeout 30 --keep-alive 300 --log-level info --worker-class uvicorn.workers.UvicornWorker server:app
|
||||
directory=/app ; Working directory for the app
|
||||
user=appuser ; Run gunicorn as our non-root user
|
||||
autorestart=true
|
||||
|
||||
@@ -45,10 +45,10 @@ def datetime_handler(obj: any) -> Optional[str]:
|
||||
return obj.isoformat()
|
||||
raise TypeError(f"Object of type {type(obj)} is not JSON serializable")
|
||||
|
||||
def should_cleanup_task(created_at: str) -> bool:
|
||||
def should_cleanup_task(created_at: str, ttl_seconds: int = 3600) -> bool:
|
||||
"""Check if task should be cleaned up based on creation time."""
|
||||
created = datetime.fromisoformat(created_at)
|
||||
return (datetime.now() - created).total_seconds() > 3600
|
||||
return (datetime.now() - created).total_seconds() > ttl_seconds
|
||||
|
||||
def decode_redis_hash(hash_data: Dict[bytes, bytes]) -> Dict[str, str]:
|
||||
"""Decode Redis hash data from bytes to strings."""
|
||||
|
||||
@@ -1,19 +1,11 @@
|
||||
# docker-compose.yml
|
||||
version: '3.8'
|
||||
|
||||
# Base configuration anchor for reusability
|
||||
# Shared configuration for all environments
|
||||
x-base-config: &base-config
|
||||
ports:
|
||||
# Map host port 11235 to container port 11235 (where Gunicorn will listen)
|
||||
- "11235:11235"
|
||||
# - "8080:8080" # Uncomment if needed
|
||||
|
||||
# Load API keys primarily from .llm.env file
|
||||
# Create .llm.env in the root directory from .llm.env.example
|
||||
- "11235:11235" # Gunicorn port
|
||||
env_file:
|
||||
- .llm.env
|
||||
|
||||
# Define environment variables, allowing overrides from host environment
|
||||
# Syntax ${VAR:-} uses host env var 'VAR' if set, otherwise uses value from .llm.env
|
||||
- .llm.env # API keys (create from .llm.env.example)
|
||||
environment:
|
||||
- OPENAI_API_KEY=${OPENAI_API_KEY:-}
|
||||
- DEEPSEEK_API_KEY=${DEEPSEEK_API_KEY:-}
|
||||
@@ -22,10 +14,8 @@ x-base-config: &base-config
|
||||
- TOGETHER_API_KEY=${TOGETHER_API_KEY:-}
|
||||
- MISTRAL_API_KEY=${MISTRAL_API_KEY:-}
|
||||
- GEMINI_API_TOKEN=${GEMINI_API_TOKEN:-}
|
||||
|
||||
volumes:
|
||||
# Mount /dev/shm for Chromium/Playwright performance
|
||||
- /dev/shm:/dev/shm
|
||||
- /dev/shm:/dev/shm # Chromium performance
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
@@ -34,47 +24,26 @@ x-base-config: &base-config
|
||||
memory: 1G
|
||||
restart: unless-stopped
|
||||
healthcheck:
|
||||
# IMPORTANT: Ensure Gunicorn binds to 11235 in supervisord.conf
|
||||
test: ["CMD", "curl", "-f", "http://localhost:11235/health"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
start_period: 40s # Give the server time to start
|
||||
# Run the container as the non-root user defined in the Dockerfile
|
||||
start_period: 40s
|
||||
user: "appuser"
|
||||
|
||||
services:
|
||||
# --- Local Build Services ---
|
||||
crawl4ai-local-amd64:
|
||||
crawl4ai:
|
||||
# 1. Default: Pull multi-platform test image from Docker Hub
|
||||
# 2. Override with local image via: IMAGE=local-test docker compose up
|
||||
image: ${IMAGE:-unclecode/crawl4ai:${TAG:-latest}}
|
||||
|
||||
# Local build config (used with --build)
|
||||
build:
|
||||
context: . # Build context is the root directory
|
||||
dockerfile: Dockerfile # Dockerfile is in the root directory
|
||||
context: .
|
||||
dockerfile: Dockerfile
|
||||
args:
|
||||
INSTALL_TYPE: ${INSTALL_TYPE:-default}
|
||||
ENABLE_GPU: ${ENABLE_GPU:-false}
|
||||
# PYTHON_VERSION arg is omitted as it's fixed by 'FROM python:3.10-slim' in Dockerfile
|
||||
platform: linux/amd64
|
||||
profiles: ["local-amd64"]
|
||||
<<: *base-config # Inherit base configuration
|
||||
|
||||
crawl4ai-local-arm64:
|
||||
build:
|
||||
context: . # Build context is the root directory
|
||||
dockerfile: Dockerfile # Dockerfile is in the root directory
|
||||
args:
|
||||
INSTALL_TYPE: ${INSTALL_TYPE:-default}
|
||||
ENABLE_GPU: ${ENABLE_GPU:-false}
|
||||
platform: linux/arm64
|
||||
profiles: ["local-arm64"]
|
||||
<<: *base-config
|
||||
|
||||
# --- Docker Hub Image Services ---
|
||||
crawl4ai-hub-amd64:
|
||||
image: unclecode/crawl4ai:${VERSION:-latest}-amd64
|
||||
profiles: ["hub-amd64"]
|
||||
<<: *base-config
|
||||
|
||||
crawl4ai-hub-arm64:
|
||||
image: unclecode/crawl4ai:${VERSION:-latest}-arm64
|
||||
profiles: ["hub-arm64"]
|
||||
|
||||
# Inherit shared config
|
||||
<<: *base-config
|
||||
7715 docs/apps/iseeyou/llms-full.txt (Normal file; diff suppressed because it is too large)
1323 docs/apps/linkdin/Crawl4ai_Linkedin_Data_Discovery_Part_1.ipynb (Normal file; diff suppressed because one or more lines are too long)
5859 docs/apps/linkdin/Crawl4ai_Linkedin_Data_Discovery_Part_2.ipynb (Normal file; diff suppressed because it is too large)
131 docs/apps/linkdin/README.md (Normal file)
@@ -0,0 +1,131 @@
|
||||
# Crawl4AI Prospect‑Wizard – step‑by‑step guide
|
||||
|
||||
[](https://colab.research.google.com/drive/10nRCwmfxPjVrRUHyJsYlX7BH5bvPoGpx?usp=sharing)
|
||||
|
||||
A three‑stage demo that goes from **LinkedIn scraping** ➜ **LLM reasoning** ➜ **graph visualisation**.
|
||||
|
||||
**Try it in Google Colab!** Click the badge above to run this demo in a cloud environment with zero setup required.
|
||||
|
||||
```
|
||||
prospect‑wizard/
|
||||
├─ c4ai_discover.py # Stage 1 – scrape companies + people
|
||||
├─ c4ai_insights.py # Stage 2 – embeddings, org‑charts, scores
|
||||
├─ graph_view_template.html # Stage 3 – graph viewer (static HTML)
|
||||
└─ data/ # output lands here (*.jsonl / *.json)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 1 Install & boot a LinkedIn profile (one‑time)
|
||||
|
||||
### 1.1 Install dependencies
|
||||
```bash
|
||||
pip install crawl4ai litellm sentence-transformers pandas rich
|
||||
```
|
||||
|
||||
### 1.2 Create / warm a LinkedIn browser profile
|
||||
```bash
|
||||
crwl profiles
|
||||
```
|
||||
1. The interactive shell shows **New profile** – hit **enter**.
|
||||
2. Choose a name, e.g. `profile_linkedin_uc`.
|
||||
3. A Chromium window opens – log in to LinkedIn, solve whatever CAPTCHA, then close.
|
||||
|
||||
> Remember the **profile name**. All future runs take `--profile-name <your_name>`.
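If you want to reuse the warmed profile outside the bundled scripts, the sketch below mirrors how `c4ai_discover.py` wires it up; the profile name and target URL are placeholders, not part of the demo.

```python
# Minimal sketch: reuse the logged-in LinkedIn profile created with `crwl profiles`.
# Mirrors the BrowserProfiler / BrowserConfig usage in c4ai_discover.py.
import asyncio
from crawl4ai import AsyncWebCrawler, BrowserConfig, BrowserProfiler

async def main():
    profile_path = BrowserProfiler().get_profile_path("profile_linkedin_uc")  # your profile name
    bc = BrowserConfig(
        headless=False,              # keep the window visible; LinkedIn dislikes headless sessions
        user_data_dir=profile_path,  # reuse the stored cookies / session
        use_managed_browser=True,
    )
    crawler = AsyncWebCrawler(config=bc)
    await crawler.start()
    try:
        res = await crawler.arun("https://www.linkedin.com/feed/")  # any URL works as a smoke test
        print("fetched:", res[0].success)
    finally:
        await crawler.close()

asyncio.run(main())
```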
|
||||
|
||||
---
|
||||
|
||||
## 2 Discovery – scrape companies & people
|
||||
|
||||
```bash
|
||||
# --geo           → LinkedIn geoUrn (102713980 = Malaysia, see cheatsheet below)
# --title-filters → "" means no filter; try "Product,Engineering"
# --max-companies / --max-people are kept small by default for workshops
python c4ai_discover.py full \
  --query "health insurance management" \
  --geo 102713980 \
  --title-filters "" \
  --max-companies 10 \
  --max-people 20 \
  --profile-name profile_linkedin_uc \
  --outdir ./data \
  --concurrency 2 \
  --log-level debug
|
||||
```
|
||||
**Outputs** in `./data/`:
|
||||
* `companies.jsonl` – one JSON per company
|
||||
* `people.jsonl` – one JSON per employee
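The records are plain newline‑delimited JSON, so a quick peek from Python looks like this (field names as written by the discovery script):

```python
# Peek at the Stage-1 output: one JSON object per line.
import json
from pathlib import Path

companies = [json.loads(l) for l in Path("data/companies.jsonl").read_text().splitlines() if l]
people = [json.loads(l) for l in Path("data/people.jsonl").read_text().splitlines() if l]
print(f"{len(companies)} companies / {len(people)} people")
print(companies[0]["name"], "->", companies[0]["people_url"])
```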
|
||||
|
||||
🛠️ **Dry‑run:** `C4AI_DEMO_DEBUG=1 python c4ai_discover.py full --query coffee` uses bundled HTML snippets, no network.
|
||||
|
||||
### Handy geoUrn cheatsheet
|
||||
| Location | geoUrn |
|
||||
|----------|--------|
|
||||
| Singapore | **103644278** |
|
||||
| Malaysia | **102713980** |
|
||||
| United States | **103644922** |
|
||||
| United Kingdom | **102221843** |
|
||||
| Australia | **101452733** |
|
||||
_See more: <https://www.linkedin.com/search/results/companies/?geoUrn=XXX> – the number after `geoUrn=` is what you need._
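For reference, this is roughly how the discovery script turns `--query` and `--geo` into its LinkedIn seed URL (values shown are the workshop defaults):

```python
# Sketch of the seed-URL construction used by c4ai_discover.py.
from urllib.parse import quote

query, geo = "health insurance management", 102713980  # Malaysia geoUrn
search_url = (
    "https://www.linkedin.com/search/results/companies/"
    f'?keywords={quote(query)}&companyHqGeo="{geo}"'
)
print(search_url)  # page 1; the script appends &page=N while paginating
```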
|
||||
|
||||
---
|
||||
|
||||
## 3 Insights – embeddings, org‑charts, decision makers
|
||||
|
||||
```bash
|
||||
python c4ai_insights.py \
|
||||
--in ./data \
|
||||
--out ./data \
|
||||
--embed-model all-MiniLM-L6-v2 \
|
||||
--llm-provider gemini/gemini-2.0-flash \
|
||||
--llm-api-key "" \
|
||||
--top-k 10 \
|
||||
--max-llm-tokens 8024 \
|
||||
--llm-temperature 1.0 \
|
||||
--workers 4
|
||||
```
|
||||
Emits next to the Stage‑1 files:
|
||||
* `company_graph.json` – inter‑company similarity graph
|
||||
* `org_chart_<handle>.json` – one per company
|
||||
* `decision_makers.csv` – hand‑picked ‘who to pitch’ list
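A quick way to sanity‑check these artefacts from Python (file and column names as emitted by `c4ai_insights.py`; the `data/` path assumes you kept `--out ./data`):

```python
# Inspect the Stage-2 artefacts.
import json
from pathlib import Path
import pandas as pd

graph = json.loads(Path("data/company_graph.json").read_text())
print(len(graph["nodes"]), "companies,", len(graph["edges"]), "similarity edges")

dm = pd.read_csv("data/decision_makers.csv")
print(dm.sort_values("decision_score", ascending=False).head())
```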
|
||||
|
||||
Flags reference (straight from `build_arg_parser()`):
|
||||
| Flag | Default | Purpose |
|
||||
|------|---------|---------|
|
||||
| `--in` | `.` | Stage‑1 output dir |
|
||||
| `--out` | `.` | Destination dir |
|
||||
| `--embed-model` | `all-MiniLM-L6-v2` | Sentence‑Transformer model |
| `--top-k` | `10` | Neighbours per company in graph |
| `--llm-provider` | `openai/gpt-4.1` | LiteLLM `provider/model` used to score decision makers |
| `--llm-api-key` | env vars | API key for the LLM provider |
| `--llm-base-url` | unset | Custom base URL for the LLM API endpoint |
| `--max-llm-tokens` | `8024` | Token budget per LLM call |
| `--llm-temperature` | `1.0` | Creativity knob |
| `--stub` | off | Skip the LLM call and fabricate tiny charts |
| `--workers` | `4` | Parallel LLM workers |
|
||||
|
||||
---
|
||||
|
||||
## 4 Visualise – interactive graph
|
||||
|
||||
After Stage 2 completes, simply open the HTML viewer from the project root:
|
||||
```bash
|
||||
open graph_view_template.html   # or use Live Server / python -m http.server
|
||||
```
|
||||
The page fetches `data/company_graph.json` and the `org_chart_*.json` files automatically; keep the `data/` folder beside the HTML file.
|
||||
|
||||
* Left pane → list of companies (clans).
|
||||
* Click a node to load its org‑chart on the right.
|
||||
* Chat drawer lets you ask follow‑up questions; context is pulled from `people.jsonl`.
|
||||
|
||||
---
|
||||
|
||||
## 5 Common snags
|
||||
|
||||
| Symptom | Fix |
|
||||
|---------|-----|
|
||||
| Infinite CAPTCHA | Use a residential proxy: `--proxy http://user:pass@ip:port` |
|
||||
| 429 Too Many Requests | Lower `--concurrency`, rotate profile, add delay |
|
||||
| Blank graph | Check JSON paths, clear `localStorage` in browser |
|
||||
|
||||
---
|
||||
|
||||
### TL;DR
|
||||
`crwl profiles` → `c4ai_discover.py` → `c4ai_insights.py` → open `graph_view_template.html`.
|
||||
Live long and `import crawl4ai`.
|
||||
|
||||
446 docs/apps/linkdin/c4ai_discover.py (Normal file)
@@ -0,0 +1,446 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
c4ai-discover — Stage‑1 Discovery CLI
|
||||
|
||||
Scrapes LinkedIn company search + their people pages and dumps two newline‑delimited
|
||||
JSON files: companies.jsonl and people.jsonl.
|
||||
|
||||
Key design rules
|
||||
----------------
|
||||
* No BeautifulSoup — Crawl4AI only for network + HTML fetch.
|
||||
* JsonCssExtractionStrategy for structured scraping; schema auto‑generated once
|
||||
from sample HTML provided by user and then cached under ./schemas/.
|
||||
* Defaults are embedded so the file runs inside VS Code debugger without CLI args.
|
||||
* If executed as a console script (argv > 1), CLI flags win.
|
||||
* Lightweight deps: argparse + Crawl4AI stack.
|
||||
|
||||
Author: Tom @ Kidocode 2025‑04‑26
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import warnings, re
|
||||
warnings.filterwarnings(
|
||||
"ignore",
|
||||
message=r"The pseudo class ':contains' is deprecated, ':-soup-contains' should be used.*",
|
||||
category=FutureWarning,
|
||||
module=r"soupsieve"
|
||||
)
|
||||
|
||||
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
# Imports
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
import argparse
|
||||
import random
|
||||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import pathlib
|
||||
import sys
|
||||
# 3rd-party rich for pretty logging
|
||||
from rich.console import Console
|
||||
from rich.logging import RichHandler
|
||||
|
||||
from datetime import datetime, UTC
|
||||
from textwrap import dedent
|
||||
from types import SimpleNamespace
|
||||
from typing import Dict, List, Optional
|
||||
from urllib.parse import quote
|
||||
from pathlib import Path
|
||||
from glob import glob
|
||||
|
||||
from crawl4ai import (
|
||||
AsyncWebCrawler,
|
||||
BrowserConfig,
|
||||
CacheMode,
|
||||
CrawlerRunConfig,
|
||||
JsonCssExtractionStrategy,
|
||||
BrowserProfiler,
|
||||
LLMConfig,
|
||||
)
|
||||
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
# Constants / paths
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
BASE_DIR = pathlib.Path(__file__).resolve().parent
|
||||
SCHEMA_DIR = BASE_DIR / "schemas"
|
||||
SCHEMA_DIR.mkdir(parents=True, exist_ok=True)
|
||||
COMPANY_SCHEMA_PATH = SCHEMA_DIR / "company_card.json"
|
||||
PEOPLE_SCHEMA_PATH = SCHEMA_DIR / "people_card.json"
|
||||
|
||||
# ---------- deterministic target JSON examples ----------
|
||||
_COMPANY_SCHEMA_EXAMPLE = {
|
||||
"handle": "/company/posify/",
|
||||
"profile_image": "https://media.licdn.com/dms/image/v2/.../logo.jpg",
|
||||
"name": "Management Research Services, Inc. (MRS, Inc)",
|
||||
"descriptor": "Insurance • Milwaukee, Wisconsin",
|
||||
"about": "Insurance • Milwaukee, Wisconsin",
|
||||
"followers": 1000
|
||||
}
|
||||
|
||||
_PEOPLE_SCHEMA_EXAMPLE = {
|
||||
"profile_url": "https://www.linkedin.com/in/lily-ng/",
|
||||
"name": "Lily Ng",
|
||||
"headline": "VP Product @ Posify",
|
||||
"followers": 890,
|
||||
"connection_degree": "2nd",
|
||||
"avatar_url": "https://media.licdn.com/dms/image/v2/.../lily.jpg"
|
||||
}
|
||||
|
||||
# Provided sample HTML snippets (trimmed) — used exactly once to cold‑generate schema.
|
||||
_SAMPLE_COMPANY_HTML = (Path(__file__).resolve().parent / "snippets/company.html").read_text()
|
||||
_SAMPLE_PEOPLE_HTML = (Path(__file__).resolve().parent / "snippets/people.html").read_text()
|
||||
|
||||
# --------- tighter schema prompts ----------
|
||||
_COMPANY_SCHEMA_QUERY = dedent(
|
||||
"""
|
||||
Using the supplied <li> company-card HTML, build a JsonCssExtractionStrategy schema that,
|
||||
for every card, outputs *exactly* the keys shown in the example JSON below.
|
||||
JSON spec:
|
||||
• handle – href of the outermost <a> that wraps the logo/title, e.g. "/company/posify/"
|
||||
• profile_image – absolute URL of the <img> inside that link
|
||||
• name – text of the <a> inside the <span class*='t-16'>
|
||||
• descriptor – text line with industry • location
|
||||
• about – text of the <div class*='t-normal'> below the name (industry + geo)
|
||||
• followers – integer parsed from the <div> containing 'followers'
|
||||
|
||||
IMPORTANT: Do not use the base64 kind of classes to target element. It's not reliable.
|
||||
The main div parent contains these li element is "div.search-results-container" you can use this.
|
||||
The <ul> parent has "role" equal to "list". Using these two should be enough to target the <li> elements.
|
||||
|
||||
IMPORTANT: Remember there might be multiple <a> tags that start with https://www.linkedin.com/company/[NAME],
|
||||
so in case you refer to them for different fields, make sure to be more specific. One has the image, and one
|
||||
has the person's name.
|
||||
|
||||
IMPORTANT: Be very smart in selecting the correct and unique way to address the element. You should ensure
|
||||
your selector points to a single element and is unique to the place that contains the information.
|
||||
"""
|
||||
)
|
||||
|
||||
_PEOPLE_SCHEMA_QUERY = dedent(
|
||||
"""
|
||||
Using the supplied <li> people-card HTML, build a JsonCssExtractionStrategy schema that
|
||||
outputs exactly the keys in the example JSON below.
|
||||
Fields:
|
||||
• profile_url – href of the outermost profile link
|
||||
• name – text inside artdeco-entity-lockup__title
|
||||
• headline – inner text of artdeco-entity-lockup__subtitle
|
||||
• followers – integer parsed from the span inside lt-line-clamp--multi-line
|
||||
• connection_degree – '1st', '2nd', etc. from artdeco-entity-lockup__badge
|
||||
• avatar_url – src of the <img> within artdeco-entity-lockup__image
|
||||
|
||||
IMPORTANT: Do not use the base64 kind of classes to target element. It's not reliable.
|
||||
The main div parent contains these li element is a "div" has these classes "artdeco-card org-people-profile-card__card-spacing org-people__card-margin-bottom".
|
||||
"""
|
||||
)
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Utility helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _load_or_build_schema(
|
||||
path: pathlib.Path,
|
||||
sample_html: str,
|
||||
query: str,
|
||||
example_json: Dict,
|
||||
force = False
|
||||
) -> Dict:
|
||||
"""Load schema from path, else call generate_schema once and persist."""
|
||||
if path.exists() and not force:
|
||||
return json.loads(path.read_text())
|
||||
|
||||
logging.info("[SCHEMA] Generating schema %s", path.name)
|
||||
schema = JsonCssExtractionStrategy.generate_schema(
|
||||
html=sample_html,
|
||||
llm_config=LLMConfig(
|
||||
provider=os.getenv("C4AI_SCHEMA_PROVIDER", "openai/gpt-4o"),
|
||||
api_token=os.getenv("OPENAI_API_KEY", "env:OPENAI_API_KEY"),
|
||||
),
|
||||
query=query,
|
||||
target_json_example=json.dumps(example_json, indent=2),
|
||||
)
|
||||
path.write_text(json.dumps(schema, indent=2))
|
||||
return schema
|
||||
|
||||
|
||||
def _openai_friendly_number(text: str) -> Optional[int]:
|
||||
"""Extract first int from text like '1K followers' (returns 1000)."""
|
||||
import re
|
||||
|
||||
m = re.search(r"(\d[\d,]*)", text.replace(",", ""))
|
||||
if not m:
|
||||
return None
|
||||
val = int(m.group(1))
|
||||
if "k" in text.lower():
|
||||
val *= 1000
|
||||
if "m" in text.lower():
|
||||
val *= 1_000_000
|
||||
return val
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Core async workers
|
||||
# ---------------------------------------------------------------------------
|
||||
async def crawl_company_search(crawler: AsyncWebCrawler, url: str, schema: Dict, limit: int) -> List[Dict]:
|
||||
"""Paginate 10-item company search pages until `limit` reached."""
|
||||
extraction = JsonCssExtractionStrategy(schema)
|
||||
cfg = CrawlerRunConfig(
|
||||
extraction_strategy=extraction,
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
wait_for = ".search-marvel-srp",
|
||||
session_id="company_search",
|
||||
delay_before_return_html=1,
|
||||
magic = True,
|
||||
verbose= False,
|
||||
)
|
||||
companies, page = [], 1
|
||||
while len(companies) < max(limit, 10):
|
||||
paged_url = f"{url}&page={page}"
|
||||
res = await crawler.arun(paged_url, config=cfg)
|
||||
batch = json.loads(res[0].extracted_content)
|
||||
if not batch:
|
||||
break
|
||||
for item in batch:
|
||||
name = item.get("name", "").strip()
|
||||
handle = item.get("handle", "").strip()
|
||||
if not handle or not name:
|
||||
continue
|
||||
descriptor = item.get("descriptor")
|
||||
about = item.get("about")
|
||||
followers = _openai_friendly_number(str(item.get("followers", "")))
|
||||
companies.append(
|
||||
{
|
||||
"handle": handle,
|
||||
"name": name,
|
||||
"descriptor": descriptor,
|
||||
"about": about,
|
||||
"followers": followers,
|
||||
"people_url": f"{handle}people/",
|
||||
"captured_at": datetime.now(UTC).isoformat(timespec="seconds") + "Z",
|
||||
}
|
||||
)
|
||||
page += 1
|
||||
logging.info(
|
||||
f"[dim]Page {page}[/] — running total: {len(companies)}/{limit} companies"
|
||||
)
|
||||
|
||||
return companies[:max(limit, 10)]
|
||||
|
||||
|
||||
async def crawl_people_page(
|
||||
crawler: AsyncWebCrawler,
|
||||
people_url: str,
|
||||
schema: Dict,
|
||||
limit: int,
|
||||
title_kw: str,
|
||||
) -> List[Dict]:
|
||||
people_u = f"{people_url}?keywords={quote(title_kw)}"
|
||||
extraction = JsonCssExtractionStrategy(schema)
|
||||
cfg = CrawlerRunConfig(
|
||||
extraction_strategy=extraction,
|
||||
# scan_full_page=True,
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
magic=True,
|
||||
wait_for=".org-people-profile-card__card-spacing",
|
||||
wait_for_images=5000,
|
||||
delay_before_return_html=1,
|
||||
session_id="people_search",
|
||||
)
|
||||
res = await crawler.arun(people_u, config=cfg)
|
||||
if not res[0].success:
|
||||
return []
|
||||
raw = json.loads(res[0].extracted_content)
|
||||
people = []
|
||||
for p in raw[:limit]:
|
||||
followers = _openai_friendly_number(str(p.get("followers", "")))
|
||||
people.append(
|
||||
{
|
||||
"profile_url": p.get("profile_url"),
|
||||
"name": p.get("name"),
|
||||
"headline": p.get("headline"),
|
||||
"followers": followers,
|
||||
"connection_degree": p.get("connection_degree"),
|
||||
"avatar_url": p.get("avatar_url"),
|
||||
}
|
||||
)
|
||||
return people
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# CLI + main
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def build_arg_parser() -> argparse.ArgumentParser:
|
||||
ap = argparse.ArgumentParser("c4ai-discover — Crawl4AI LinkedIn discovery")
|
||||
sub = ap.add_subparsers(dest="cmd", required=False, help="run scope")
|
||||
|
||||
def add_flags(parser: argparse.ArgumentParser):
|
||||
parser.add_argument("--query", required=False, help="query keyword(s)")
|
||||
parser.add_argument("--geo", required=False, type=int, help="LinkedIn geoUrn")
|
||||
parser.add_argument("--title-filters", default="Product,Engineering", help="comma list of job keywords")
|
||||
parser.add_argument("--max-companies", type=int, default=1000)
|
||||
parser.add_argument("--max-people", type=int, default=500)
|
||||
parser.add_argument("--profile-name", default=str(pathlib.Path.home() / ".crawl4ai/profiles/profile_linkedin_uc"))
|
||||
parser.add_argument("--outdir", default="./output")
|
||||
parser.add_argument("--concurrency", type=int, default=4)
|
||||
parser.add_argument("--log-level", default="info", choices=["debug", "info", "warn", "error"])
|
||||
|
||||
add_flags(sub.add_parser("full"))
|
||||
add_flags(sub.add_parser("companies"))
|
||||
add_flags(sub.add_parser("people"))
|
||||
|
||||
# global flags
|
||||
ap.add_argument(
|
||||
"--debug",
|
||||
action="store_true",
|
||||
help="Use built-in demo defaults (same as C4AI_DEMO_DEBUG=1)",
|
||||
)
|
||||
return ap
|
||||
|
||||
|
||||
def detect_debug_defaults(force = False) -> SimpleNamespace:
|
||||
if not force and sys.gettrace() is None and not os.getenv("C4AI_DEMO_DEBUG"):
|
||||
return SimpleNamespace()
|
||||
# ----- debug‑friendly defaults -----
|
||||
return SimpleNamespace(
|
||||
cmd="full",
|
||||
query="health insurance management",
|
||||
geo=102713980,
|
||||
# title_filters="Product,Engineering",
|
||||
title_filters="",
|
||||
max_companies=10,
|
||||
max_people=5,
|
||||
profile_name="profile_linkedin_uc",
|
||||
outdir="./debug_out",
|
||||
concurrency=2,
|
||||
log_level="debug",
|
||||
)
|
||||
|
||||
|
||||
async def async_main(opts):
|
||||
# ─────────── logging setup ───────────
|
||||
console = Console()
|
||||
logging.basicConfig(
|
||||
level=opts.log_level.upper(),
|
||||
format="%(message)s",
|
||||
handlers=[RichHandler(console=console, markup=True, rich_tracebacks=True)],
|
||||
)
|
||||
|
||||
# -------------------------------------------------------------------
|
||||
# Load or build schemas (one‑time LLM call each)
|
||||
# -------------------------------------------------------------------
|
||||
company_schema = _load_or_build_schema(
|
||||
COMPANY_SCHEMA_PATH,
|
||||
_SAMPLE_COMPANY_HTML,
|
||||
_COMPANY_SCHEMA_QUERY,
|
||||
_COMPANY_SCHEMA_EXAMPLE,
|
||||
# True
|
||||
)
|
||||
people_schema = _load_or_build_schema(
|
||||
PEOPLE_SCHEMA_PATH,
|
||||
_SAMPLE_PEOPLE_HTML,
|
||||
_PEOPLE_SCHEMA_QUERY,
|
||||
_PEOPLE_SCHEMA_EXAMPLE,
|
||||
# True
|
||||
)
|
||||
|
||||
outdir = BASE_DIR / pathlib.Path(opts.outdir)
|
||||
outdir.mkdir(parents=True, exist_ok=True)
|
||||
f_companies = (BASE_DIR / outdir / "companies.jsonl").open("a", encoding="utf-8")
|
||||
f_people = (BASE_DIR / outdir / "people.jsonl").open("a", encoding="utf-8")
|
||||
|
||||
# -------------------------------------------------------------------
|
||||
# Prepare crawler with cookie pool rotation
|
||||
# -------------------------------------------------------------------
|
||||
profiler = BrowserProfiler()
|
||||
path = profiler.get_profile_path(opts.profile_name)
|
||||
bc = BrowserConfig(
|
||||
headless=False,
|
||||
verbose=False,
|
||||
user_data_dir=path,
|
||||
use_managed_browser=True,
|
||||
user_agent_mode = "random",
|
||||
user_agent_generator_config= {
|
||||
"platforms": "mobile",
|
||||
"os": "Android"
|
||||
}
|
||||
)
|
||||
crawler = AsyncWebCrawler(config=bc)
|
||||
|
||||
await crawler.start()
|
||||
|
||||
# Single worker for simplicity; concurrency can be scaled by arun_many if needed.
|
||||
# crawler = await next_crawler().start()
|
||||
try:
|
||||
# Build LinkedIn search URL
|
||||
search_url = f'https://www.linkedin.com/search/results/companies/?keywords={quote(opts.query)}&companyHqGeo="{opts.geo}"'
|
||||
logging.info("Seed URL => %s", search_url)
|
||||
|
||||
companies: List[Dict] = []
|
||||
if opts.cmd in ("companies", "full"):
|
||||
companies = await crawl_company_search(
|
||||
crawler, search_url, company_schema, opts.max_companies
|
||||
)
|
||||
for c in companies:
|
||||
f_companies.write(json.dumps(c, ensure_ascii=False) + "\n")
|
||||
logging.info(f"[bold green]✓[/] Companies scraped so far: {len(companies)}")
|
||||
|
||||
if opts.cmd in ("people", "full"):
|
||||
if not companies:
|
||||
# load from previous run
|
||||
src = outdir / "companies.jsonl"
|
||||
if not src.exists():
|
||||
logging.error("companies.jsonl missing — run companies/full first")
|
||||
return 10
|
||||
companies = [json.loads(l) for l in src.read_text().splitlines()]
|
||||
total_people = 0
|
||||
title_kw = " ".join([t.strip() for t in opts.title_filters.split(",") if t.strip()]) if opts.title_filters else ""
|
||||
for comp in companies:
|
||||
people = await crawl_people_page(
|
||||
crawler,
|
||||
comp["people_url"],
|
||||
people_schema,
|
||||
opts.max_people,
|
||||
title_kw,
|
||||
)
|
||||
for p in people:
|
||||
rec = p | {
|
||||
"company_handle": comp["handle"],
|
||||
# "captured_at": datetime.now(UTC).isoformat(timespec="seconds") + "Z",
|
||||
"captured_at": datetime.now(UTC).isoformat(timespec="seconds") + "Z",
|
||||
}
|
||||
f_people.write(json.dumps(rec, ensure_ascii=False) + "\n")
|
||||
total_people += len(people)
|
||||
logging.info(
|
||||
f"{comp['name']} — [cyan]{len(people)}[/] people extracted"
|
||||
)
|
||||
await asyncio.sleep(random.uniform(0.5, 1))
|
||||
logging.info("Total people scraped: %d", total_people)
|
||||
finally:
|
||||
await crawler.close()
|
||||
f_companies.close()
|
||||
f_people.close()
|
||||
|
||||
return 0
|
||||
|
||||
|
||||
def main():
|
||||
parser = build_arg_parser()
|
||||
cli_opts = parser.parse_args()
|
||||
|
||||
    # decide on debug defaults
    if cli_opts.debug:
        opts = detect_debug_defaults(force=True)
    else:
        env_defaults = detect_debug_defaults()
        # an empty namespace means no debug defaults were requested, so fall back to CLI flags
        opts = env_defaults if vars(env_defaults) else cli_opts

    if not getattr(opts, "cmd", None):
        opts.cmd = "full"

    exit_code = asyncio.run(async_main(opts))
|
||||
sys.exit(exit_code)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
381 docs/apps/linkdin/c4ai_insights.py (Normal file)
@@ -0,0 +1,381 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Stage-2 Insights builder
|
||||
------------------------
|
||||
Reads companies.jsonl & people.jsonl (Stage-1 output) and produces:
|
||||
• company_graph.json
|
||||
• org_chart_<handle>.json (one per company)
|
||||
• decision_makers.csv
|
||||
• graph_view.html (interactive visualisation)
|
||||
|
||||
Run:
|
||||
python c4ai_insights.py --in ./stage1_out --out ./stage2_out
|
||||
|
||||
Author : Tom @ Kidocode, 2025-04-28
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
# Imports & Third-party
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
|
||||
import argparse, asyncio, json, pathlib, random
|
||||
from datetime import datetime, UTC
|
||||
from types import SimpleNamespace
|
||||
from pathlib import Path
|
||||
from typing import List, Dict, Any
|
||||
# Pretty CLI UX
|
||||
from rich.console import Console
|
||||
from rich.logging import RichHandler
|
||||
from rich.progress import Progress, SpinnerColumn, BarColumn, TextColumn, TimeElapsedColumn
|
||||
|
||||
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
|
||||
BASE_DIR = pathlib.Path(__file__).resolve().parent
|
||||
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
# 3rd-party deps
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
import numpy as np
|
||||
# from sentence_transformers import SentenceTransformer
|
||||
# from sklearn.metrics.pairwise import cosine_similarity
|
||||
import pandas as pd
|
||||
import hashlib
|
||||
|
||||
from litellm import completion #Support any LLM Provider
|
||||
|
||||
|
||||
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
# Utils
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
def load_jsonl(path: Path) -> List[Dict[str, Any]]:
|
||||
with open(path, "r", encoding="utf-8") as f:
|
||||
return [json.loads(l) for l in f]
|
||||
|
||||
def dump_json(obj, path: Path):
|
||||
with open(path, "w", encoding="utf-8") as f:
|
||||
json.dump(obj, f, ensure_ascii=False, indent=2)
|
||||
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
# Constants
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
BASE_DIR = pathlib.Path(__file__).resolve().parent
|
||||
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
# Debug defaults (mirrors Stage-1 trick)
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
def dev_defaults() -> SimpleNamespace:
|
||||
return SimpleNamespace(
|
||||
in_dir="./samples",
|
||||
out_dir="./samples/insights",
|
||||
embed_model="all-MiniLM-L6-v2",
|
||||
top_k=10,
|
||||
llm_provider="openai/gpt-4.1",
|
||||
llm_api_key=None,
|
||||
max_llm_tokens=8000,
|
||||
llm_temperature=1.0,
|
||||
stub=False, # Set to True to use a stub for org-chart inference
|
||||
llm_base_url=None, # e.g., "https://api.openai.com/v1" for OpenAI
|
||||
workers=4
|
||||
)
|
||||
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
# Graph builders
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
def embed_descriptions(companies, model_name:str, opts) -> np.ndarray:
|
||||
from sentence_transformers import SentenceTransformer
|
||||
|
||||
console = Console()
|
||||
console.print(f"Using embedding model: [bold cyan]{model_name}[/]")
|
||||
cache_path = BASE_DIR / Path(opts.out_dir) / "embeds_cache.json"
|
||||
cache = {}
|
||||
if cache_path.exists():
|
||||
with open(cache_path) as f:
|
||||
cache = json.load(f)
|
||||
# flush cache if model differs
|
||||
if cache.get("_model") != model_name:
|
||||
cache = {}
|
||||
|
||||
model = SentenceTransformer(model_name)
|
||||
new_texts, new_indices = [], []
|
||||
vectors = np.zeros((len(companies), 384), dtype=np.float32)
|
||||
|
||||
for idx, comp in enumerate(companies):
|
||||
text = comp.get("about") or comp.get("descriptor","")
|
||||
h = hashlib.sha1(text.encode("utf-8")).hexdigest()
|
||||
cached = cache.get(comp["handle"])
|
||||
if cached and cached["hash"] == h:
|
||||
vectors[idx] = np.array(cached["vector"], dtype=np.float32)
|
||||
else:
|
||||
new_texts.append(text)
|
||||
new_indices.append((idx, comp["handle"], h))
|
||||
|
||||
if new_texts:
|
||||
embeds = model.encode(new_texts, show_progress_bar=False, convert_to_numpy=True)
|
||||
for vec, (idx, handle, h) in zip(embeds, new_indices):
|
||||
vectors[idx] = vec
|
||||
cache[handle] = {"hash": h, "vector": vec.tolist()}
|
||||
cache["_model"] = model_name
|
||||
with open(cache_path, "w") as f:
|
||||
json.dump(cache, f)
|
||||
|
||||
return vectors
|
||||
|
||||
def build_company_graph(companies, embeds:np.ndarray, top_k:int) -> Dict[str,Any]:
|
||||
from sklearn.metrics.pairwise import cosine_similarity
|
||||
sims = cosine_similarity(embeds)
|
||||
nodes, edges = [], []
|
||||
for i,c in enumerate(companies):
|
||||
node = dict(
|
||||
id=c["handle"].strip("/"),
|
||||
name=c["name"],
|
||||
handle=c["handle"],
|
||||
about=c.get("about",""),
|
||||
people_url=c.get("people_url",""),
|
||||
industry=c.get("descriptor","").split("•")[0].strip(),
|
||||
geoUrn=c.get("geoUrn"),
|
||||
followers=c.get("followers",0),
|
||||
# desc_embed=embeds[i].tolist(),
|
||||
desc_embed=[],
|
||||
)
|
||||
nodes.append(node)
|
||||
# pick top-k most similar except itself
|
||||
top_idx = np.argsort(sims[i])[::-1][1:top_k+1]
|
||||
for j in top_idx:
|
||||
tgt = companies[j]
|
||||
weight = float(sims[i,j])
|
||||
if node["industry"] == tgt.get("descriptor","").split("•")[0].strip():
|
||||
weight += 0.10
|
||||
if node["geoUrn"] == tgt.get("geoUrn"):
|
||||
weight += 0.05
|
||||
tgt['followers'] = tgt.get("followers", None) or 1
|
||||
node["followers"] = node.get("followers", None) or 1
|
||||
follower_ratio = min(node["followers"], tgt.get("followers",1)) / max(node["followers"] or 1, tgt.get("followers",1))
|
||||
weight += 0.05 * follower_ratio
|
||||
edges.append(dict(
|
||||
source=node["id"],
|
||||
target=tgt["handle"].strip("/"),
|
||||
weight=round(weight,4),
|
||||
drivers=dict(
|
||||
embed_sim=round(float(sims[i,j]),4),
|
||||
industry_match=0.10 if node["industry"] == tgt.get("descriptor","").split("•")[0].strip() else 0,
|
||||
geo_overlap=0.05 if node["geoUrn"] == tgt.get("geoUrn") else 0,
|
||||
)
|
||||
))
|
||||
# return {"nodes":nodes,"edges":edges,"meta":{"generated_at":datetime.now(UTC).isoformat()}}
|
||||
return {"nodes":nodes,"edges":edges,"meta":{"generated_at":datetime.now(UTC).isoformat()}}
|
||||
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
# Org-chart via LLM
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
async def infer_org_chart_llm(company, people, llm_provider:str, api_key:str, max_tokens:int, temperature:float, stub:bool=False, base_url:str=None):
|
||||
if stub:
|
||||
# Tiny fake org-chart when debugging offline
|
||||
chief = random.choice(people)
|
||||
nodes = [{
|
||||
"id": chief["profile_url"],
|
||||
"name": chief["name"],
|
||||
"title": chief["headline"],
|
||||
"dept": chief["headline"].split()[:1][0],
|
||||
"yoe_total": 8,
|
||||
"yoe_current": 2,
|
||||
"seniority_score": 0.8,
|
||||
"decision_score": 0.9,
|
||||
"avatar_url": chief.get("avatar_url")
|
||||
}]
|
||||
return {"nodes":nodes,"edges":[],"meta":{"debug_stub":True,"generated_at":datetime.now(UTC).isoformat()}}
|
||||
|
||||
prompt = [
|
||||
{"role":"system","content":"You are an expert B2B org-chart reasoner."},
|
||||
{"role":"user","content":f"""Here is the company description:
|
||||
|
||||
<company>
|
||||
{json.dumps(company, ensure_ascii=False)}
|
||||
</company>
|
||||
|
||||
Here is a JSON list of employees:
|
||||
<employees>
|
||||
{json.dumps(people, ensure_ascii=False)}
|
||||
</employees>
|
||||
|
||||
1) Build a reporting tree (manager -> direct reports)
|
||||
2) For each person output a decision_score 0-1 for buying new software
|
||||
|
||||
Return JSON: {{ "nodes":[{{id,name,title,dept,yoe_total,yoe_current,seniority_score,decision_score,avatar_url,profile_url}}], "edges":[{{source,target,type,confidence}}] }}
|
||||
"""}
|
||||
]
|
||||
resp = completion(
|
||||
model=llm_provider,
|
||||
messages=prompt,
|
||||
max_tokens=max_tokens,
|
||||
temperature=temperature,
|
||||
response_format={"type":"json_object"},
|
||||
api_key=api_key,
|
||||
base_url=base_url
|
||||
)
|
||||
chart = json.loads(resp.choices[0].message.content)
|
||||
chart["meta"] = dict(
|
||||
model=llm_provider,
|
||||
generated_at=datetime.now(UTC).isoformat()
|
||||
)
|
||||
return chart
|
||||
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
# CSV flatten
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
def export_decision_makers(charts_dir:Path, csv_path:Path, threshold:float=0.5):
|
||||
rows=[]
|
||||
for p in charts_dir.glob("org_chart_*.json"):
|
||||
data=json.loads(p.read_text())
|
||||
comp = p.stem.split("org_chart_")[1]
|
||||
for n in data.get("nodes",[]):
|
||||
if n.get("decision_score",0)>=threshold:
|
||||
rows.append(dict(
|
||||
company=comp,
|
||||
person=n["name"],
|
||||
title=n["title"],
|
||||
decision_score=n["decision_score"],
|
||||
profile_url=n["id"]
|
||||
))
|
||||
pd.DataFrame(rows).to_csv(csv_path,index=False)
|
||||
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
# HTML rendering
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
def render_html(out:Path, template_dir:Path):
|
||||
# From template folder cp graph_view.html and ai.js in out folder
|
||||
import shutil
|
||||
shutil.copy(template_dir/"graph_view_template.html", out / "graph_view.html")
|
||||
shutil.copy(template_dir/"ai.js", out)
|
||||
|
||||
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
# Main async pipeline
|
||||
# ───────────────────────────────────────────────────────────────────────────────
|
||||
async def run(opts):
|
||||
# ── silence SDK noise ──────────────────────────────────────────────────────
|
||||
# for noisy in ("openai", "httpx", "httpcore"):
|
||||
# lg = logging.getLogger(noisy)
|
||||
# lg.setLevel(logging.WARNING) # or ERROR if you want total silence
|
||||
# lg.propagate = False # optional: stop them reaching root
|
||||
|
||||
# ────────────── logging bootstrap ──────────────
|
||||
console = Console()
|
||||
# logging.basicConfig(
|
||||
# level="INFO",
|
||||
# format="%(message)s",
|
||||
# handlers=[RichHandler(console=console, markup=True, rich_tracebacks=True)],
|
||||
# )
|
||||
|
||||
    in_dir = BASE_DIR / Path(opts.in_dir)
    out_dir = BASE_DIR / Path(opts.out_dir)
    out_dir.mkdir(parents=True, exist_ok=True)

    companies = load_jsonl(in_dir/"companies.jsonl")
    people = load_jsonl(in_dir/"people.jsonl")

    console.print(f"[bold cyan]Loaded[/] {len(companies)} companies, {len(people)} people")

    console.print("[bold]⇢[/] Embedding company descriptions…")
    embeds = embed_descriptions(companies, opts.embed_model, opts)

    console.print("[bold]⇢[/] Building similarity graph")
    company_graph = build_company_graph(companies, embeds, opts.top_k)
    dump_json(company_graph, out_dir/"company_graph.json")

    # Filter companies that still need processing
    to_process = []
    for comp in companies:
        handle = comp["handle"].strip("/").replace("/", "_")
        out_file = out_dir/f"org_chart_{handle}.json"
        if out_file.exists():
            console.print(f"[green]✓[/] Skipping existing {comp['name']}")
            continue
        to_process.append(comp)

    if not to_process:
        console.print("[yellow]All companies already processed[/]")
    else:
        workers = getattr(opts, 'workers', 1)
        parallel = workers > 1

        console.print(f"[bold]⇢[/] Inferring org-charts via LLM {f'(parallel={workers} workers)' if parallel else ''}")

        with Progress(
            SpinnerColumn(),
            BarColumn(),
            TextColumn("[progress.description]{task.description}"),
            TimeElapsedColumn(),
            console=console,
        ) as progress:
            task = progress.add_task("Org charts", total=len(to_process))

            async def process_one(comp):
                handle = comp["handle"].strip("/").replace("/", "_")
                persons = [p for p in people if p["company_handle"].strip("/") == comp["handle"].strip("/")]
                chart = await infer_org_chart_llm(
                    comp, persons,
                    llm_provider=opts.llm_provider,
                    api_key=opts.llm_api_key or None,
                    max_tokens=opts.max_llm_tokens,
                    temperature=opts.llm_temperature,
                    stub=opts.stub or False,
                    base_url=opts.llm_base_url or None,
                )
                chart["meta"]["company"] = comp["name"]

                # Save the result immediately
                dump_json(chart, out_dir/f"org_chart_{handle}.json")

                progress.update(task, advance=1, description=f"{comp['name']} ({len(persons)} ppl)")

            # Create coroutines for all companies
            tasks = [process_one(comp) for comp in to_process]

            # Bound concurrency to the configured worker count
            semaphore = asyncio.Semaphore(workers)

            async def bounded_process(coro):
                async with semaphore:
                    return await coro

            # Run with concurrency control
            await asyncio.gather(*(bounded_process(task) for task in tasks))

    console.print("[bold]⇢[/] Flattening decision-makers CSV")
    export_decision_makers(out_dir, out_dir/"decision_makers.csv")

    render_html(out_dir, template_dir=BASE_DIR/"templates")
    console.print(f"[bold green]✓[/] Stage-2 artefacts written to {out_dir}")


# ───────────────────────────────────────────────────────────────────────────────
# CLI
# ───────────────────────────────────────────────────────────────────────────────
def build_arg_parser():
    p = argparse.ArgumentParser(description="Build graphs & visualisation from Stage-1 output")
    p.add_argument("--in", dest="in_dir", required=False, help="Stage-1 output dir", default=".")
    p.add_argument("--out", dest="out_dir", required=False, help="Destination dir", default=".")
    p.add_argument("--embed-model", default="all-MiniLM-L6-v2")
    p.add_argument("--top-k", type=int, default=10, help="Top-k neighbours per company")
    p.add_argument("--llm-provider", default="openai/gpt-4.1",
                   help="LLM model to use in format 'provider/model_name' (e.g., 'anthropic/claude-3')")
    p.add_argument("--llm-api-key", help="API key for LLM provider (defaults to env vars)")
    p.add_argument("--llm-base-url", help="Base URL for LLM API endpoint")
    p.add_argument("--max-llm-tokens", type=int, default=8024)
    p.add_argument("--llm-temperature", type=float, default=1.0)
    p.add_argument("--stub", action="store_true", help="Skip the LLM call and generate tiny fake org charts")
    p.add_argument("--workers", type=int, default=4, help="Number of parallel workers for LLM inference")
    return p

def main():
    opts = build_arg_parser().parse_args()
    # opts = dev_defaults()  # swap in for local debugging without CLI flags
    asyncio.run(run(opts))


if __name__ == "__main__":
    main()
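For reference, the throttling used above is just a semaphore-bounded asyncio.gather. A minimal, self-contained sketch of the same pattern (illustrative names only; nothing here comes from the script itself):

import asyncio

async def demo():
    sem = asyncio.Semaphore(4)            # cap in-flight jobs, like --workers
    async def job(i):
        async with sem:                   # wait for a free slot
            await asyncio.sleep(0.1)      # stand-in for one LLM call
            return i * i
    return await asyncio.gather(*(job(i) for i in range(10)))

print(asyncio.run(demo()))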
9
docs/apps/linkdin/samples/companies.jsonl
Normal file
@@ -0,0 +1,9 @@
{"handle": "https://www.linkedin.com/company/healthpartnersng/", "name": "Health Partners HMO", "descriptor": "Hospitals and Health Care • Ikoyi, LAGOS", "about": "Healthpartners Ltd is a leading HMO in Nigeria providing affordablehealthinsuranceandhealthmanagementservices for companies and individuals in Nigeria. We have several individual and group plans that meets yourhealthmanagementneeds. Call us now at 0807-460-9165, 0807-714-0759 or email...", "followers": null, "people_url": "https://www.linkedin.com/company/healthpartnersng/people/", "captured_at": "2025-04-29T10:46:08Z"}
{"handle": "https://www.linkedin.com/company/health-insurance-management-services-organization/", "name": "Health & Insurance Management Services Organization", "descriptor": "Non-profit Organizations • Mbeya", "about": "Health&InsuranceManagementServices Organization (HIMSO) was established and registered in 2012 as a Non- Government Organization (NGO) with the aim...", "followers": 35, "people_url": "https://www.linkedin.com/company/health-insurance-management-services-organization/people/", "captured_at": "2025-04-29T13:15:04Z"}
{"handle": "https://www.linkedin.com/company/national-health-insurance-management-authority/", "name": "National Health Insurance Management Authority", "descriptor": "Insurance • Lusaka, Lusaka", "about": "The NationalHealthInsuranceManagementAuthority (NHIMA) is established pursuant to section 4 of the NationalHealthInsurance(NHI) Act No. 2 of 2018. The compulsory NationalHealthInsurancescheme seeks to provide for a sound and reliable healthcare financing for Zambian households and the entirehealthsector...", "followers": null, "people_url": "https://www.linkedin.com/company/national-health-insurance-management-authority/people/", "captured_at": "2025-04-29T13:15:04Z"}
{"handle": "https://www.linkedin.com/company/health-alliance-plan/", "name": "Health Alliance Plan", "descriptor": "Hospitals and Health Care • Detroit, MI", "about": "...organizations to enhance the lives of those we touch. We offer six distincthealthinsurancelines: • Group Insured Commercial • Individual • Medicare • Medicaid • Self-Funded • Network Leasing HAP also provides: • Award-winning wellness programs • Community outreach • Digitalhealthtools • Diseasemanagement...", "followers": null, "people_url": "https://www.linkedin.com/company/health-alliance-plan/people/", "captured_at": "2025-04-29T13:15:04Z"}
{"handle": "https://www.linkedin.com/company/insurance-recruiting-solutions/", "name": "Insurance Recruiting Solutions", "descriptor": "Insurance • Waukee, Iowa", "about": "InsuranceRecruiting Solutions provides staffing and recruiting services exclusively to theinsuranceindustry. We are committed to providing highly personalized recruiting services, tailored to each candidate and employer. With years ofinsuranceindustry experience, we speak your language. As a leading national...", "followers": null, "people_url": "https://www.linkedin.com/company/insurance-recruiting-solutions/people/", "captured_at": "2025-04-29T13:15:04Z"}
{"handle": "https://www.linkedin.com/company/healthplanofsanmateo/", "name": "Health Plan of San Mateo (HPSM)", "descriptor": "Hospitals and Health Care • South San Francisco, California", "about": "TheHealthPlan of San Mateo (HPSM) is a local non-profithealthcare plan that offershealthcoverage and a provider network to San Mateo County's under-insured population. We currently serve more than 145,000 County residents.", "followers": null, "people_url": "https://www.linkedin.com/company/healthplanofsanmateo/people/", "captured_at": "2025-04-29T13:15:04Z"}
{"handle": "https://www.linkedin.com/company/insurance-management-group_2/", "name": "Insurance Management Group", "descriptor": "Insurance • Marion, Indiana", "about": "InsuranceManagementGroup is an all-riskinsuranceagency with over 140 years of experience, specializing in Home, Auto, BusinessInsurance, Individual Life &Health, and Employee Benefits. We represent highly rated and financially soundinsurancecarriers, to ensure that our clients are getting the best coverage...", "followers": null, "people_url": "https://www.linkedin.com/company/insurance-management-group_2/people/", "captured_at": "2025-04-29T13:15:04Z"}
{"handle": "https://www.linkedin.com/company/carecard-health-insurance-management-co/", "name": "CareCard Health Insurance Management Co", "descriptor": "Insurance • Damascus", "about": "CareCard offers Business Process Outsourcing (BPO) services toInsurance, Self Funded and Retireehealthplan market. CareCard provides operational outsourcing...", "followers": 187, "people_url": "https://www.linkedin.com/company/carecard-health-insurance-management-co/people/", "captured_at": "2025-04-29T13:15:04Z"}
{"handle": "https://www.linkedin.com/company/healthcluster/", "name": "Health Cluster", "descriptor": "Technology, Information and Internet • Dubai", "about": "..., knowledge and interaction. The company has solutions and products inHealthTech, eHealth, DigitalHealth, Revenue CycleManagement– RCM Solutions, AI & ML, Internet...", "followers": null, "people_url": "https://www.linkedin.com/company/healthcluster/people/", "captured_at": "2025-04-29T13:15:04Z"}
108
docs/apps/linkdin/samples/people.jsonl
Normal file
@@ -0,0 +1,108 @@
{"profile_url": null, "name": "Yahya Ipuge", "headline": "Senior Health Specialist, Independent Consultant, Certified Board Director, Board Chair in NGO and Private Entities", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/C4E03AQFuqPObSyLPMQ/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1517757008397?e=1751500800&v=beta&t=zaHc2CY7AJ-eX1MCSvazp8ny37iBAu3YsyaZjwq6gB0", "company_handle": "https://www.linkedin.com/company/health-insurance-management-services-organization/", "captured_at": "2025-04-29T13:15:33Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Field officer at Health and Insurance Management Services Organization", "followers": null, "connection_degree": null, "avatar_url": "https://media.licdn.com/dms/image/v2/C5103AQEVmdDwTIhsjQ/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1540989154156?e=1751500800&v=beta&t=7N0baJNfZ26dbrNNbv2055sbGlacQUwQu07wUTN0whs", "company_handle": "https://www.linkedin.com/company/health-insurance-management-services-organization/", "captured_at": "2025-04-29T13:15:33Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Medical Practitioner @ Health & Insurance | Master's Degree in Infection Control", "followers": null, "connection_degree": null, "avatar_url": "https://media.licdn.com/dms/image/v2/D4D03AQHjMXy7dSmmLg/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1725975429410?e=1751500800&v=beta&t=lDIL2KhDw471XYvtCrRfkHAnG3Q-npDJnwDdK0sYvpA", "company_handle": "https://www.linkedin.com/company/health-insurance-management-services-organization/", "captured_at": "2025-04-29T13:15:34Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "--", "followers": null, "connection_degree": null, "avatar_url": "data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7", "company_handle": "https://www.linkedin.com/company/health-insurance-management-services-organization/", "captured_at": "2025-04-29T13:15:38Z"}
{"profile_url": null, "name": "Fadhy Mtanga", "headline": "Executive Director at Health & Insurance Management Services Organization (HIMSO) Author | Creative Writer | Social Scientist", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/D4E03AQEloEreyg3qVQ/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1704391866585?e=1751500800&v=beta&t=86am-v3cjBPBldLTwgt8-AY-YbxFY6QZQzObwLTtMEA", "company_handle": "https://www.linkedin.com/company/health-insurance-management-services-organization/", "captured_at": "2025-04-29T13:15:38Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Business Administrator at Consultancy Business investments", "followers": null, "connection_degree": null, "avatar_url": "https://media.licdn.com/dms/image/v2/D4D03AQEuKXJmknr2YA/profile-displayphoto-shrink_800_800/profile-displayphoto-shrink_800_800/0/1714545221728?e=1751500800&v=beta&t=zJG-rDZgYJJ0eROibf-Wag-v_JecCghwU3ul4TaH2Eg", "company_handle": "https://www.linkedin.com/company/national-health-insurance-management-authority/", "captured_at": "2025-04-29T13:15:48Z"}
{"profile_url": null, "name": "Tamani Phiri", "headline": "Corporate Business Strategy | Thought Leadership | Corporate Governance", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/D4D03AQF4mFx8jY2n-w/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1730302954035?e=1751500800&v=beta&t=i4QIrHA6A9eLtKolwTRNhuoiaTad28sf5KHxAFuXG-w", "company_handle": "https://www.linkedin.com/company/national-health-insurance-management-authority/", "captured_at": "2025-04-29T13:15:48Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Member Service Assistant @ National Health Insurance Management Authority (NHIMA) | Clinical Officer | Health Insurance & Public Health | Claims Processing & Customer Support | Data & Policy Analyst", "followers": null, "connection_degree": null, "avatar_url": "https://media.licdn.com/dms/image/v2/D4D03AQGob13KyxrB0g/profile-displayphoto-shrink_100_100/B4DZYCgreeHIAU-/0/1743798848889?e=1751500800&v=beta&t=uXxTsMLi5s7hr8FBEzVTDw7V3eJ85kpTaIC7i_5fM-Y", "company_handle": "https://www.linkedin.com/company/national-health-insurance-management-authority/", "captured_at": "2025-04-29T13:15:48Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Economist/ Development Analyst/ Planner/ Customer Care", "followers": null, "connection_degree": null, "avatar_url": "https://media.licdn.com/dms/image/v2/D4E03AQFEc3EgfdpZeg/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1727782989867?e=1751500800&v=beta&t=dWjKzSu5FDRgmxAVret9jQPhWF2VjcrnmEpR2LDMC1Q", "company_handle": "https://www.linkedin.com/company/national-health-insurance-management-authority/", "captured_at": "2025-04-29T13:15:48Z"}
{"profile_url": null, "name": "Samantha Ngandwe", "headline": "Quality Assurance and Accreditation Officer at National Health Insurance Management Authority", "followers": 382, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/D4D03AQHyOjyoz7d95g/profile-displayphoto-shrink_100_100/B4DZYvvhP5GwAY-/0/1744557712084?e=1751500800&v=beta&t=DLYRpz20zmwUWx1UY1Dn-ykvgWBnwn8XHWLaDMf199M", "company_handle": "https://www.linkedin.com/company/national-health-insurance-management-authority/", "captured_at": "2025-04-29T13:15:48Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Dental Surgery Assistant at Health Promotion Board", "followers": null, "connection_degree": null, "avatar_url": "data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7", "company_handle": "https://www.linkedin.com/company/health-alliance-plan/", "captured_at": "2025-04-29T13:16:11Z"}
{"profile_url": null, "name": "Liz England Tucker", "headline": "Medical Performance Optimization", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/D5603AQFY6yx360QunQ/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1713831102587?e=1751500800&v=beta&t=u-C8Ozpl_ITkTpdgt5QD-C5_Qt7MA0DagLRmiuGKngQ", "company_handle": "https://www.linkedin.com/company/health-alliance-plan/", "captured_at": "2025-04-29T13:16:11Z"}
{"profile_url": null, "name": "Merrill Hausenfluck", "headline": "Chief Financial Officer", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/D4E03AQGKxDKRJM_BCg/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1696292650180?e=1751500800&v=beta&t=NbUVC-QP-XL3frBpQcn3GtGrZ04Fl0xdko4V-mHxPag", "company_handle": "https://www.linkedin.com/company/health-alliance-plan/", "captured_at": "2025-04-29T13:16:11Z"}
{"profile_url": null, "name": "Mike Treash", "headline": "Senior Vice President and Chief Operating Officer at Health Alliance Plan", "followers": 2000, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/D5603AQH_c6tIq929gw/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1720478900599?e=1751500800&v=beta&t=l9RLnLDKBBJjJQTsFMJMa_1MpWCKcV4AUa3dcjGnSXQ", "company_handle": "https://www.linkedin.com/company/health-alliance-plan/", "captured_at": "2025-04-29T13:16:11Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Manager at Health Alliance Plan", "followers": null, "connection_degree": null, "avatar_url": "data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7", "company_handle": "https://www.linkedin.com/company/health-alliance-plan/", "captured_at": "2025-04-29T13:16:11Z"}
{"profile_url": null, "name": "Scot Dickerson", "headline": "Insurance Industry Specialist, Insurance Recruiter, Talent Acquisition, Talent Sourcing, Hiring Consultant, Career Consultant, Staffing, Executive Recruiter at Insurance Recruiting Solutions #insurancejobs #insurance", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/D5603AQGLFvtPPU3HEw/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1724950672124?e=1751500800&v=beta&t=uT4SFSMF32O1d50Z0dbnd6zRRKdABHxSGlOZdxWdXBM", "company_handle": "https://www.linkedin.com/company/insurance-recruiting-solutions/", "captured_at": "2025-04-29T13:16:24Z"}
{"profile_url": null, "name": "Steele Dickerson", "headline": "Insurance Recruiting Solutions", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/D5603AQEyICWaE_PvXA/profile-displayphoto-shrink_100_100/B56ZQuDHyZH0Ac-/0/1735939358232?e=1751500800&v=beta&t=9FdnWHrjnPQ7LQ5FdwC7sY8sS6hm-R4zfWO5Vmwm46w", "company_handle": "https://www.linkedin.com/company/insurance-recruiting-solutions/", "captured_at": "2025-04-29T13:16:24Z"}
{"profile_url": null, "name": "Madeline Judas", "headline": "Recruiting Operations & Business Development Specialist", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/D5603AQG6xiTaJ71UiA/profile-displayphoto-shrink_100_100/B56ZU_N_jPHoAY-/0/1740522388021?e=1751500800&v=beta&t=CxvAsYgU0zelghZsRhUJOC26ILVovP3ZPn4nMnWkEJE", "company_handle": "https://www.linkedin.com/company/insurance-recruiting-solutions/", "captured_at": "2025-04-29T13:16:24Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "All Lines Claims Adjuster / General Lines Agent (Property & Casualty : Life, Accident, Health & HMO)", "followers": null, "connection_degree": null, "avatar_url": "https://media.licdn.com/dms/image/v2/D5603AQFTjkb7SxTWWg/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1725920318474?e=1751500800&v=beta&t=BGEzQg1c2l8qxuy2iKJ896nElsiYcaWnhkf-mqc-KhY", "company_handle": "https://www.linkedin.com/company/insurance-recruiting-solutions/", "captured_at": "2025-04-29T13:16:24Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Clinical Pharmacy Manager at Health Plan of San Mateo (HPSM)", "followers": null, "connection_degree": null, "avatar_url": "https://media.licdn.com/dms/image/v2/C5603AQEPO0pZOxznoA/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1551565536585?e=1751500800&v=beta&t=qwMGzWX_Zefkciq8h2m9daLMflT0WoDr5F1R5pXvyM4", "company_handle": "https://www.linkedin.com/company/healthplanofsanmateo/", "captured_at": "2025-04-29T13:16:40Z"}
{"profile_url": null, "name": "Tamana M.", "headline": "MPH Candidate at Brown University | Data Coordinator", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/D5603AQEY3iDtFmpzlg/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1714197678074?e=1751500800&v=beta&t=IsVT0uC7A-T-Tp22gZFDG9wiT7LMB5GmhccuI8f9c-I", "company_handle": "https://www.linkedin.com/company/healthplanofsanmateo/", "captured_at": "2025-04-29T13:16:40Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Program Manager", "followers": null, "connection_degree": null, "avatar_url": "data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7", "company_handle": "https://www.linkedin.com/company/healthplanofsanmateo/", "captured_at": "2025-04-29T13:16:40Z"}
{"profile_url": null, "name": "Mackenzie Baysinger Moniz, MSW", "headline": "Program Manager at Health Plan of San Mateo", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/D5603AQHAd3A4zLyuWA/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1675716742150?e=1751500800&v=beta&t=ot3fMyJFnHwwNfKJiA_YxZp6MOK_iVGtSCUgVNq867g", "company_handle": "https://www.linkedin.com/company/healthplanofsanmateo/", "captured_at": "2025-04-29T13:16:40Z"}
{"profile_url": null, "name": "John O.", "headline": "Healthcare Delivery Strategy Execution", "followers": null, "connection_degree": "· 3rd", "avatar_url": "data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7", "company_handle": "https://www.linkedin.com/company/healthplanofsanmateo/", "captured_at": "2025-04-29T13:16:40Z"}
{"profile_url": null, "name": "Daniel McQuilkin", "headline": "Senior Vice President", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/C4E03AQFkScOqwhxvfQ/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1521406683682?e=1751500800&v=beta&t=iohhak3lrV1gpmA6dnoCxTRJidskfgmZUXKbNQbkxjs", "company_handle": "https://www.linkedin.com/company/insurance-management-group_2/", "captured_at": "2025-04-29T13:17:05Z"}
{"profile_url": null, "name": "Tony Bonacuse", "headline": "Senior Vice President at Insurance Management Group", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/C5603AQF_JJOFLjkZoQ/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1516269003018?e=1751500800&v=beta&t=0APZt5RNhvUj4IxsSdi7JO9KxezZzOH_WQCibn5Szgs", "company_handle": "https://www.linkedin.com/company/insurance-management-group_2/", "captured_at": "2025-04-29T13:17:05Z"}
{"profile_url": null, "name": "Mark Bilger", "headline": "Director - Sr. Vice President at Insurance Management Group", "followers": 1000, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/C4D03AQEzX5qUfqhd2g/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1663842785708?e=1751500800&v=beta&t=YyKXRQol0cDntoq8vbdxyaRvEFf0vWKNHPxk0cyWiG8", "company_handle": "https://www.linkedin.com/company/insurance-management-group_2/", "captured_at": "2025-04-29T13:17:05Z"}
{"profile_url": null, "name": "Adam Young, MBA", "headline": "Husband | Father | Traveler | Sports Fanatic | Food Enthusiast | Independent Insurance Professional", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/C5603AQErWIq1AVyxKg/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1601480475688?e=1751500800&v=beta&t=jK_mhX0PkDdG8WBZaipIIYRDm1PnWIuFR7sCKDhDi6s", "company_handle": "https://www.linkedin.com/company/insurance-management-group_2/", "captured_at": "2025-04-29T13:17:05Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Senior Vice President at Insurance Management Group / Partner", "followers": null, "connection_degree": null, "avatar_url": "https://media.licdn.com/dms/image/v2/C5603AQH3dm30dXH82w/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1572228299104?e=1751500800&v=beta&t=iuBQYs4iLHJgRgjFbSA2YiNiAI8zDILqg-nVsLR9Qjk", "company_handle": "https://www.linkedin.com/company/insurance-management-group_2/", "captured_at": "2025-04-29T13:17:05Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Doctor at CareCard Health Insurance Management Co", "followers": null, "connection_degree": null, "avatar_url": "data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7", "company_handle": "https://www.linkedin.com/company/carecard-health-insurance-management-co/", "captured_at": "2025-04-29T13:17:09Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Pharmacist", "followers": null, "connection_degree": null, "avatar_url": "https://media.licdn.com/dms/image/v2/C4D03AQHyPi4Amu_Dkw/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1640460490377?e=1751500800&v=beta&t=q7R_b7bD9CR-1-Dvu81WoEHN_ljHK16l6ioTIA0LN7Q", "company_handle": "https://www.linkedin.com/company/carecard-health-insurance-management-co/", "captured_at": "2025-04-29T13:17:09Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "IT Manager at CareCard Health Insurance Management Co", "followers": null, "connection_degree": null, "avatar_url": "data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7", "company_handle": "https://www.linkedin.com/company/carecard-health-insurance-management-co/", "captured_at": "2025-04-29T13:17:09Z"}
{"profile_url": null, "name": "Amal Shabani", "headline": "at carecard", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/C5603AQFLzeP3yPkjgg/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1519625412373?e=1751500800&v=beta&t=GULSoesSn83F_fYkkH_nPxWIjjs1d9Pucc3dUDNei6I", "company_handle": "https://www.linkedin.com/company/carecard-health-insurance-management-co/", "captured_at": "2025-04-29T13:17:09Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "--", "followers": null, "connection_degree": null, "avatar_url": "data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7", "company_handle": "https://www.linkedin.com/company/carecard-health-insurance-management-co/", "captured_at": "2025-04-29T13:17:09Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Biologiste | Pharmaco-épidemiologie & Pharmaco-économie | Software Helath Care Management", "followers": null, "connection_degree": null, "avatar_url": "https://media.licdn.com/dms/image/v2/C4D03AQHOPXrX5-oeug/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1663013895834?e=1751500800&v=beta&t=yE2RGp0rfhcJkjh_vdM0VwpaPUtoPewM80lTlr20OHU", "company_handle": "https://www.linkedin.com/company/healthcluster/", "captured_at": "2025-04-29T13:17:14Z"}
{"profile_url": null, "name": "Ruqaia Ali Alkhalifa", "headline": " RN,BSN, MSN,NE Database Officer for Scholarship Programs and Central Committee rapporteur at Al-Ahsa Health Cluster.", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/D4E03AQGfNujqDnuZDA/profile-displayphoto-shrink_100_100/B4EZOvsQThH0AU-/0/1733819436577?e=1751500800&v=beta&t=jleAVvhbg0H85tSi9TG96x0fqdkS1oytfaU02LHsFEI", "company_handle": "https://www.linkedin.com/company/healthcluster/", "captured_at": "2025-04-29T13:17:14Z"}
{"profile_url": null, "name": "Fahad Mohyuddin", "headline": "Healthcare AI Strategist | Digital Health | SaaS | Telehealth | HIS | EHR | IoT", "followers": 7000, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/C4E03AQFLnPh8fu-HHg/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1647320077586?e=1751500800&v=beta&t=S__knVzEVrGZuyqwszCe_5V_kawbG5tejmmEe3fkMJE", "company_handle": "https://www.linkedin.com/company/healthcluster/", "captured_at": "2025-04-29T13:17:14Z"}
{"profile_url": null, "name": "Muhammad Moid Shams", "headline": "Azure DevOps | AWS Cloud Infrastructure| Freight Tech | Health Tech | HL7- NABIDH | HL7+ FHIR | KSA -NPHIES | FHIR - MOPH | HL7- Riayati | Freight Tech | Insure Tech | with Azure, Azure AI , PowerApps, D365 , M365", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/D4D03AQEzousRurY2Zg/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1711283874675?e=1751500800&v=beta&t=ZheuoRIAkS_9M8WXafdwB1nJEuy-a5HEsrXlfOANx80", "company_handle": "https://www.linkedin.com/company/healthcluster/", "captured_at": "2025-04-29T13:17:14Z"}
{"profile_url": null, "name": "Muhammad Shahzaib (PMP® - SCRUM®)", "headline": "PMP-Certified Project Manager | Health Care & Web Solutions Expert | Customer Success & Operations Management Expert | Business Transformation Expert", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/D4D35AQFyp7WcBZinYA/profile-framedphoto-shrink_100_100/profile-framedphoto-shrink_100_100/0/1730638721808?e=1746540000&v=beta&t=QoGze1AlotUfm3K9kMWG6ZGVHS3ADu38THVPlxlYUys", "company_handle": "https://www.linkedin.com/company/healthcluster/", "captured_at": "2025-04-29T13:17:14Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Music Professional at Health Options Worldwide", "followers": null, "connection_degree": null, "avatar_url": "https://media.licdn.com/dms/image/v2/C5103AQGF-Dp6v6nkGw/profile-displayphoto-shrink_800_800/profile-displayphoto-shrink_800_800/0/1585401654822?e=1751500800&v=beta&t=7yeO-dGz1p_B66cJVSlTSdAYJLMFFwxPIhwwcR8uWWo", "company_handle": "https://www.linkedin.com/company/health-options-worldwide/", "captured_at": "2025-04-29T13:17:17Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Trainer/instructor at Health Options Worldwide", "followers": null, "connection_degree": null, "avatar_url": "data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7", "company_handle": "https://www.linkedin.com/company/health-options-worldwide/", "captured_at": "2025-04-29T13:17:17Z"}
{"profile_url": null, "name": "Michael Akpoarebe-Isaac", "headline": "Chief Operating officer, Health Partners HMO", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/C4E03AQE7KNFaLMyqYg/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1602714385413?e=1751500800&v=beta&t=In5GaREqoXtO3sPCx9ZJJBwIPY4008ii13RPRl0w0Fw", "company_handle": "https://www.linkedin.com/company/healthpartnersng/", "captured_at": "2025-04-29T13:17:22Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "REGISTERED NURSE/CLAIMS SUPERVISOR/HEALTH EDUCATOR/ CASE MANAGER/ Lekki.", "followers": null, "connection_degree": null, "avatar_url": "https://media.licdn.com/dms/image/v2/D4E35AQEEqf5i5pD76g/profile-framedphoto-shrink_100_100/profile-framedphoto-shrink_100_100/0/1724219552412?e=1746540000&v=beta&t=1PAfKEpQFL196LZHfY0wHAZ35TH0fRjku9ihSfDdOk4", "company_handle": "https://www.linkedin.com/company/healthpartnersng/", "captured_at": "2025-04-29T13:17:22Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Data Analyst|Dedicated Retention Officer Boosting Customer Loyalty| Business Developer/ Event planner", "followers": null, "connection_degree": null, "avatar_url": "https://media.licdn.com/dms/image/v2/D4D35AQHIgeS1H7w65w/profile-framedphoto-shrink_100_100/B4DZV1QfcCGcAk-/0/1741429012517?e=1746540000&v=beta&t=NCIbW7MWY7Cy4YEC2xzLoX54-Lm5CNhorbuSQe0lZSk", "company_handle": "https://www.linkedin.com/company/healthpartnersng/", "captured_at": "2025-04-29T13:17:22Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Former Group managing director at Health Partners Ltd", "followers": null, "connection_degree": null, "avatar_url": "https://media.licdn.com/dms/image/v2/C4E03AQHPQPvIQbPQPg/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1583328612508?e=1751500800&v=beta&t=LpynArccJCWrdWMSBvYLH4SI5G-xae7ECoWUUAl_CeU", "company_handle": "https://www.linkedin.com/company/healthpartnersng/", "captured_at": "2025-04-29T13:17:22Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "HEAD, FINCON, @ HEALTH PARTNERS (HMO) LTD", "followers": null, "connection_degree": null, "avatar_url": "https://media.licdn.com/dms/image/v2/C4D03AQG8XOvnazEibQ/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1518882054975?e=1751500800&v=beta&t=5gT6GAWGTqYfpvkjOk0ArvV73I_KspkWXgoG-VhoStg", "company_handle": "https://www.linkedin.com/company/healthpartnersng/", "captured_at": "2025-04-29T13:17:22Z"}
{"profile_url": null, "name": "Yahya Ipuge", "headline": "Senior Health Specialist, Independent Consultant, Certified Board Director, Board Chair in NGO and Private Entities", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/C4E03AQFuqPObSyLPMQ/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1517757008397?e=1751500800&v=beta&t=zaHc2CY7AJ-eX1MCSvazp8ny37iBAu3YsyaZjwq6gB0", "company_handle": "https://www.linkedin.com/company/health-insurance-management-services-organization/", "captured_at": "2025-04-30T07:36:39Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Field officer at Health and Insurance Management Services Organization", "followers": null, "connection_degree": null, "avatar_url": "https://media.licdn.com/dms/image/v2/C5103AQEVmdDwTIhsjQ/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1540989154156?e=1751500800&v=beta&t=7N0baJNfZ26dbrNNbv2055sbGlacQUwQu07wUTN0whs", "company_handle": "https://www.linkedin.com/company/health-insurance-management-services-organization/", "captured_at": "2025-04-30T07:36:39Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Medical Practitioner @ Health & Insurance | Master's Degree in Infection Control", "followers": null, "connection_degree": null, "avatar_url": "https://media.licdn.com/dms/image/v2/D4D03AQHjMXy7dSmmLg/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1725975429410?e=1751500800&v=beta&t=lDIL2KhDw471XYvtCrRfkHAnG3Q-npDJnwDdK0sYvpA", "company_handle": "https://www.linkedin.com/company/health-insurance-management-services-organization/", "captured_at": "2025-04-30T07:36:39Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "--", "followers": null, "connection_degree": null, "avatar_url": "data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7", "company_handle": "https://www.linkedin.com/company/health-insurance-management-services-organization/", "captured_at": "2025-04-30T07:36:39Z"}
{"profile_url": null, "name": "Fadhy Mtanga", "headline": "Executive Director at Health & Insurance Management Services Organization (HIMSO) Author | Creative Writer | Social Scientist", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/D4E03AQEloEreyg3qVQ/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1704391866585?e=1751500800&v=beta&t=86am-v3cjBPBldLTwgt8-AY-YbxFY6QZQzObwLTtMEA", "company_handle": "https://www.linkedin.com/company/health-insurance-management-services-organization/", "captured_at": "2025-04-30T07:36:39Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Member Service Assistant @ National Health Insurance Management Authority (NHIMA) | Clinical Officer | Health Insurance & Public Health | Claims Processing & Customer Support | Data & Policy Analyst", "followers": null, "connection_degree": null, "avatar_url": "https://media.licdn.com/dms/image/v2/D4D03AQGob13KyxrB0g/profile-displayphoto-shrink_100_100/B4DZYCgreeHIAU-/0/1743798848889?e=1751500800&v=beta&t=uXxTsMLi5s7hr8FBEzVTDw7V3eJ85kpTaIC7i_5fM-Y", "company_handle": "https://www.linkedin.com/company/national-health-insurance-management-authority/", "captured_at": "2025-04-30T07:36:45Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Business Administrator at Consultancy Business investments", "followers": null, "connection_degree": null, "avatar_url": "https://media.licdn.com/dms/image/v2/D4D03AQEuKXJmknr2YA/profile-displayphoto-shrink_800_800/profile-displayphoto-shrink_800_800/0/1714545221728?e=1751500800&v=beta&t=zJG-rDZgYJJ0eROibf-Wag-v_JecCghwU3ul4TaH2Eg", "company_handle": "https://www.linkedin.com/company/national-health-insurance-management-authority/", "captured_at": "2025-04-30T07:36:45Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Economist/ Development Analyst/ Planner/ Customer Care", "followers": null, "connection_degree": null, "avatar_url": "https://media.licdn.com/dms/image/v2/D4E03AQFEc3EgfdpZeg/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1727782989867?e=1751500800&v=beta&t=dWjKzSu5FDRgmxAVret9jQPhWF2VjcrnmEpR2LDMC1Q", "company_handle": "https://www.linkedin.com/company/national-health-insurance-management-authority/", "captured_at": "2025-04-30T07:36:45Z"}
{"profile_url": null, "name": "Tamani Phiri", "headline": "Corporate Business Strategy | Thought Leadership | Corporate Governance", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/D4D03AQF4mFx8jY2n-w/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1730302954035?e=1751500800&v=beta&t=i4QIrHA6A9eLtKolwTRNhuoiaTad28sf5KHxAFuXG-w", "company_handle": "https://www.linkedin.com/company/national-health-insurance-management-authority/", "captured_at": "2025-04-30T07:36:45Z"}
{"profile_url": null, "name": "Samantha Ngandwe", "headline": "Quality Assurance and Accreditation Officer at National Health Insurance Management Authority", "followers": 382, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/D4D03AQHyOjyoz7d95g/profile-displayphoto-shrink_100_100/B4DZYvvhP5GwAY-/0/1744557712084?e=1751500800&v=beta&t=DLYRpz20zmwUWx1UY1Dn-ykvgWBnwn8XHWLaDMf199M", "company_handle": "https://www.linkedin.com/company/national-health-insurance-management-authority/", "captured_at": "2025-04-30T07:36:45Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Dental Surgery Assistant at Health Promotion Board", "followers": null, "connection_degree": null, "avatar_url": "data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7", "company_handle": "https://www.linkedin.com/company/health-alliance-plan/", "captured_at": "2025-04-30T07:36:51Z"}
{"profile_url": null, "name": "Merrill Hausenfluck", "headline": "Chief Financial Officer", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/D4E03AQGKxDKRJM_BCg/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1696292650180?e=1751500800&v=beta&t=NbUVC-QP-XL3frBpQcn3GtGrZ04Fl0xdko4V-mHxPag", "company_handle": "https://www.linkedin.com/company/health-alliance-plan/", "captured_at": "2025-04-30T07:36:51Z"}
{"profile_url": null, "name": "Mike Treash", "headline": "Senior Vice President and Chief Operating Officer at Health Alliance Plan", "followers": 2000, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/D5603AQH_c6tIq929gw/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1720478900599?e=1751500800&v=beta&t=l9RLnLDKBBJjJQTsFMJMa_1MpWCKcV4AUa3dcjGnSXQ", "company_handle": "https://www.linkedin.com/company/health-alliance-plan/", "captured_at": "2025-04-30T07:36:51Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Manager at Health Alliance Plan", "followers": null, "connection_degree": null, "avatar_url": "data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7", "company_handle": "https://www.linkedin.com/company/health-alliance-plan/", "captured_at": "2025-04-30T07:36:51Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Manager, Government Programs at Health Alliance Plan", "followers": null, "connection_degree": null, "avatar_url": "https://media.licdn.com/dms/image/v2/C5603AQF473eFGZeIpQ/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1654455840818?e=1751500800&v=beta&t=FllKCznSi0Ndm75QYy2i5UDtflCojNGkVzRpoChPC8c", "company_handle": "https://www.linkedin.com/company/health-alliance-plan/", "captured_at": "2025-04-30T07:36:51Z"}
{"profile_url": null, "name": "Steele Dickerson", "headline": "Insurance Recruiting Solutions", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/D5603AQEyICWaE_PvXA/profile-displayphoto-shrink_100_100/B56ZQuDHyZH0Ac-/0/1735939358232?e=1751500800&v=beta&t=9FdnWHrjnPQ7LQ5FdwC7sY8sS6hm-R4zfWO5Vmwm46w", "company_handle": "https://www.linkedin.com/company/insurance-recruiting-solutions/", "captured_at": "2025-04-30T07:36:56Z"}
{"profile_url": null, "name": "Yahya Ipuge", "headline": "Senior Health Specialist, Independent Consultant, Certified Board Director, Board Chair in NGO and Private Entities", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/C4E03AQFuqPObSyLPMQ/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1517757008397?e=1751500800&v=beta&t=zaHc2CY7AJ-eX1MCSvazp8ny37iBAu3YsyaZjwq6gB0", "company_handle": "https://www.linkedin.com/company/health-insurance-management-services-organization/", "captured_at": "2025-04-30T07:44:32+00:00Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Field officer at Health and Insurance Management Services Organization", "followers": null, "connection_degree": null, "avatar_url": "https://media.licdn.com/dms/image/v2/C5103AQEVmdDwTIhsjQ/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1540989154156?e=1751500800&v=beta&t=7N0baJNfZ26dbrNNbv2055sbGlacQUwQu07wUTN0whs", "company_handle": "https://www.linkedin.com/company/health-insurance-management-services-organization/", "captured_at": "2025-04-30T07:44:32+00:00Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Medical Practitioner @ Health & Insurance | Master's Degree in Infection Control", "followers": null, "connection_degree": null, "avatar_url": "https://media.licdn.com/dms/image/v2/D4D03AQHjMXy7dSmmLg/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1725975429410?e=1751500800&v=beta&t=lDIL2KhDw471XYvtCrRfkHAnG3Q-npDJnwDdK0sYvpA", "company_handle": "https://www.linkedin.com/company/health-insurance-management-services-organization/", "captured_at": "2025-04-30T07:44:32+00:00Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "--", "followers": null, "connection_degree": null, "avatar_url": "data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7", "company_handle": "https://www.linkedin.com/company/health-insurance-management-services-organization/", "captured_at": "2025-04-30T07:44:32+00:00Z"}
{"profile_url": null, "name": "Fadhy Mtanga", "headline": "Executive Director at Health & Insurance Management Services Organization (HIMSO) Author | Creative Writer | Social Scientist", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/D4E03AQEloEreyg3qVQ/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1704391866585?e=1751500800&v=beta&t=86am-v3cjBPBldLTwgt8-AY-YbxFY6QZQzObwLTtMEA", "company_handle": "https://www.linkedin.com/company/health-insurance-management-services-organization/", "captured_at": "2025-04-30T07:44:32+00:00Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Member Service Assistant @ National Health Insurance Management Authority (NHIMA) | Clinical Officer | Health Insurance & Public Health | Claims Processing & Customer Support | Data & Policy Analyst", "followers": null, "connection_degree": null, "avatar_url": "https://media.licdn.com/dms/image/v2/D4D03AQGob13KyxrB0g/profile-displayphoto-shrink_100_100/B4DZYCgreeHIAU-/0/1743798848889?e=1751500800&v=beta&t=uXxTsMLi5s7hr8FBEzVTDw7V3eJ85kpTaIC7i_5fM-Y", "company_handle": "https://www.linkedin.com/company/national-health-insurance-management-authority/", "captured_at": "2025-04-30T07:44:38+00:00Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Business Administrator at Consultancy Business investments", "followers": null, "connection_degree": null, "avatar_url": "https://media.licdn.com/dms/image/v2/D4D03AQEuKXJmknr2YA/profile-displayphoto-shrink_800_800/profile-displayphoto-shrink_800_800/0/1714545221728?e=1751500800&v=beta&t=zJG-rDZgYJJ0eROibf-Wag-v_JecCghwU3ul4TaH2Eg", "company_handle": "https://www.linkedin.com/company/national-health-insurance-management-authority/", "captured_at": "2025-04-30T07:44:38+00:00Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Economist/ Development Analyst/ Planner/ Customer Care", "followers": null, "connection_degree": null, "avatar_url": "https://media.licdn.com/dms/image/v2/D4E03AQFEc3EgfdpZeg/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1727782989867?e=1751500800&v=beta&t=dWjKzSu5FDRgmxAVret9jQPhWF2VjcrnmEpR2LDMC1Q", "company_handle": "https://www.linkedin.com/company/national-health-insurance-management-authority/", "captured_at": "2025-04-30T07:44:38+00:00Z"}
{"profile_url": null, "name": "Tamani Phiri", "headline": "Corporate Business Strategy | Thought Leadership | Corporate Governance", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/D4D03AQF4mFx8jY2n-w/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1730302954035?e=1751500800&v=beta&t=i4QIrHA6A9eLtKolwTRNhuoiaTad28sf5KHxAFuXG-w", "company_handle": "https://www.linkedin.com/company/national-health-insurance-management-authority/", "captured_at": "2025-04-30T07:44:38+00:00Z"}
{"profile_url": null, "name": "Samantha Ngandwe", "headline": "Quality Assurance and Accreditation Officer at National Health Insurance Management Authority", "followers": 382, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/D4D03AQHyOjyoz7d95g/profile-displayphoto-shrink_100_100/B4DZYvvhP5GwAY-/0/1744557712084?e=1751500800&v=beta&t=DLYRpz20zmwUWx1UY1Dn-ykvgWBnwn8XHWLaDMf199M", "company_handle": "https://www.linkedin.com/company/national-health-insurance-management-authority/", "captured_at": "2025-04-30T07:44:38+00:00Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Dental Surgery Assistant at Health Promotion Board", "followers": null, "connection_degree": null, "avatar_url": "data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7", "company_handle": "https://www.linkedin.com/company/health-alliance-plan/", "captured_at": "2025-04-30T07:44:43+00:00Z"}
{"profile_url": null, "name": "Merrill Hausenfluck", "headline": "Chief Financial Officer", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/D4E03AQGKxDKRJM_BCg/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1696292650180?e=1751500800&v=beta&t=NbUVC-QP-XL3frBpQcn3GtGrZ04Fl0xdko4V-mHxPag", "company_handle": "https://www.linkedin.com/company/health-alliance-plan/", "captured_at": "2025-04-30T07:44:43+00:00Z"}
{"profile_url": null, "name": "Mike Treash", "headline": "Senior Vice President and Chief Operating Officer at Health Alliance Plan", "followers": 2000, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/D5603AQH_c6tIq929gw/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1720478900599?e=1751500800&v=beta&t=l9RLnLDKBBJjJQTsFMJMa_1MpWCKcV4AUa3dcjGnSXQ", "company_handle": "https://www.linkedin.com/company/health-alliance-plan/", "captured_at": "2025-04-30T07:44:43+00:00Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Manager at Health Alliance Plan", "followers": null, "connection_degree": null, "avatar_url": "data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7", "company_handle": "https://www.linkedin.com/company/health-alliance-plan/", "captured_at": "2025-04-30T07:44:43+00:00Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Manager, Government Programs at Health Alliance Plan", "followers": null, "connection_degree": null, "avatar_url": "https://media.licdn.com/dms/image/v2/C5603AQF473eFGZeIpQ/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1654455840818?e=1751500800&v=beta&t=FllKCznSi0Ndm75QYy2i5UDtflCojNGkVzRpoChPC8c", "company_handle": "https://www.linkedin.com/company/health-alliance-plan/", "captured_at": "2025-04-30T07:44:43+00:00Z"}
{"profile_url": null, "name": "Steele Dickerson", "headline": "Insurance Recruiting Solutions", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/D5603AQEyICWaE_PvXA/profile-displayphoto-shrink_100_100/B56ZQuDHyZH0Ac-/0/1735939358232?e=1751500800&v=beta&t=9FdnWHrjnPQ7LQ5FdwC7sY8sS6hm-R4zfWO5Vmwm46w", "company_handle": "https://www.linkedin.com/company/insurance-recruiting-solutions/", "captured_at": "2025-04-30T07:44:48+00:00Z"}
{"profile_url": null, "name": "Scot Dickerson", "headline": "Insurance Industry Specialist, Insurance Recruiter, Talent Acquisition, Talent Sourcing, Hiring Consultant, Career Consultant, Staffing, Executive Recruiter at Insurance Recruiting Solutions #insurancejobs #insurance", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/D5603AQGLFvtPPU3HEw/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1724950672124?e=1751500800&v=beta&t=uT4SFSMF32O1d50Z0dbnd6zRRKdABHxSGlOZdxWdXBM", "company_handle": "https://www.linkedin.com/company/insurance-recruiting-solutions/", "captured_at": "2025-04-30T07:44:48+00:00Z"}
{"profile_url": null, "name": "Madeline Judas", "headline": "Recruiting Operations & Business Development Specialist", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/D5603AQG6xiTaJ71UiA/profile-displayphoto-shrink_100_100/B56ZU_N_jPHoAY-/0/1740522388021?e=1751500800&v=beta&t=CxvAsYgU0zelghZsRhUJOC26ILVovP3ZPn4nMnWkEJE", "company_handle": "https://www.linkedin.com/company/insurance-recruiting-solutions/", "captured_at": "2025-04-30T07:44:48+00:00Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "All Lines Claims Adjuster / General Lines Agent (Property & Casualty : Life, Accident, Health & HMO)", "followers": null, "connection_degree": null, "avatar_url": "https://media.licdn.com/dms/image/v2/D5603AQFTjkb7SxTWWg/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1725920318474?e=1751500800&v=beta&t=BGEzQg1c2l8qxuy2iKJ896nElsiYcaWnhkf-mqc-KhY", "company_handle": "https://www.linkedin.com/company/insurance-recruiting-solutions/", "captured_at": "2025-04-30T07:44:48+00:00Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Clinical Pharmacy Manager at Health Plan of San Mateo (HPSM)", "followers": null, "connection_degree": null, "avatar_url": "https://media.licdn.com/dms/image/v2/C5603AQEPO0pZOxznoA/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1551565536585?e=1751500800&v=beta&t=qwMGzWX_Zefkciq8h2m9daLMflT0WoDr5F1R5pXvyM4", "company_handle": "https://www.linkedin.com/company/healthplanofsanmateo/", "captured_at": "2025-04-30T07:44:54+00:00Z"}
{"profile_url": null, "name": "Tamana M.", "headline": "MPH Candidate at Brown University | Data Coordinator", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/D5603AQEY3iDtFmpzlg/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1714197678074?e=1751500800&v=beta&t=IsVT0uC7A-T-Tp22gZFDG9wiT7LMB5GmhccuI8f9c-I", "company_handle": "https://www.linkedin.com/company/healthplanofsanmateo/", "captured_at": "2025-04-30T07:44:54+00:00Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Program Manager", "followers": null, "connection_degree": null, "avatar_url": "data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7", "company_handle": "https://www.linkedin.com/company/healthplanofsanmateo/", "captured_at": "2025-04-30T07:44:54+00:00Z"}
{"profile_url": null, "name": "Mackenzie Baysinger Moniz, MSW", "headline": "Program Manager at Health Plan of San Mateo", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/D5603AQHAd3A4zLyuWA/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1675716742150?e=1751500800&v=beta&t=ot3fMyJFnHwwNfKJiA_YxZp6MOK_iVGtSCUgVNq867g", "company_handle": "https://www.linkedin.com/company/healthplanofsanmateo/", "captured_at": "2025-04-30T07:44:54+00:00Z"}
{"profile_url": null, "name": "John O.", "headline": "Healthcare Delivery Strategy Execution", "followers": null, "connection_degree": "· 3rd", "avatar_url": "data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7", "company_handle": "https://www.linkedin.com/company/healthplanofsanmateo/", "captured_at": "2025-04-30T07:44:54+00:00Z"}
{"profile_url": null, "name": "Daniel McQuilkin", "headline": "Senior Vice President", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/C4E03AQFkScOqwhxvfQ/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1521406683682?e=1751500800&v=beta&t=iohhak3lrV1gpmA6dnoCxTRJidskfgmZUXKbNQbkxjs", "company_handle": "https://www.linkedin.com/company/insurance-management-group_2/", "captured_at": "2025-04-30T07:44:59+00:00Z"}
{"profile_url": null, "name": "Tony Bonacuse", "headline": "Senior Vice President at Insurance Management Group", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/C5603AQF_JJOFLjkZoQ/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1516269003018?e=1751500800&v=beta&t=0APZt5RNhvUj4IxsSdi7JO9KxezZzOH_WQCibn5Szgs", "company_handle": "https://www.linkedin.com/company/insurance-management-group_2/", "captured_at": "2025-04-30T07:44:59+00:00Z"}
{"profile_url": null, "name": "Mark Bilger", "headline": "Director - Sr. Vice President at Insurance Management Group", "followers": 1000, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/C4D03AQEzX5qUfqhd2g/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1663842785708?e=1751500800&v=beta&t=YyKXRQol0cDntoq8vbdxyaRvEFf0vWKNHPxk0cyWiG8", "company_handle": "https://www.linkedin.com/company/insurance-management-group_2/", "captured_at": "2025-04-30T07:44:59+00:00Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Senior Vice President at Insurance Management Group / Partner", "followers": null, "connection_degree": null, "avatar_url": "https://media.licdn.com/dms/image/v2/C5603AQH3dm30dXH82w/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1572228299104?e=1751500800&v=beta&t=iuBQYs4iLHJgRgjFbSA2YiNiAI8zDILqg-nVsLR9Qjk", "company_handle": "https://www.linkedin.com/company/insurance-management-group_2/", "captured_at": "2025-04-30T07:44:59+00:00Z"}
{"profile_url": null, "name": "Adam Young, MBA", "headline": "Husband | Father | Traveler | Sports Fanatic | Food Enthusiast | Independent Insurance Professional", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/C5603AQErWIq1AVyxKg/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1601480475688?e=1751500800&v=beta&t=jK_mhX0PkDdG8WBZaipIIYRDm1PnWIuFR7sCKDhDi6s", "company_handle": "https://www.linkedin.com/company/insurance-management-group_2/", "captured_at": "2025-04-30T07:44:59+00:00Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Doctor at CareCard Health Insurance Management Co", "followers": null, "connection_degree": null, "avatar_url": "data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7", "company_handle": "https://www.linkedin.com/company/carecard-health-insurance-management-co/", "captured_at": "2025-04-30T07:45:04+00:00Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Pharmacist", "followers": null, "connection_degree": null, "avatar_url": "https://media.licdn.com/dms/image/v2/C4D03AQHyPi4Amu_Dkw/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1640460490377?e=1751500800&v=beta&t=q7R_b7bD9CR-1-Dvu81WoEHN_ljHK16l6ioTIA0LN7Q", "company_handle": "https://www.linkedin.com/company/carecard-health-insurance-management-co/", "captured_at": "2025-04-30T07:45:04+00:00Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "IT Manager at CareCard Health Insurance Management Co", "followers": null, "connection_degree": null, "avatar_url": "data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7", "company_handle": "https://www.linkedin.com/company/carecard-health-insurance-management-co/", "captured_at": "2025-04-30T07:45:04+00:00Z"}
{"profile_url": null, "name": "Amal Shabani", "headline": "at carecard", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/C5603AQFLzeP3yPkjgg/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1519625412373?e=1751500800&v=beta&t=GULSoesSn83F_fYkkH_nPxWIjjs1d9Pucc3dUDNei6I", "company_handle": "https://www.linkedin.com/company/carecard-health-insurance-management-co/", "captured_at": "2025-04-30T07:45:04+00:00Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "--", "followers": null, "connection_degree": null, "avatar_url": "data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7", "company_handle": "https://www.linkedin.com/company/carecard-health-insurance-management-co/", "captured_at": "2025-04-30T07:45:04+00:00Z"}
{"profile_url": null, "name": "LinkedIn Member", "headline": "Biologiste | Pharmaco-épidemiologie & Pharmaco-économie | Software Helath Care Management", "followers": null, "connection_degree": null, "avatar_url": "https://media.licdn.com/dms/image/v2/C4D03AQHOPXrX5-oeug/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1663013895834?e=1751500800&v=beta&t=yE2RGp0rfhcJkjh_vdM0VwpaPUtoPewM80lTlr20OHU", "company_handle": "https://www.linkedin.com/company/healthcluster/", "captured_at": "2025-04-30T07:45:09+00:00Z"}
{"profile_url": null, "name": "Ruqaia Ali Alkhalifa", "headline": " RN,BSN, MSN,NE Database Officer for Scholarship Programs and Central Committee rapporteur at Al-Ahsa Health Cluster.", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/D4E03AQGfNujqDnuZDA/profile-displayphoto-shrink_100_100/B4EZOvsQThH0AU-/0/1733819436577?e=1751500800&v=beta&t=jleAVvhbg0H85tSi9TG96x0fqdkS1oytfaU02LHsFEI", "company_handle": "https://www.linkedin.com/company/healthcluster/", "captured_at": "2025-04-30T07:45:09+00:00Z"}
|
||||
{"profile_url": null, "name": "Fahad Mohyuddin", "headline": "Healthcare AI Strategist | Digital Health | SaaS | Telehealth | HIS | EHR | IoT", "followers": 7000, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/C4E03AQFLnPh8fu-HHg/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1647320077586?e=1751500800&v=beta&t=S__knVzEVrGZuyqwszCe_5V_kawbG5tejmmEe3fkMJE", "company_handle": "https://www.linkedin.com/company/healthcluster/", "captured_at": "2025-04-30T07:45:09+00:00Z"}
|
||||
{"profile_url": null, "name": "Muhammad Moid Shams", "headline": "Azure DevOps | AWS Cloud Infrastructure| Freight Tech | Health Tech | HL7- NABIDH | HL7+ FHIR | KSA -NPHIES | FHIR - MOPH | HL7- Riayati | Freight Tech | Insure Tech | with Azure, Azure AI , PowerApps, D365 , M365", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/D4D03AQEzousRurY2Zg/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1711283874675?e=1751500800&v=beta&t=ZheuoRIAkS_9M8WXafdwB1nJEuy-a5HEsrXlfOANx80", "company_handle": "https://www.linkedin.com/company/healthcluster/", "captured_at": "2025-04-30T07:45:09+00:00Z"}
|
||||
{"profile_url": null, "name": "Muhammad Shahzaib (PMP® - SCRUM®)", "headline": "PMP-Certified Project Manager | Health Care & Web Solutions Expert | Customer Success & Operations Management Expert | Business Transformation Expert", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/D4D35AQFyp7WcBZinYA/profile-framedphoto-shrink_100_100/profile-framedphoto-shrink_100_100/0/1730638721808?e=1746604800&v=beta&t=oewST3uZcxrt48z76eiJgTxl1EPoo63Cq-JcTwrFTbs", "company_handle": "https://www.linkedin.com/company/healthcluster/", "captured_at": "2025-04-30T07:45:09+00:00Z"}
|
||||
{"profile_url": null, "name": "LinkedIn Member", "headline": "Music Professional at Health Options Worldwide", "followers": null, "connection_degree": null, "avatar_url": "https://media.licdn.com/dms/image/v2/C5103AQGF-Dp6v6nkGw/profile-displayphoto-shrink_800_800/profile-displayphoto-shrink_800_800/0/1585401654822?e=1751500800&v=beta&t=7yeO-dGz1p_B66cJVSlTSdAYJLMFFwxPIhwwcR8uWWo", "company_handle": "https://www.linkedin.com/company/health-options-worldwide/", "captured_at": "2025-04-30T07:45:13+00:00Z"}
|
||||
{"profile_url": null, "name": "LinkedIn Member", "headline": "Trainer/instructor at Health Options Worldwide", "followers": null, "connection_degree": null, "avatar_url": "data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7", "company_handle": "https://www.linkedin.com/company/health-options-worldwide/", "captured_at": "2025-04-30T07:45:13+00:00Z"}
|
||||
{"profile_url": null, "name": "Michael Akpoarebe-Isaac", "headline": "Chief Operating officer, Health Partners HMO", "followers": null, "connection_degree": "· 3rd", "avatar_url": "https://media.licdn.com/dms/image/v2/C4E03AQE7KNFaLMyqYg/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1602714385413?e=1751500800&v=beta&t=In5GaREqoXtO3sPCx9ZJJBwIPY4008ii13RPRl0w0Fw", "company_handle": "https://www.linkedin.com/company/healthpartnersng/", "captured_at": "2025-04-30T07:45:19+00:00Z"}
|
||||
{"profile_url": null, "name": "LinkedIn Member", "headline": "REGISTERED NURSE/CLAIMS SUPERVISOR/HEALTH EDUCATOR/ CASE MANAGER/ Lekki.", "followers": null, "connection_degree": null, "avatar_url": "https://media.licdn.com/dms/image/v2/D4E35AQEEqf5i5pD76g/profile-framedphoto-shrink_100_100/profile-framedphoto-shrink_100_100/0/1724219552412?e=1746604800&v=beta&t=h0kqmp2KnpqQxsCCwyy7NpA8CAkSQ6qgbsZ0p0H7mXM", "company_handle": "https://www.linkedin.com/company/healthpartnersng/", "captured_at": "2025-04-30T07:45:19+00:00Z"}
|
||||
{"profile_url": null, "name": "LinkedIn Member", "headline": "Data Analyst|Dedicated Retention Officer Boosting Customer Loyalty| Business Developer/ Event planner", "followers": null, "connection_degree": null, "avatar_url": "https://media.licdn.com/dms/image/v2/D4D35AQHIgeS1H7w65w/profile-framedphoto-shrink_100_100/B4DZV1QfcCGcAk-/0/1741429012517?e=1746604800&v=beta&t=zZi8WjnLpDrQD271jAId2mnfld_hO538QrN1-q2G4Zw", "company_handle": "https://www.linkedin.com/company/healthpartnersng/", "captured_at": "2025-04-30T07:45:19+00:00Z"}
|
||||
{"profile_url": null, "name": "LinkedIn Member", "headline": "Former Group managing director at Health Partners Ltd", "followers": null, "connection_degree": null, "avatar_url": "https://media.licdn.com/dms/image/v2/C4E03AQHPQPvIQbPQPg/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1583328612508?e=1751500800&v=beta&t=LpynArccJCWrdWMSBvYLH4SI5G-xae7ECoWUUAl_CeU", "company_handle": "https://www.linkedin.com/company/healthpartnersng/", "captured_at": "2025-04-30T07:45:19+00:00Z"}
|
||||
{"profile_url": null, "name": "LinkedIn Member", "headline": "HEAD, FINCON, @ HEALTH PARTNERS (HMO) LTD", "followers": null, "connection_degree": null, "avatar_url": "https://media.licdn.com/dms/image/v2/C4D03AQG8XOvnazEibQ/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1518882054975?e=1751500800&v=beta&t=5gT6GAWGTqYfpvkjOk0ArvV73I_KspkWXgoG-VhoStg", "company_handle": "https://www.linkedin.com/company/healthpartnersng/", "captured_at": "2025-04-30T07:45:19+00:00Z"}
|
||||
51
docs/apps/linkdin/schemas/company_card.json
Normal file
@@ -0,0 +1,51 @@
{
  "name": "LinkedIn Company Search Result Card",
  "baseSelector": "div[data-chameleon-result-urn][data-view-name=\"search-entity-result-universal-template\"]",
  "baseFields": [
    {
      "name": "chameleon_result_urn",
      "type": "attribute",
      "attribute": "data-chameleon-result-urn"
    },
    {
      "name": "view_name",
      "type": "attribute",
      "attribute": "data-view-name"
    }
  ],
  "fields": [
    {
      "name": "handle",
      "selector": "div.mb1 div.display-flex span a[data-test-app-aware-link]",
      "type": "attribute",
      "attribute": "href"
    },
    {
      "name": "profile_image",
      "selector": "div.ivm-image-view-model img",
      "type": "attribute",
      "attribute": "src"
    },
    {
      "name": "name",
      "selector": "div.mb1 div.display-flex span a[data-test-app-aware-link]",
      "type": "text"
    },
    {
      "name": "descriptor",
      "selector": "div.mb1 > div[class*=\"t-14 t-black\"]",
      "type": "text"
    },
    {
      "name": "about",
      "selector": "p.entity-result__summary--2-lines",
      "type": "text"
    },
    {
      "name": "followers",
      "selector": "div.mb1 > div:nth-of-type(3)",
      "type": "regex",
      "pattern": "(\\d+[KM]?) followers"
    }
  ]
}
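
This schema has the same shape as the dictionaries the v0.7.0 release notes below pass to `JsonCssExtractionStrategy`, so presumably it is meant to be loaded from disk and handed to a crawl in the same way. A minimal sketch, assuming the file path shown in the diff header, that `JsonCssExtractionStrategy` is importable from the package root, and an illustrative search URL:

```python
# Hedged sketch: load the company-card schema and pass it to the CSS extraction
# strategy used elsewhere in these docs. Path, import location, and URL are assumptions.
import asyncio
import json

from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, JsonCssExtractionStrategy


async def main():
    with open("docs/apps/linkdin/schemas/company_card.json") as f:
        schema = json.load(f)

    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(
            "https://www.linkedin.com/search/results/companies/?keywords=health%20insurance",
            config=CrawlerRunConfig(
                extraction_strategy=JsonCssExtractionStrategy(schema),
            ),
        )
        # JSON string whose objects follow the fields defined in the schema above
        print(result.extracted_content)


asyncio.run(main())
```
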
41
docs/apps/linkdin/schemas/people_card.json
Normal file
@@ -0,0 +1,41 @@
{
  "name": "LinkedIn People Profile Card",
  "baseSelector": "li.org-people-profile-card__profile-card-spacing",
  "baseFields": [],
  "fields": [
    {
      "name": "profile_url",
      "selector": "div.artdeco-entity-lockup__title a[data-test-app-aware-link]",
      "type": "attribute",
      "attribute": "href"
    },
    {
      "name": "avatar_url",
      "selector": "div.artdeco-entity-lockup__image img",
      "type": "attribute",
      "attribute": "src"
    },
    {
      "name": "name",
      "selector": "div.artdeco-entity-lockup__title a div.lt-line-clamp--single-line",
      "type": "text"
    },
    {
      "name": "headline",
      "selector": "div.artdeco-entity-lockup__subtitle div.lt-line-clamp--multi-line",
      "type": "text"
    },
    {
      "name": "followers",
      "selector": "span.text-align-center span.lt-line-clamp--multi-line",
      "type": "regex",
      "pattern": "(\\d+)"
    },
    {
      "name": "connection_degree",
      "selector": "span.artdeco-entity-lockup__degree",
      "type": "regex",
      "pattern": "(\\d+\\w+)"
    }
  ]
}
138
docs/apps/linkdin/snippets/company.html
Normal file
@@ -0,0 +1,138 @@
|
||||
<li class="kZRArQqqhjjrHYceWaFbyEGWHRZbtqjTMawKA">
|
||||
<!----><!---->
|
||||
|
||||
|
||||
|
||||
<div class="xAuWirHJDUTuhkfOpmJApZWziplUyPIc" data-chameleon-result-urn="urn:li:company:2095237"
|
||||
data-view-name="search-entity-result-universal-template">
|
||||
|
||||
|
||||
|
||||
|
||||
<div class="linked-area flex-1
|
||||
cursor-pointer">
|
||||
|
||||
<div class="qMGLeKnJyQnibGOueKodvnfLgWpsuA">
|
||||
<div class="cBPGFfFovHsbNhBFmECDIsPgMWmtMozOUfIAbs">
|
||||
<div class="display-flex align-items-center">
|
||||
<!---->
|
||||
|
||||
<a class="sDWEFrcVubKuUVGggeBOYqLlgYgPbojOc scale-down " aria-hidden="true" tabindex="-1"
|
||||
href="https://www.linkedin.com/company/health-insurance/" data-test-app-aware-link="">
|
||||
|
||||
<div class="ivm-image-view-model ">
|
||||
|
||||
<div class="ivm-view-attr__img-wrapper
|
||||
|
||||
">
|
||||
<!---->
|
||||
<!----> <img width="48"
|
||||
src="https://media.licdn.com/dms/image/v2/C560BAQEXIoLSJbShlw/company-logo_100_100/company-logo_100_100/0/1662748332921/health_insurance_logo?e=1753920000&v=beta&t=p2ZNMYNsC9KSlp-sIqMYuc88avBTjKF4CqDobq1Xr2M"
|
||||
loading="lazy" height="48" alt="Health Insurance" id="ember28"
|
||||
class="ivm-view-attr__img--centered EntityPhoto-square-3 evi-image lazy-image ember-view">
|
||||
</div>
|
||||
|
||||
</div>
|
||||
|
||||
</a>
|
||||
|
||||
|
||||
</div>
|
||||
</div>
|
||||
<div
|
||||
class="BNxZPngZfeRnDrIUbICgBZvQjRvMAUnwCHuDrmRg yNRlrJOHDflDBnYPLbVmiAkUsCUZKUznmAc pt3 pb3 t-12 t-black--light">
|
||||
<div class="mb1">
|
||||
|
||||
<div class="t-roman t-sans">
|
||||
|
||||
|
||||
|
||||
<div class="display-flex">
|
||||
<span
|
||||
class="kmApjJVnFerynwITxTBSCqzqgoHwVfkiA HHGiVqODTCkszDUDWwPGPJGUPfAeRpygAKwwLePrQ ">
|
||||
<span class="OjTMoZLoiuspGuWWptwqxZRcMcHZBoSDxfig
|
||||
t-16">
|
||||
<a class="sDWEFrcVubKuUVGggeBOYqLlgYgPbojOc "
|
||||
href="https://www.linkedin.com/company/health-insurance/"
|
||||
data-test-app-aware-link="">
|
||||
<!---->Health Insurance<!---->
|
||||
<!----> </a>
|
||||
<!----> </span>
|
||||
</span>
|
||||
<!---->
|
||||
</div>
|
||||
|
||||
|
||||
|
||||
</div>
|
||||
|
||||
|
||||
|
||||
<div class="kFTZPhxHBbvnnRxiRPmTxafKGLUNSiaeInag
|
||||
t-14 t-black t-normal">
|
||||
<!---->Insurance • Cardiff, CA<!---->
|
||||
</div>
|
||||
|
||||
<div class="FlWUwyrEUZpkVCgzGTDwUHTLntfZNseavlY
|
||||
t-14 t-normal">
|
||||
<!---->3K followers<!---->
|
||||
</div>
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
</div>
|
||||
|
||||
<!---->
|
||||
<p class="JBUEKeXhPyClEtYwdsASPYsZsCkTvUBqsDUs
|
||||
entity-result__summary--2-lines
|
||||
t-12 t-black--light
|
||||
">
|
||||
<!---->Your<span class="white-space-pre"> </span><strong><!---->health<!----></strong><span
|
||||
class="white-space-pre"> </span><!----><!----><strong><!---->insurance<!----></strong><span
|
||||
class="white-space-pre"> </span>expert for all stages of your life; Medicare, Individuals,
|
||||
Families, Small Groups, CoveredCA.<!---->
|
||||
</p>
|
||||
|
||||
<!---->
|
||||
</div>
|
||||
<div class="JZcKRppsWfaxfMaqtvfVwEeAtzNwryBOMdo yNRlrJOHDflDBnYPLbVmiAkUsCUZKUznmAc">
|
||||
<!---->
|
||||
|
||||
|
||||
<div>
|
||||
|
||||
|
||||
|
||||
|
||||
<button aria-label="Follow Health Insurance" id="ember49"
|
||||
class="artdeco-button artdeco-button--2 artdeco-button--secondary ember-view"
|
||||
type="button"><!---->
|
||||
<span class="artdeco-button__text">
|
||||
Follow
|
||||
</span></button>
|
||||
|
||||
|
||||
|
||||
<!---->
|
||||
<!---->
|
||||
|
||||
|
||||
</div>
|
||||
|
||||
|
||||
|
||||
</div>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
|
||||
|
||||
|
||||
|
||||
</div>
|
||||
|
||||
|
||||
|
||||
</li>
|
||||
93
docs/apps/linkdin/snippets/people.html
Normal file
@@ -0,0 +1,93 @@
|
||||
<li class="grid grid__col--lg-8 block org-people-profile-card__profile-card-spacing">
|
||||
<div>
|
||||
|
||||
|
||||
<section class="artdeco-card full-width IxXiAcHfbZpayHVZUYdQwfYOkMbOirmr">
|
||||
<!---->
|
||||
|
||||
<img width="210" src="data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7"
|
||||
ariarole="presentation" loading="lazy" height="210" alt="" id="ember102"
|
||||
class="evi-image lazy-image ghost-default ember-view org-people-profile-card__cover-photo org-people-profile-card__cover-photo--people">
|
||||
|
||||
<div class="org-people-profile-card__profile-info">
|
||||
<div id="ember103"
|
||||
class="artdeco-entity-lockup artdeco-entity-lockup--stacked-center artdeco-entity-lockup--size-7 ember-view">
|
||||
<div id="ember104"
|
||||
class="artdeco-entity-lockup__image artdeco-entity-lockup__image--type-circle ember-view"
|
||||
type="circle">
|
||||
|
||||
<a class="sDWEFrcVubKuUVGggeBOYqLlgYgPbojOc " id="org-people-profile-card__profile-image-0"
|
||||
href="https://www.linkedin.com/in/ericweberhcbd?miniProfileUrn=urn%3Ali%3Afs_miniProfile%3AACoAAABVh2MBFoyTaAxDqYQQcW8oGxVsqsKioHw"
|
||||
data-test-app-aware-link="">
|
||||
<img width="104"
|
||||
src="https://media.licdn.com/dms/image/v2/C4D03AQHNP9KoXtSrkg/profile-displayphoto-shrink_100_100/profile-displayphoto-shrink_100_100/0/1573501774845?e=1753920000&v=beta&t=JYsY56biGUmDzbYj2ORZMcd1dSm2IRWCA-IM3KNFLw8"
|
||||
loading="lazy" height="104" alt="Eric Weber" id="ember105"
|
||||
class="evi-image lazy-image ember-view">
|
||||
</a>
|
||||
|
||||
|
||||
</div>
|
||||
<div id="ember106" class="artdeco-entity-lockup__content ember-view">
|
||||
<div id="ember107" class="artdeco-entity-lockup__title ember-view">
|
||||
<a class="sDWEFrcVubKuUVGggeBOYqLlgYgPbojOc link-without-visited-state"
|
||||
aria-label="View Eric Weber’s profile"
|
||||
href="https://www.linkedin.com/in/ericweberhcbd?miniProfileUrn=urn%3Ali%3Afs_miniProfile%3AACoAAABVh2MBFoyTaAxDqYQQcW8oGxVsqsKioHw"
|
||||
data-test-app-aware-link="">
|
||||
<div id="ember109" class="ember-view lt-line-clamp lt-line-clamp--single-line rMKrzkehlCEvJWoQjDQJFaHmBFAYQLMGrNY
|
||||
t-black" style="">
|
||||
Eric Weber
|
||||
|
||||
<!---->
|
||||
</div>
|
||||
|
||||
</a>
|
||||
|
||||
</div>
|
||||
<div id="ember110" class="artdeco-entity-lockup__badge ember-view"> <span class="a11y-text">3rd+
|
||||
degree connection</span>
|
||||
<span class="artdeco-entity-lockup__degree" aria-hidden="true">
|
||||
· 3rd
|
||||
</span>
|
||||
<!----><!---->
|
||||
</div>
|
||||
<div id="ember111" class="artdeco-entity-lockup__subtitle ember-view">
|
||||
<div class="t-14 t-black--light t-normal">
|
||||
<div id="ember113" class="ember-view lt-line-clamp lt-line-clamp--multi-line"
|
||||
style="-webkit-line-clamp: 2">
|
||||
HIPN Executive Editor | Healthcare BizDev CEO – Health Insurance Plan News.
|
||||
|
||||
<!---->
|
||||
</div>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
<div id="ember114" class="artdeco-entity-lockup__caption ember-view"></div>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
<span class="text-align-center">
|
||||
<span id="ember116"
|
||||
class="ember-view lt-line-clamp lt-line-clamp--multi-line t-12 t-black--light mt2"
|
||||
style="-webkit-line-clamp: 3">
|
||||
10K followers
|
||||
|
||||
<!----> </span>
|
||||
|
||||
</span>
|
||||
</div>
|
||||
|
||||
<footer class="ph3 pb3">
|
||||
<button aria-label="Follow Eric Weber" id="ember117"
|
||||
class="artdeco-button artdeco-button--2 artdeco-button--secondary ember-view full-width"
|
||||
type="button"><!---->
|
||||
<span class="artdeco-button__text">
|
||||
Follow
|
||||
</span></button>
|
||||
</footer>
|
||||
|
||||
</section>
|
||||
|
||||
|
||||
</div>
|
||||
|
||||
</li>
|
||||
50
docs/apps/linkdin/templates/ai.js
Normal file
@@ -0,0 +1,50 @@
// ==== File: ai.js ====

class ApiHandler {
    constructor(apiKey = null) {
        this.apiKey = apiKey || localStorage.getItem("openai_api_key") || "";
        console.log("ApiHandler ready");
    }

    setApiKey(k) {
        this.apiKey = k.trim();
        if (this.apiKey) localStorage.setItem("openai_api_key", this.apiKey);
    }

    async *chatStream(messages, {model = "gpt-4o", temperature = 0.7} = {}) {
        if (!this.apiKey) throw new Error("OpenAI API key missing");
        // Include temperature in the request body (it was previously accepted but ignored)
        const payload = {model, messages, temperature, stream: true, max_tokens: 1024};
        const controller = new AbortController();

        const res = await fetch("https://api.openai.com/v1/chat/completions", {
            method: "POST",
            headers: {
                "Content-Type": "application/json",
                Authorization: `Bearer ${this.apiKey}`,
            },
            body: JSON.stringify(payload),
            signal: controller.signal,
        });
        if (!res.ok) throw new Error(`OpenAI: ${res.statusText}`);

        const reader = res.body.getReader();
        const dec = new TextDecoder();

        let buf = "";
        while (true) {
            const {done, value} = await reader.read();
            if (done) break;
            buf += dec.decode(value, {stream: true});

            // Process only complete SSE lines; keep the trailing partial line in the buffer
            // so a chunk boundary in the middle of a line cannot break JSON.parse.
            const lines = buf.split("\n");
            buf = lines.pop();
            for (const line of lines) {
                if (!line.startsWith("data: ")) continue;
                if (line.includes("[DONE]")) return;
                const json = JSON.parse(line.slice(6));
                const delta = json.choices?.[0]?.delta?.content;
                if (delta) yield delta;
            }
        }
    }
}

window.API = new ApiHandler();
1168
docs/apps/linkdin/templates/graph_view_template.html
Normal file
File diff suppressed because it is too large
343
docs/blog/release-v0.7.0.md
Normal file
@@ -0,0 +1,343 @@
|
||||
# 🚀 Crawl4AI v0.7.0: The Adaptive Intelligence Update
|
||||
|
||||
*January 28, 2025 • 10 min read*
|
||||
|
||||
---
|
||||
|
||||
Today I'm releasing Crawl4AI v0.7.0—the Adaptive Intelligence Update. This release introduces fundamental improvements in how Crawl4AI handles modern web complexity through adaptive learning, intelligent content discovery, and advanced extraction capabilities.
|
||||
|
||||
## 🎯 What's New at a Glance
|
||||
|
||||
- **Adaptive Crawling**: Your crawler now learns and adapts to website patterns
|
||||
- **Virtual Scroll Support**: Complete content extraction from infinite scroll pages
|
||||
- **Link Preview with Intelligent Scoring**: Intelligent link analysis and prioritization
|
||||
- **Async URL Seeder**: Discover thousands of URLs in seconds with intelligent filtering
|
||||
- **Performance Optimizations**: Significant speed and memory improvements
|
||||
|
||||
## 🧠 Adaptive Crawling: Intelligence Through Pattern Learning
|
||||
|
||||
**The Problem:** Websites change. Class names shift. IDs disappear. Your carefully crafted selectors break at 3 AM, and you wake up to empty datasets and angry stakeholders.
|
||||
|
||||
**My Solution:** I implemented an adaptive learning system that observes patterns, builds confidence scores, and adjusts extraction strategies on the fly. It's like having a junior developer who gets better at their job with every page they scrape.
|
||||
|
||||
### Technical Deep-Dive
|
||||
|
||||
The Adaptive Crawler maintains a persistent state for each domain, tracking:
|
||||
- Pattern success rates
|
||||
- Selector stability over time
|
||||
- Content structure variations
|
||||
- Extraction confidence scores
|
||||
|
||||
```python
|
||||
from crawl4ai import AsyncWebCrawler, AdaptiveCrawler, AdaptiveConfig
|
||||
import asyncio
|
||||
|
||||
async def main():
|
||||
|
||||
# Configure adaptive crawler
|
||||
config = AdaptiveConfig(
|
||||
strategy="statistical", # or "embedding" for semantic understanding
|
||||
max_pages=10,
|
||||
confidence_threshold=0.7, # Stop at 70% confidence
|
||||
top_k_links=3, # Follow top 3 links per page
|
||||
min_gain_threshold=0.05 # Need 5% information gain to continue
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler(verbose=False) as crawler:
|
||||
adaptive = AdaptiveCrawler(crawler, config)
|
||||
|
||||
print("Starting adaptive crawl about Python decorators...")
|
||||
result = await adaptive.digest(
|
||||
start_url="https://docs.python.org/3/glossary.html",
|
||||
query="python decorators functions wrapping"
|
||||
)
|
||||
|
||||
print(f"\n✅ Crawling Complete!")
|
||||
print(f"• Confidence Level: {adaptive.confidence:.0%}")
|
||||
print(f"• Pages Crawled: {len(result.crawled_urls)}")
|
||||
print(f"• Knowledge Base: {len(adaptive.state.knowledge_base)} documents")
|
||||
|
||||
# Get most relevant content
|
||||
relevant = adaptive.get_relevant_content(top_k=3)
|
||||
print(f"\nMost Relevant Pages:")
|
||||
for i, page in enumerate(relevant, 1):
|
||||
print(f"{i}. {page['url']} (relevance: {page['score']:.2%})")
|
||||
|
||||
asyncio.run(main())
|
||||
```
|
||||
|
||||
**Expected Real-World Impact:**
|
||||
- **News Aggregation**: Maintain 95%+ extraction accuracy even as news sites update their templates
|
||||
- **E-commerce Monitoring**: Track product changes across hundreds of stores without constant maintenance
|
||||
- **Research Data Collection**: Build robust academic datasets that survive website redesigns
|
||||
- **Reduced Maintenance**: Cut selector update time by 80% for frequently-changing sites
|
||||
|
||||
## 🌊 Virtual Scroll: Complete Content Capture
|
||||
|
||||
**The Problem:** Modern web apps only render what's visible. Scroll down, new content appears, old content vanishes into the void. Traditional crawlers capture that first viewport and miss 90% of the content. It's like reading only the first page of every book.
|
||||
|
||||
**My Solution:** I built Virtual Scroll support that mimics human browsing behavior, capturing content as it loads and preserving it before the browser's garbage collector strikes.
|
||||
|
||||
### Implementation Details
|
||||
|
||||
```python
|
||||
from crawl4ai import VirtualScrollConfig
|
||||
|
||||
# For social media feeds (Twitter/X style)
|
||||
twitter_config = VirtualScrollConfig(
|
||||
container_selector="[data-testid='primaryColumn']",
|
||||
scroll_count=20, # Number of scrolls
|
||||
scroll_by="container_height", # Smart scrolling by container size
|
||||
wait_after_scroll=1.0 # Let content load
|
||||
)
|
||||
|
||||
# For e-commerce product grids (Instagram style)
|
||||
grid_config = VirtualScrollConfig(
|
||||
container_selector="main .product-grid",
|
||||
scroll_count=30,
|
||||
scroll_by=800, # Fixed pixel scrolling
|
||||
wait_after_scroll=1.5 # Images need time
|
||||
)
|
||||
|
||||
# For news feeds with lazy loading
|
||||
news_config = VirtualScrollConfig(
|
||||
container_selector=".article-feed",
|
||||
scroll_count=50,
|
||||
scroll_by="page_height", # Viewport-based scrolling
|
||||
wait_after_scroll=0.5 # Wait for content to load
|
||||
)
|
||||
|
||||
# Use it in your crawl
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result = await crawler.arun(
|
||||
"https://twitter.com/trending",
|
||||
config=CrawlerRunConfig(
|
||||
virtual_scroll_config=twitter_config,
|
||||
# Combine with other features
|
||||
extraction_strategy=JsonCssExtractionStrategy({
|
||||
"tweets": {
|
||||
"selector": "[data-testid='tweet']",
|
||||
"fields": {
|
||||
"text": {"selector": "[data-testid='tweetText']", "type": "text"},
|
||||
"likes": {"selector": "[data-testid='like']", "type": "text"}
|
||||
}
|
||||
}
|
||||
})
|
||||
)
|
||||
)
|
||||
|
||||
print(f"Captured {len(result.extracted_content['tweets'])} tweets")
|
||||
```
|
||||
|
||||
**Key Capabilities:**
|
||||
- **DOM Recycling Awareness**: Detects and handles virtual DOM element recycling
|
||||
- **Smart Scroll Physics**: Three modes - container height, page height, or fixed pixels
|
||||
- **Content Preservation**: Captures content before it's destroyed
|
||||
- **Intelligent Stopping**: Stops when no new content appears
|
||||
- **Memory Efficient**: Streams content instead of holding everything in memory
|
||||
|
||||
**Expected Real-World Impact:**
|
||||
- **Social Media Analysis**: Capture entire Twitter threads with hundreds of replies, not just top 10
|
||||
- **E-commerce Scraping**: Extract 500+ products from infinite scroll catalogs vs. 20-50 with traditional methods
|
||||
- **News Aggregation**: Get all articles from modern news sites, not just above-the-fold content
|
||||
- **Research Applications**: Complete data extraction from academic databases using virtual pagination
|
||||
|
||||
## 🔗 Link Preview: Intelligent Link Analysis and Scoring
|
||||
|
||||
**The Problem:** You crawl a page and get 200 links. Which ones matter? Which lead to the content you actually want? Traditional crawlers force you to follow everything or build complex filters.
|
||||
|
||||
**My Solution:** I implemented a three-layer scoring system that analyzes links like a human would—considering their position, context, and relevance to your goals.
|
||||
|
||||
### Three-Layer Scoring in Action
|
||||
|
||||
```python
|
||||
import asyncio
|
||||
from crawl4ai import CrawlerRunConfig, CacheMode, AsyncWebCrawler
|
||||
from crawl4ai.adaptive_crawler import LinkPreviewConfig
|
||||
|
||||
async def main():
|
||||
# Configure intelligent link analysis
|
||||
link_config = LinkPreviewConfig(
|
||||
include_internal=True,
|
||||
include_external=False,
|
||||
max_links=10,
|
||||
concurrency=5,
|
||||
query="python tutorial", # For contextual scoring
|
||||
score_threshold=0.3,
|
||||
verbose=True
|
||||
)
|
||||
# Use in your crawl
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result = await crawler.arun(
|
||||
"https://www.geeksforgeeks.org/",
|
||||
config=CrawlerRunConfig(
|
||||
link_preview_config=link_config,
|
||||
score_links=True, # Enable intrinsic scoring
|
||||
cache_mode=CacheMode.BYPASS
|
||||
)
|
||||
)
|
||||
|
||||
# Access scored and sorted links
|
||||
if result.success and result.links:
|
||||
for link in result.links.get("internal", []):
|
||||
text = link.get('text', 'No text')[:40]
|
||||
print(
|
||||
text,
|
||||
f"{link.get('intrinsic_score', 0):.1f}/10" if link.get('intrinsic_score') is not None else "0.0/10",
|
||||
f"{link.get('contextual_score', 0):.2f}/1" if link.get('contextual_score') is not None else "0.00/1",
|
||||
f"{link.get('total_score', 0):.3f}" if link.get('total_score') is not None else "0.000"
|
||||
)
|
||||
|
||||
asyncio.run(main())
|
||||
```
|
||||
|
||||
**Scoring Components:**
|
||||
|
||||
1. **Intrinsic Score**: Based on link quality indicators
|
||||
- Position on page (navigation, content, footer)
|
||||
- Link attributes (rel, title, class names)
|
||||
- Anchor text quality and length
|
||||
- URL structure and depth
|
||||
|
||||
2. **Contextual Score**: Relevance to your query using BM25 algorithm
|
||||
- Keyword matching in link text and title
|
||||
- Meta description analysis
|
||||
- Content preview scoring
|
||||
|
||||
3. **Total Score**: Combined score for final ranking
|
||||
|
||||
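
Because each link dict carries these scores under the `intrinsic_score`, `contextual_score`, and `total_score` keys the example above reads, downstream code can rank and prune links without another crawl. A small follow-up sketch using the same `result` object, assuming each link dict also exposes an `href` field:

```python
# Hedged follow-up to the example above: rank internal links by total_score
# and keep only those above the configured threshold.
internal_links = result.links.get("internal", [])

ranked = sorted(
    internal_links,
    key=lambda link: link.get("total_score") or 0.0,
    reverse=True,
)

top_links = [l for l in ranked if (l.get("total_score") or 0.0) >= 0.3]
for link in top_links[:5]:
    print(f"{link.get('total_score', 0):.3f}  {link.get('href', '')}")
```
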
**Expected Real-World Impact:**
|
||||
- **Research Efficiency**: Find relevant papers 10x faster by following only high-score links
|
||||
- **Competitive Analysis**: Automatically identify important pages on competitor sites
|
||||
- **Content Discovery**: Build topic-focused crawlers that stay on track
|
||||
- **SEO Audits**: Identify and prioritize high-value internal linking opportunities
|
||||
|
||||
## 🎣 Async URL Seeder: Automated URL Discovery at Scale
|
||||
|
||||
**The Problem:** You want to crawl an entire domain but only have the homepage. Or worse, you want specific content types across thousands of pages. Manual URL discovery? That's a job for machines, not humans.
|
||||
|
||||
**My Solution:** I built Async URL Seeder—a turbocharged URL discovery engine that combines multiple sources with intelligent filtering and relevance scoring.
|
||||
|
||||
### Technical Architecture
|
||||
|
||||
```python
|
||||
import asyncio
|
||||
from crawl4ai import AsyncUrlSeeder, SeedingConfig
|
||||
|
||||
async def main():
|
||||
async with AsyncUrlSeeder() as seeder:
|
||||
# Discover Python tutorial URLs
|
||||
config = SeedingConfig(
|
||||
source="sitemap", # Use sitemap
|
||||
pattern="*python*", # URL pattern filter
|
||||
extract_head=True, # Get metadata
|
||||
query="python tutorial", # For relevance scoring
|
||||
scoring_method="bm25",
|
||||
score_threshold=0.2,
|
||||
max_urls=10
|
||||
)
|
||||
|
||||
print("Discovering Python async tutorial URLs...")
|
||||
urls = await seeder.urls("https://www.geeksforgeeks.org/", config)
|
||||
|
||||
print(f"\n✅ Found {len(urls)} relevant URLs:")
|
||||
for i, url_info in enumerate(urls[:5], 1):
|
||||
print(f"\n{i}. {url_info['url']}")
|
||||
if url_info.get('relevance_score'):
|
||||
print(f" Relevance: {url_info['relevance_score']:.3f}")
|
||||
if url_info.get('head_data', {}).get('title'):
|
||||
print(f" Title: {url_info['head_data']['title'][:60]}...")
|
||||
|
||||
asyncio.run(main())
|
||||
```
|
||||
|
||||
**Discovery Methods:**
|
||||
- **Sitemap Mining**: Parses robots.txt and all linked sitemaps
|
||||
- **Common Crawl**: Queries the Common Crawl index for historical URLs
|
||||
- **Intelligent Crawling**: Follows links with smart depth control
|
||||
- **Pattern Analysis**: Learns URL structures and generates variations
|
||||
|
||||
**Expected Real-World Impact:**
|
||||
- **Migration Projects**: Discover 10,000+ URLs from legacy sites in under 60 seconds
|
||||
- **Market Research**: Map entire competitor ecosystems automatically
|
||||
- **Academic Research**: Build comprehensive datasets without manual URL collection
|
||||
- **SEO Audits**: Find every indexable page with content scoring
|
||||
- **Content Archival**: Ensure no content is left behind during site migrations
|
||||
|
||||
## ⚡ Performance Optimizations
|
||||
|
||||
This release includes significant performance improvements through optimized resource handling, better concurrency management, and reduced memory footprint.
|
||||
|
||||
### What We Optimized
|
||||
|
||||
```python
|
||||
# Optimized crawling with v0.7.0 improvements
|
||||
results = []
|
||||
for url in urls:
|
||||
result = await crawler.arun(
|
||||
url,
|
||||
config=CrawlerRunConfig(
|
||||
# Performance optimizations
|
||||
wait_until="domcontentloaded", # Faster than networkidle
|
||||
cache_mode=CacheMode.ENABLED # Enable caching
|
||||
)
|
||||
)
|
||||
results.append(result)
|
||||
```
|
||||
|
||||
**Performance Gains:**
|
||||
- **Startup Time**: 70% faster browser initialization
|
||||
- **Page Loading**: 40% reduction with smart resource blocking
|
||||
- **Extraction**: 3x faster with compiled CSS selectors
|
||||
- **Memory Usage**: 60% reduction with streaming processing
|
||||
- **Concurrent Crawls**: Handle 5x more parallel requests
|
||||
|
||||
|
||||
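
The snippet above still awaits each `arun` call one at a time; to see the concurrency gains in practice, the same calls can be dispatched in parallel. A hedged sketch using `asyncio.gather` with the same configuration (the URLs are placeholders):

```python
import asyncio

from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, CacheMode


async def crawl_all(urls):
    config = CrawlerRunConfig(
        wait_until="domcontentloaded",   # faster than networkidle
        cache_mode=CacheMode.ENABLED,
    )
    async with AsyncWebCrawler() as crawler:
        # Fire all requests concurrently instead of awaiting them one by one
        return await asyncio.gather(*(crawler.arun(url, config=config) for url in urls))


results = asyncio.run(crawl_all([
    "https://example.com/a",
    "https://example.com/b",
    "https://example.com/c",
]))
print(f"Crawled {len(results)} pages")
```
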
## 🔧 Important Changes
|
||||
|
||||
### Breaking Changes
|
||||
- `link_extractor` renamed to `link_preview` (better reflects functionality)
|
||||
- Minimum Python version now 3.9
|
||||
- `CrawlerConfig` split into `CrawlerRunConfig` and `BrowserConfig`
|
||||
|
||||
### Migration Guide
|
||||
```python
|
||||
# Old (v0.6.x)
|
||||
from crawl4ai import CrawlerConfig
|
||||
config = CrawlerConfig(timeout=30000)
|
||||
|
||||
# New (v0.7.0)
|
||||
from crawl4ai import CrawlerRunConfig, BrowserConfig
|
||||
browser_config = BrowserConfig(timeout=30000)
|
||||
run_config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS)
|
||||
```
|
||||
|
||||
## 🤖 Coming Soon: Intelligent Web Automation
|
||||
|
||||
I'm currently working on bringing advanced automation capabilities to Crawl4AI. This includes:
|
||||
|
||||
- **Crawl Agents**: Autonomous crawlers that understand your goals and adapt their strategies
|
||||
- **Auto JS Generation**: Automatic JavaScript code generation for complex interactions
|
||||
- **Smart Form Handling**: Intelligent form detection and filling
|
||||
- **Context-Aware Actions**: Crawlers that understand page context and make decisions
|
||||
|
||||
These features are under active development and will revolutionize how we approach web automation. Stay tuned!
|
||||
|
||||
## 🚀 Get Started
|
||||
|
||||
```bash
|
||||
pip install crawl4ai==0.7.0
|
||||
```
|
||||
|
||||
Check out the [updated documentation](https://docs.crawl4ai.com).
|
||||
|
||||
Questions? Issues? I'm always listening:
|
||||
- GitHub: [github.com/unclecode/crawl4ai](https://github.com/unclecode/crawl4ai)
|
||||
- Discord: [discord.gg/crawl4ai](https://discord.gg/jP8KfhDhyN)
|
||||
- Twitter: [@unclecode](https://x.com/unclecode)
|
||||
|
||||
Happy crawling! 🕷️
|
||||
|
||||
---
|
||||
|
||||
*P.S. If you're using Crawl4AI in production, I'd love to hear about it. Your use cases inspire the next features.*
|
||||
43
docs/blog/release-v0.7.1.md
Normal file
@@ -0,0 +1,43 @@
# 🛠️ Crawl4AI v0.7.1: Minor Cleanup Update

*July 17, 2025 • 2 min read*

---

A small maintenance release that removes unused code and improves documentation.

## 🎯 What's Changed

- **Removed unused StealthConfig** from `crawl4ai/browser_manager.py`
- **Updated documentation** with better examples and parameter explanations
- **Fixed virtual scroll configuration** examples in docs

## 🧹 Code Cleanup

Removed unused `StealthConfig` import and configuration that wasn't being used anywhere in the codebase. The project uses its own custom stealth implementation through JavaScript injection instead.

```python
# Removed unused code:
from playwright_stealth import StealthConfig
stealth_config = StealthConfig(...)  # This was never used
```

## 📖 Documentation Updates

- Fixed adaptive crawling parameter examples
- Updated session management documentation
- Corrected virtual scroll configuration examples

## 🚀 Installation

```bash
pip install crawl4ai==0.7.1
```

No breaking changes - upgrade directly from v0.7.0.

---

Questions? Issues?
- GitHub: [github.com/unclecode/crawl4ai](https://github.com/unclecode/crawl4ai)
- Discord: [discord.gg/crawl4ai](https://discord.gg/jP8KfhDhyN)
51
docs/codebase/browser.md
Normal file
@@ -0,0 +1,51 @@
|
||||
### browser_manager.py
|
||||
|
||||
| Function | What it does |
|
||||
|---|---|
|
||||
| `ManagedBrowser.build_browser_flags` | Returns baseline Chromium CLI flags, disables GPU and sandbox, plugs locale, timezone, stealth tweaks, and any extras from `BrowserConfig`. |
|
||||
| `ManagedBrowser.__init__` | Stores config and logger, creates temp dir, preps internal state. |
|
||||
| `ManagedBrowser.start` | Spawns or connects to the Chromium process, returns its CDP endpoint plus the `subprocess.Popen` handle. |
|
||||
| `ManagedBrowser._initial_startup_check` | Pings the CDP endpoint once to be sure the browser is alive, raises if not. |
|
||||
| `ManagedBrowser._monitor_browser_process` | Async-loops on the subprocess, logs exits or crashes, restarts if policy allows. |
|
||||
| `ManagedBrowser._get_browser_path_WIP` | Old helper that maps OS + browser type to an executable path. |
|
||||
| `ManagedBrowser._get_browser_path` | Current helper, checks env vars, Playwright cache, and OS defaults for the real executable. |
|
||||
| `ManagedBrowser._get_browser_args` | Builds the final CLI arg list by merging user flags, stealth flags, and defaults. |
|
||||
| `ManagedBrowser.cleanup` | Terminates the browser, stops monitors, deletes the temp dir. |
|
||||
| `ManagedBrowser.create_profile` | Opens a visible browser so a human can log in, then zips the resulting user-data-dir to `~/.crawl4ai/profiles/<name>`. |
|
||||
| `ManagedBrowser.list_profiles` | Thin wrapper, now forwarded to `BrowserProfiler.list_profiles()`. |
|
||||
| `ManagedBrowser.delete_profile` | Thin wrapper, now forwarded to `BrowserProfiler.delete_profile()`. |
|
||||
| `BrowserManager.__init__` | Holds the global Playwright instance, browser handle, config signature cache, session map, and logger. |
|
||||
| `BrowserManager.start` | Boots the underlying `ManagedBrowser`, then spins up the default Playwright browser context with stealth patches. |
|
||||
| `BrowserManager._build_browser_args` | Translates `CrawlerRunConfig` (proxy, UA, timezone, headless flag, etc.) into Playwright `launch_args`. |
|
||||
| `BrowserManager.setup_context` | Applies locale, geolocation, permissions, cookies, and UA overrides on a fresh context. |
|
||||
| `BrowserManager.create_browser_context` | Internal helper that actually calls `browser.new_context(**options)` after running `setup_context`. |
|
||||
| `BrowserManager._make_config_signature` | Hashes the non-ephemeral parts of `CrawlerRunConfig` so contexts can be reused safely. |
|
||||
| `BrowserManager.get_page` | Returns a ready `Page` for a given session id, reusing an existing one or creating a new context/page, injects helper scripts, updates `last_used`. |
|
||||
| `BrowserManager.kill_session` | Force-closes a context/page for a session and removes it from the session map. |
|
||||
| `BrowserManager._cleanup_expired_sessions` | Periodic sweep that drops sessions idle longer than `ttl_seconds`. |
|
||||
| `BrowserManager.close` | Gracefully shuts down all contexts, the browser, Playwright, and background tasks. |
|
||||
|
||||
---
|
||||
|
||||
### browser_profiler.py
|
||||
|
||||
| Function | What it does |
|
||||
|---|---|
|
||||
| `BrowserProfiler.__init__` | Sets up profile folder paths, async logger, and signal handlers. |
|
||||
| `BrowserProfiler.create_profile` | Launches a visible browser with a new user-data-dir for manual login, on exit compresses and stores it as a named profile. |
|
||||
| `BrowserProfiler.cleanup_handler` | General SIGTERM/SIGINT cleanup wrapper that kills child processes. |
|
||||
| `BrowserProfiler.sigint_handler` | Handles Ctrl-C during an interactive session, makes sure the browser shuts down cleanly. |
|
||||
| `BrowserProfiler.listen_for_quit_command` | Async REPL that exits when the user types `q`. |
|
||||
| `BrowserProfiler.list_profiles` | Enumerates `~/.crawl4ai/profiles`, prints profile name, browser type, size, and last modified. |
|
||||
| `BrowserProfiler.get_profile_path` | Returns the absolute path of a profile given its name, or `None` if missing. |
|
||||
| `BrowserProfiler.delete_profile` | Removes a profile folder or a direct path from disk, with optional confirmation prompt. |
|
||||
| `BrowserProfiler.interactive_manager` | Text UI loop for listing, creating, deleting, or launching profiles. |
|
||||
| `BrowserProfiler.launch_standalone_browser` | Starts a non-headless Chromium with remote debugging enabled and keeps it alive for manual tests. |
|
||||
| `BrowserProfiler.get_cdp_json` | Pulls `/json/version` from a CDP endpoint and returns the parsed JSON. |
|
||||
| `BrowserProfiler.launch_builtin_browser` | Spawns a headless Chromium in the background, saves `{wsEndpoint, pid, started_at}` to `~/.crawl4ai/builtin_browser.json`. |
|
||||
| `BrowserProfiler.get_builtin_browser_info` | Reads that JSON file, verifies the PID, and returns browser status info. |
|
||||
| `BrowserProfiler._is_browser_running` | Cross-platform helper that checks if a PID is still alive. |
|
||||
| `BrowserProfiler.kill_builtin_browser` | Terminates the background builtin browser and removes its status file. |
|
||||
| `BrowserProfiler.get_builtin_browser_status` | Returns `{running: bool, wsEndpoint, pid, started_at}` for quick health checks. |
|
||||
|
||||
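
Read as an API surface, the table suggests a small amount of glue covers the common profile chores. A minimal sketch, with the import path, the example profile name, and the call signatures all assumed from the table rather than verified:

```python
# Hedged sketch assembled from the method names in the table above.
# The import path and exact signatures are assumptions, and some of these
# methods may be coroutines that need to be awaited.
from crawl4ai.browser_profiler import BrowserProfiler

profiler = BrowserProfiler()

# Enumerate saved profiles under ~/.crawl4ai/profiles
profiler.list_profiles()

# Resolve a profile folder by name ("my-profile" is a placeholder; None if missing)
print("Profile path:", profiler.get_profile_path("my-profile"))

# Quick health check on the background builtin browser
print("Builtin browser:", profiler.get_builtin_browser_status())
```
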
Let me know what you want to tweak or dive into next.
|
||||
40
docs/codebase/cli.md
Normal file
@@ -0,0 +1,40 @@
|
||||
### `cli.py` command surface
|
||||
|
||||
| Command | Inputs / flags | What it does |
|
||||
|---|---|---|
|
||||
| **profiles** | *(none)* | Opens the interactive profile manager, lets you list, create, delete saved browser profiles that live in `~/.crawl4ai/profiles`. |
|
||||
| **browser status** | – | Prints whether the always-on *builtin* browser is running, shows its CDP URL, PID, start time. |
|
||||
| **browser stop** | – | Kills the builtin browser and deletes its status file. |
|
||||
| **browser view** | `--url, -u` URL *(optional)* | Pops a visible window of the builtin browser, navigates to `URL` or `about:blank`. |
|
||||
| **config list** | – | Dumps every global setting, showing current value, default, and description. |
|
||||
| **config get** | `key` | Prints the value of a single setting, falls back to default if unset. |
|
||||
| **config set** | `key value` | Persists a new value in the global config (stored under `~/.crawl4ai/config.yml`). |
|
||||
| **examples** | – | Just spits out real-world CLI usage samples. |
|
||||
| **crawl** | `url` *(positional)*<br>`--browser-config,-B` path<br>`--crawler-config,-C` path<br>`--filter-config,-f` path<br>`--extraction-config,-e` path<br>`--json-extract,-j` [desc]\*<br>`--schema,-s` path<br>`--browser,-b` k=v list<br>`--crawler,-c` k=v list<br>`--output,-o` all,json,markdown,md,markdown-fit,md-fit *(default all)*<br>`--output-file,-O` path<br>`--bypass-cache,-b` *(flag, default true — note flag reuse)*<br>`--question,-q` str<br>`--verbose,-v` *(flag)*<br>`--profile,-p` profile-name | One-shot crawl + extraction. Builds `BrowserConfig` and `CrawlerRunConfig` from inline flags or separate YAML/JSON files, runs `AsyncWebCrawler.run()`, can route through a named saved profile and pipe the result to stdout or a file. |
|
||||
| **(default)** | Same flags as **crawl**, plus `--example` | Shortcut so you can type just `crwl https://site.com`. When first arg is not a known sub-command, it falls through to *crawl*. |
|
||||
|
||||
\* `--json-extract/-j` with no value turns on LLM-based JSON extraction using an auto schema, supplying a string lets you prompt-engineer the field descriptions.
|
||||
|
||||
> Quick mental model
|
||||
> `profiles` = manage identities,
|
||||
> `browser ...` = control long-running headless Chrome that all crawls can piggy-back on,
|
||||
> `crawl` = do the actual work,
|
||||
> `config` = tweak global defaults,
|
||||
> everything else is sugar.
|
||||
|
||||
### Quick-fire “profile” usage cheatsheet
|
||||
|
||||
| Scenario | Command (copy-paste ready) | Notes |
|
||||
|---|---|---|
|
||||
| **Launch interactive Profile Manager UI** | `crwl profiles` | Opens TUI with options: 1 List, 2 Create, 3 Delete, 4 Use-to-crawl, 5 Exit. |
|
||||
| **Create a fresh profile** | `crwl profiles` → choose **2** → name it → browser opens → log in → press **q** in terminal | Saves to `~/.crawl4ai/profiles/<name>`. |
|
||||
| **List saved profiles** | `crwl profiles` → choose **1** | Shows name, browser type, size, last-modified. |
|
||||
| **Delete a profile** | `crwl profiles` → choose **3** → pick the profile index → confirm | Removes the folder. |
|
||||
| **Crawl with a profile (default alias)** | `crwl https://site.com/dashboard -p my-profile` | Keeps login cookies, sets `use_managed_browser=true` under the hood. |
|
||||
| **Crawl + verbose JSON output** | `crwl https://site.com -p my-profile -o json -v` | Any other `crawl` flags work the same. |
|
||||
| **Crawl with extra browser tweaks** | `crwl https://site.com -p my-profile -b "headless=true,viewport_width=1680"` | CLI overrides go on top of the profile. |
|
||||
| **Same but via explicit sub-command** | `crwl crawl https://site.com -p my-profile` | Identical to default alias. |
|
||||
| **Use profile from inside Profile Manager** | `crwl profiles` → choose **4** → pick profile → enter URL → follow prompts | Handy when demo-ing to non-CLI folks. |
|
||||
| **One-off crawl with a profile folder path (no name lookup)** | `crwl https://site.com -b "user_data_dir=$HOME/.crawl4ai/profiles/my-profile,use_managed_browser=true"` | Bypasses registry, useful for CI scripts. |
|
||||
| **Launch a dev browser on CDP port with the same identity** | `crwl cdp -d $HOME/.crawl4ai/profiles/my-profile -P 9223` | Lets Puppeteer/Playwright attach for debugging. |
|
||||
|
||||
85
docs/examples/adaptive_crawling/README.md
Normal file
@@ -0,0 +1,85 @@
# Adaptive Crawling Examples

This directory contains examples demonstrating various aspects of Crawl4AI's Adaptive Crawling feature.

## Examples Overview

### 1. `basic_usage.py`
- Simple introduction to adaptive crawling
- Uses default statistical strategy
- Shows how to get crawl statistics and relevant content

### 2. `embedding_strategy.py` ⭐ NEW
- Demonstrates the embedding-based strategy for semantic understanding
- Shows query expansion and irrelevance detection
- Includes configuration for both local and API-based embeddings

### 3. `embedding_vs_statistical.py` ⭐ NEW
- Direct comparison between statistical and embedding strategies
- Helps you choose the right strategy for your use case
- Shows performance and accuracy trade-offs

### 4. `embedding_configuration.py` ⭐ NEW
- Advanced configuration options for embedding strategy
- Parameter tuning guide for different scenarios
- Examples for research, exploration, and quality-focused crawling

### 5. `advanced_configuration.py`
- Shows various configuration options for both strategies
- Demonstrates threshold tuning and performance optimization

### 6. `custom_strategies.py`
- How to implement your own crawling strategy
- Extends the base CrawlStrategy class
- Advanced use case for specialized requirements

### 7. `export_import_kb.py`
- Export crawled knowledge base to JSONL
- Import and continue crawling from saved state
- Useful for building persistent knowledge bases (see the sketch below)
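
The sketch below is not the contents of `export_import_kb.py`; it only stitches together two calls that appear in the other examples in this directory (`export_knowledge_base` and the `resume_from` argument of `digest`) to show the general shape of a persistent knowledge base:

```python
# Hedged sketch, not the actual export_import_kb.py: persist what a crawl learned,
# then continue later from the saved state.
import asyncio

from crawl4ai import AsyncWebCrawler, AdaptiveCrawler, AdaptiveConfig


async def main():
    config = AdaptiveConfig(save_state=True, state_path="kb_state.json")

    async with AsyncWebCrawler() as crawler:
        adaptive = AdaptiveCrawler(crawler, config=config)
        await adaptive.digest(
            start_url="https://docs.python.org/3/",
            query="exception handling",
        )
        # Dump the accumulated knowledge base to JSONL for external tooling
        adaptive.export_knowledge_base("knowledge_base.jsonl")

        # Later (or in another run): resume from the saved crawl state
        adaptive2 = AdaptiveCrawler(crawler, config=config)
        await adaptive2.digest(
            start_url="https://docs.python.org/3/",
            query="exception handling",
            resume_from="kb_state.json",
        )


asyncio.run(main())
```
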
## Quick Start

For your first adaptive crawling experience, run:

```bash
python basic_usage.py
```

To try the new embedding strategy with semantic understanding:

```bash
python embedding_strategy.py
```

To compare strategies and see which works best for your use case:

```bash
python embedding_vs_statistical.py
```

## Strategy Selection Guide

### Use Statistical Strategy (Default) When:
- Working with technical documentation
- Queries contain specific terms or code
- Speed is critical
- No API access available

### Use Embedding Strategy When:
- Queries are conceptual or ambiguous
- Need semantic understanding beyond exact matches
- Want to detect irrelevant content
- Working with diverse content sources (a configuration sketch follows below)
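
For reference, a minimal embedding-strategy configuration, reusing the same `AdaptiveConfig` parameters shown in the other examples in this directory (the start URL and query are placeholders):

```python
# Hedged sketch: switch the adaptive crawler to the embedding strategy for
# semantic matching. Requires sentence-transformers or an OPENAI_API_KEY.
import asyncio

from crawl4ai import AsyncWebCrawler, AdaptiveCrawler, AdaptiveConfig


async def main():
    config = AdaptiveConfig(
        strategy="embedding",        # instead of the default "statistical"
        confidence_threshold=0.7,
        max_pages=15,
        top_k_links=3,
    )

    async with AsyncWebCrawler() as crawler:
        adaptive = AdaptiveCrawler(crawler, config)
        result = await adaptive.digest(
            start_url="https://docs.python.org/3/library/asyncio.html",
            query="how coroutines cooperate with the event loop",
        )
        print(f"Confidence: {adaptive.confidence:.0%}, pages: {len(result.crawled_urls)}")


asyncio.run(main())
```
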
## Requirements

- Crawl4AI installed
- For embedding strategy with local models: `sentence-transformers`
- For embedding strategy with OpenAI: Set `OPENAI_API_KEY` environment variable

## Learn More

- [Adaptive Crawling Documentation](https://docs.crawl4ai.com/core/adaptive-crawling/)
- [Mathematical Framework](https://github.com/unclecode/crawl4ai/blob/main/PROGRESSIVE_CRAWLING.md)
- [Blog: The Adaptive Crawling Revolution](https://docs.crawl4ai.com/blog/adaptive-crawling-revolution/)
207
docs/examples/adaptive_crawling/advanced_configuration.py
Normal file
@@ -0,0 +1,207 @@
|
||||
"""
|
||||
Advanced Adaptive Crawling Configuration
|
||||
|
||||
This example demonstrates all configuration options available for adaptive crawling,
|
||||
including threshold tuning, persistence, and custom parameters.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
from pathlib import Path
|
||||
from crawl4ai import AsyncWebCrawler, AdaptiveCrawler, AdaptiveConfig
|
||||
|
||||
|
||||
async def main():
|
||||
"""Demonstrate advanced configuration options"""
|
||||
|
||||
# Example 1: Custom thresholds for different use cases
|
||||
print("="*60)
|
||||
print("EXAMPLE 1: Custom Confidence Thresholds")
|
||||
print("="*60)
|
||||
|
||||
# High-precision configuration (exhaustive crawling)
|
||||
high_precision_config = AdaptiveConfig(
|
||||
confidence_threshold=0.9, # Very high confidence required
|
||||
max_pages=50, # Allow more pages
|
||||
top_k_links=5, # Follow more links per page
|
||||
min_gain_threshold=0.02 # Lower threshold to continue
|
||||
)
|
||||
|
||||
# Balanced configuration (default use case)
|
||||
balanced_config = AdaptiveConfig(
|
||||
confidence_threshold=0.7, # Moderate confidence
|
||||
max_pages=20, # Reasonable limit
|
||||
top_k_links=3, # Moderate branching
|
||||
min_gain_threshold=0.05 # Standard gain threshold
|
||||
)
|
||||
|
||||
# Quick exploration configuration
|
||||
quick_config = AdaptiveConfig(
|
||||
confidence_threshold=0.5, # Lower confidence acceptable
|
||||
max_pages=10, # Strict limit
|
||||
top_k_links=2, # Minimal branching
|
||||
min_gain_threshold=0.1 # High gain required
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler(verbose=False) as crawler:
|
||||
# Test different configurations
|
||||
for config_name, config in [
|
||||
("High Precision", high_precision_config),
|
||||
("Balanced", balanced_config),
|
||||
("Quick Exploration", quick_config)
|
||||
]:
|
||||
print(f"\nTesting {config_name} configuration...")
|
||||
adaptive = AdaptiveCrawler(crawler, config=config)
|
||||
|
||||
result = await adaptive.digest(
|
||||
start_url="https://httpbin.org",
|
||||
query="http headers authentication"
|
||||
)
|
||||
|
||||
print(f" - Pages crawled: {len(result.crawled_urls)}")
|
||||
print(f" - Confidence achieved: {adaptive.confidence:.2%}")
|
||||
print(f" - Coverage score: {adaptive.coverage_stats['coverage']:.2f}")
|
||||
|
||||
# Example 2: Persistence and state management
|
||||
print("\n" + "="*60)
|
||||
print("EXAMPLE 2: State Persistence")
|
||||
print("="*60)
|
||||
|
||||
state_file = "crawl_state_demo.json"
|
||||
|
||||
# Configuration with persistence
|
||||
persistent_config = AdaptiveConfig(
|
||||
confidence_threshold=0.8,
|
||||
max_pages=30,
|
||||
save_state=True, # Enable auto-save
|
||||
state_path=state_file # Specify save location
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler(verbose=False) as crawler:
|
||||
# First crawl - will be interrupted
|
||||
print("\nStarting initial crawl (will interrupt after 5 pages)...")
|
||||
|
||||
interrupt_config = AdaptiveConfig(
|
||||
confidence_threshold=0.8,
|
||||
max_pages=5, # Artificially low to simulate interruption
|
||||
save_state=True,
|
||||
state_path=state_file
|
||||
)
|
||||
|
||||
adaptive = AdaptiveCrawler(crawler, config=interrupt_config)
|
||||
result1 = await adaptive.digest(
|
||||
start_url="https://docs.python.org/3/",
|
||||
query="exception handling try except finally"
|
||||
)
|
||||
|
||||
print(f"First crawl completed: {len(result1.crawled_urls)} pages")
|
||||
print(f"Confidence reached: {adaptive.confidence:.2%}")
|
||||
|
||||
# Resume crawl with higher page limit
|
||||
print("\nResuming crawl from saved state...")
|
||||
|
||||
resume_config = AdaptiveConfig(
|
||||
confidence_threshold=0.8,
|
||||
max_pages=20, # Increase limit
|
||||
save_state=True,
|
||||
state_path=state_file
|
||||
)
|
||||
|
||||
adaptive2 = AdaptiveCrawler(crawler, config=resume_config)
|
||||
result2 = await adaptive2.digest(
|
||||
start_url="https://docs.python.org/3/",
|
||||
query="exception handling try except finally",
|
||||
resume_from=state_file
|
||||
)
|
||||
|
||||
print(f"Resumed crawl completed: {len(result2.crawled_urls)} total pages")
|
||||
print(f"Final confidence: {adaptive2.confidence:.2%}")
|
||||
|
||||
# Clean up
|
||||
Path(state_file).unlink(missing_ok=True)
|
||||
|
||||
# Example 3: Link selection strategies
|
||||
print("\n" + "="*60)
|
||||
print("EXAMPLE 3: Link Selection Strategies")
|
||||
print("="*60)
|
||||
|
||||
# Conservative link following
|
||||
conservative_config = AdaptiveConfig(
|
||||
confidence_threshold=0.7,
|
||||
max_pages=15,
|
||||
top_k_links=1, # Only follow best link
|
||||
min_gain_threshold=0.15 # High threshold
|
||||
)
|
||||
|
||||
# Aggressive link following
|
||||
aggressive_config = AdaptiveConfig(
|
||||
confidence_threshold=0.7,
|
||||
max_pages=15,
|
||||
top_k_links=10, # Follow many links
|
||||
min_gain_threshold=0.01 # Very low threshold
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler(verbose=False) as crawler:
|
||||
for strategy_name, config in [
|
||||
("Conservative", conservative_config),
|
||||
("Aggressive", aggressive_config)
|
||||
]:
|
||||
print(f"\n{strategy_name} link selection:")
|
||||
adaptive = AdaptiveCrawler(crawler, config=config)
|
||||
|
||||
result = await adaptive.digest(
|
||||
start_url="https://httpbin.org",
|
||||
query="api endpoints"
|
||||
)
|
||||
|
||||
# Analyze crawl pattern
|
||||
print(f" - Total pages: {len(result.crawled_urls)}")
|
||||
print(f" - Unique domains: {len(set(url.split('/')[2] for url in result.crawled_urls))}")
|
||||
print(f" - Max depth reached: {max(url.count('/') for url in result.crawled_urls) - 2}")
|
||||
|
||||
# Show saturation trend
|
||||
if hasattr(result, 'new_terms_history') and result.new_terms_history:
|
||||
print(f" - New terms discovered: {result.new_terms_history[:5]}...")
|
||||
print(f" - Saturation trend: {'decreasing' if result.new_terms_history[-1] < result.new_terms_history[0] else 'increasing'}")
|
||||
|
||||
# Example 4: Monitoring crawl progress
|
||||
print("\n" + "="*60)
|
||||
print("EXAMPLE 4: Progress Monitoring")
|
||||
print("="*60)
|
||||
|
||||
# Configuration with detailed monitoring
|
||||
monitor_config = AdaptiveConfig(
|
||||
confidence_threshold=0.75,
|
||||
max_pages=10,
|
||||
top_k_links=3
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler(verbose=False) as crawler:
|
||||
adaptive = AdaptiveCrawler(crawler, config=monitor_config)
|
||||
|
||||
# Start crawl
|
||||
print("\nMonitoring crawl progress...")
|
||||
result = await adaptive.digest(
|
||||
start_url="https://httpbin.org",
|
||||
query="http methods headers"
|
||||
)
|
||||
|
||||
# Detailed statistics
|
||||
print("\nDetailed crawl analysis:")
|
||||
adaptive.print_stats(detailed=True)
|
||||
|
||||
# Export for analysis
|
||||
print("\nExporting knowledge base for external analysis...")
|
||||
adaptive.export_knowledge_base("knowledge_export_demo.jsonl")
|
||||
print("Knowledge base exported to: knowledge_export_demo.jsonl")
|
||||
|
||||
# Show sample of exported data
|
||||
with open("knowledge_export_demo.jsonl", 'r') as f:
|
||||
first_line = f.readline()
|
||||
print(f"Sample export: {first_line[:100]}...")
|
||||
|
||||
# Clean up
|
||||
Path("knowledge_export_demo.jsonl").unlink(missing_ok=True)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
76
docs/examples/adaptive_crawling/basic_usage.py
Normal file
@@ -0,0 +1,76 @@
"""
Basic Adaptive Crawling Example

This example demonstrates the simplest use case of adaptive crawling:
finding information about a specific topic and knowing when to stop.
"""

import asyncio
from crawl4ai import AsyncWebCrawler, AdaptiveCrawler


async def main():
    """Basic adaptive crawling example"""

    # Initialize the crawler
    async with AsyncWebCrawler(verbose=True) as crawler:
        # Create an adaptive crawler with default settings (statistical strategy)
        adaptive = AdaptiveCrawler(crawler)

        # Note: You can also use embedding strategy for semantic understanding:
        # from crawl4ai import AdaptiveConfig
        # config = AdaptiveConfig(strategy="embedding")
        # adaptive = AdaptiveCrawler(crawler, config)

        # Start adaptive crawling
        print("Starting adaptive crawl for Python async programming information...")
        result = await adaptive.digest(
            start_url="https://docs.python.org/3/library/asyncio.html",
            query="async await context managers coroutines"
        )

        # Display crawl statistics
        print("\n" + "="*50)
        print("CRAWL STATISTICS")
        print("="*50)
        adaptive.print_stats(detailed=False)

        # Get the most relevant content found
        print("\n" + "="*50)
        print("MOST RELEVANT PAGES")
        print("="*50)

        relevant_pages = adaptive.get_relevant_content(top_k=5)
        for i, page in enumerate(relevant_pages, 1):
            print(f"\n{i}. {page['url']}")
            print(f" Relevance Score: {page['score']:.2%}")

            # Show a snippet of the content
            content = page['content'] or ""
            if content:
                snippet = content[:200].replace('\n', ' ')
                if len(content) > 200:
                    snippet += "..."
                print(f" Preview: {snippet}")

        # Show final confidence
        print(f"\n{'='*50}")
        print(f"Final Confidence: {adaptive.confidence:.2%}")
        print(f"Total Pages Crawled: {len(result.crawled_urls)}")
        print(f"Knowledge Base Size: {len(adaptive.state.knowledge_base)} documents")

        # Example: Check if we can answer specific questions
        print(f"\n{'='*50}")
        print("INFORMATION SUFFICIENCY CHECK")
        print(f"{'='*50}")

        if adaptive.confidence >= 0.8:
            print("✓ High confidence - can answer detailed questions about async Python")
        elif adaptive.confidence >= 0.6:
            print("~ Moderate confidence - can answer basic questions")
        else:
            print("✗ Low confidence - need more information")


if __name__ == "__main__":
    asyncio.run(main())
373
docs/examples/adaptive_crawling/custom_strategies.py
Normal file
@@ -0,0 +1,373 @@
|
||||
"""
|
||||
Custom Adaptive Crawling Strategies
|
||||
|
||||
This example demonstrates how to implement custom scoring strategies
|
||||
for domain-specific crawling needs.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import re
|
||||
from typing import List, Dict, Set
|
||||
from crawl4ai import AsyncWebCrawler, AdaptiveCrawler, AdaptiveConfig
|
||||
from crawl4ai.adaptive_crawler import CrawlState, Link
|
||||
import math
|
||||
|
||||
|
||||
class APIDocumentationStrategy:
|
||||
"""
|
||||
Custom strategy optimized for API documentation crawling.
|
||||
Prioritizes endpoint references, code examples, and parameter descriptions.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
# Keywords that indicate high-value API documentation
|
||||
self.api_keywords = {
|
||||
'endpoint', 'request', 'response', 'parameter', 'authentication',
|
||||
'header', 'body', 'query', 'path', 'method', 'get', 'post', 'put',
|
||||
'delete', 'patch', 'status', 'code', 'example', 'curl', 'python'
|
||||
}
|
||||
|
||||
# URL patterns that typically contain API documentation
|
||||
self.valuable_patterns = [
|
||||
r'/api/',
|
||||
r'/reference/',
|
||||
r'/endpoints?/',
|
||||
r'/methods?/',
|
||||
r'/resources?/'
|
||||
]
|
||||
|
||||
# Patterns to avoid
|
||||
self.avoid_patterns = [
|
||||
r'/blog/',
|
||||
r'/news/',
|
||||
r'/about/',
|
||||
r'/contact/',
|
||||
r'/legal/'
|
||||
]
|
||||
|
||||
def score_link(self, link: Link, query: str, state: CrawlState) -> float:
|
||||
"""Custom link scoring for API documentation"""
|
||||
score = 1.0
|
||||
url = link.href.lower()
|
||||
|
||||
# Boost API-related URLs
|
||||
for pattern in self.valuable_patterns:
|
||||
if re.search(pattern, url):
|
||||
score *= 2.0
|
||||
break
|
||||
|
||||
# Reduce score for non-API content
|
||||
for pattern in self.avoid_patterns:
|
||||
if re.search(pattern, url):
|
||||
score *= 0.1
|
||||
break
|
||||
|
||||
# Boost if preview contains API keywords
|
||||
if link.text:
|
||||
preview_lower = link.text.lower()
|
||||
keyword_count = sum(1 for kw in self.api_keywords if kw in preview_lower)
|
||||
score *= (1 + keyword_count * 0.2)
|
||||
|
||||
# Prioritize shallow URLs (likely overview pages)
|
||||
depth = url.count('/') - 2 # Subtract protocol slashes
|
||||
if depth <= 3:
|
||||
score *= 1.5
|
||||
elif depth > 6:
|
||||
score *= 0.5
|
||||
|
||||
return score
|
||||
|
||||
def calculate_api_coverage(self, state: CrawlState, query: str) -> Dict[str, float]:
|
||||
"""Calculate specialized coverage metrics for API documentation"""
|
||||
metrics = {
|
||||
'endpoint_coverage': 0.0,
|
||||
'example_coverage': 0.0,
|
||||
'parameter_coverage': 0.0
|
||||
}
|
||||
|
||||
# Analyze knowledge base for API-specific content
|
||||
endpoint_patterns = [r'GET\s+/', r'POST\s+/', r'PUT\s+/', r'DELETE\s+/']
|
||||
example_patterns = [r'```\w+', r'curl\s+-', r'import\s+requests']
|
||||
param_patterns = [r'param(?:eter)?s?\s*:', r'required\s*:', r'optional\s*:']
|
||||
|
||||
total_docs = len(state.knowledge_base)
|
||||
if total_docs == 0:
|
||||
return metrics
|
||||
|
||||
docs_with_endpoints = 0
|
||||
docs_with_examples = 0
|
||||
docs_with_params = 0
|
||||
|
||||
for doc in state.knowledge_base:
|
||||
content = doc.markdown.raw_markdown if hasattr(doc, 'markdown') else str(doc)
|
||||
|
||||
# Check for endpoints
|
||||
if any(re.search(pattern, content, re.IGNORECASE) for pattern in endpoint_patterns):
|
||||
docs_with_endpoints += 1
|
||||
|
||||
# Check for examples
|
||||
if any(re.search(pattern, content, re.IGNORECASE) for pattern in example_patterns):
|
||||
docs_with_examples += 1
|
||||
|
||||
# Check for parameters
|
||||
if any(re.search(pattern, content, re.IGNORECASE) for pattern in param_patterns):
|
||||
docs_with_params += 1
|
||||
|
||||
metrics['endpoint_coverage'] = docs_with_endpoints / total_docs
|
||||
metrics['example_coverage'] = docs_with_examples / total_docs
|
||||
metrics['parameter_coverage'] = docs_with_params / total_docs
|
||||
|
||||
return metrics
|
||||
|
||||
|
||||
class ResearchPaperStrategy:
|
||||
"""
|
||||
Strategy optimized for crawling research papers and academic content.
|
||||
Prioritizes citations, abstracts, and methodology sections.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.academic_keywords = {
|
||||
'abstract', 'introduction', 'methodology', 'results', 'conclusion',
|
||||
'references', 'citation', 'paper', 'study', 'research', 'analysis',
|
||||
'hypothesis', 'experiment', 'findings', 'doi'
|
||||
}
|
||||
|
||||
self.citation_patterns = [
|
||||
r'\[\d+\]', # [1] style citations
|
||||
r'\(\w+\s+\d{4}\)', # (Author 2024) style
|
||||
r'doi:\s*\S+', # DOI references
|
||||
]
|
||||
|
||||
def calculate_academic_relevance(self, content: str, query: str) -> float:
|
||||
"""Calculate relevance score for academic content"""
|
||||
score = 0.0
|
||||
content_lower = content.lower()
|
||||
|
||||
# Check for academic keywords
|
||||
keyword_matches = sum(1 for kw in self.academic_keywords if kw in content_lower)
|
||||
score += keyword_matches * 0.1
|
||||
|
||||
# Check for citations
|
||||
citation_count = sum(
|
||||
len(re.findall(pattern, content))
|
||||
for pattern in self.citation_patterns
|
||||
)
|
||||
score += min(citation_count * 0.05, 1.0) # Cap at 1.0
|
||||
|
||||
# Check for query terms in academic context
|
||||
query_terms = query.lower().split()
|
||||
for term in query_terms:
|
||||
# Boost if term appears near academic keywords
|
||||
for keyword in ['abstract', 'conclusion', 'results']:
|
||||
if keyword in content_lower:
|
||||
section = content_lower[content_lower.find(keyword):content_lower.find(keyword) + 500]
|
||||
if term in section:
|
||||
score += 0.2
|
||||
|
||||
return min(score, 2.0) # Cap total score
|
||||
|
||||
|
||||
async def demo_custom_strategies():
|
||||
"""Demonstrate custom strategy usage"""
|
||||
|
||||
# Example 1: API Documentation Strategy
|
||||
print("="*60)
|
||||
print("EXAMPLE 1: Custom API Documentation Strategy")
|
||||
print("="*60)
|
||||
|
||||
api_strategy = APIDocumentationStrategy()
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
# Standard adaptive crawler
|
||||
config = AdaptiveConfig(
|
||||
confidence_threshold=0.8,
|
||||
max_pages=15
|
||||
)
|
||||
|
||||
adaptive = AdaptiveCrawler(crawler, config)
|
||||
|
||||
# Override link scoring with custom strategy
|
||||
original_rank_links = adaptive._rank_links
|
||||
|
||||
def custom_rank_links(links, query, state):
|
||||
# Apply custom scoring
|
||||
scored_links = []
|
||||
for link in links:
|
||||
base_score = api_strategy.score_link(link, query, state)
|
||||
scored_links.append((link, base_score))
|
||||
|
||||
# Sort by score
|
||||
scored_links.sort(key=lambda x: x[1], reverse=True)
|
||||
return [link for link, _ in scored_links[:config.top_k_links]]
|
||||
|
||||
adaptive._rank_links = custom_rank_links
|
||||
|
||||
# Crawl API documentation
|
||||
print("\nCrawling API documentation with custom strategy...")
|
||||
state = await adaptive.digest(
|
||||
start_url="https://httpbin.org",
|
||||
query="api endpoints authentication headers"
|
||||
)
|
||||
|
||||
# Calculate custom metrics
|
||||
api_metrics = api_strategy.calculate_api_coverage(state, "api endpoints")
|
||||
|
||||
print(f"\nResults:")
|
||||
print(f"Pages crawled: {len(state.crawled_urls)}")
|
||||
print(f"Confidence: {adaptive.confidence:.2%}")
|
||||
print(f"\nAPI-Specific Metrics:")
|
||||
print(f" - Endpoint coverage: {api_metrics['endpoint_coverage']:.2%}")
|
||||
print(f" - Example coverage: {api_metrics['example_coverage']:.2%}")
|
||||
print(f" - Parameter coverage: {api_metrics['parameter_coverage']:.2%}")
|
||||
|
||||
# Example 2: Combined Strategy
|
||||
print("\n" + "="*60)
|
||||
print("EXAMPLE 2: Hybrid Strategy Combining Multiple Approaches")
|
||||
print("="*60)
|
||||
|
||||
class HybridStrategy:
|
||||
"""Combines multiple strategies with weights"""
|
||||
|
||||
def __init__(self):
|
||||
self.api_strategy = APIDocumentationStrategy()
|
||||
self.research_strategy = ResearchPaperStrategy()
|
||||
self.weights = {
|
||||
'api': 0.7,
|
||||
'research': 0.3
|
||||
}
|
||||
|
||||
def score_content(self, content: str, query: str) -> float:
|
||||
# Get scores from each strategy
|
||||
api_score = self._calculate_api_score(content, query)
|
||||
research_score = self.research_strategy.calculate_academic_relevance(content, query)
|
||||
|
||||
# Weighted combination
|
||||
total_score = (
|
||||
api_score * self.weights['api'] +
|
||||
research_score * self.weights['research']
|
||||
)
|
||||
|
||||
return total_score
|
||||
|
||||
def _calculate_api_score(self, content: str, query: str) -> float:
|
||||
# Simplified API scoring based on keyword presence
|
||||
content_lower = content.lower()
|
||||
api_keywords = self.api_strategy.api_keywords
|
||||
|
||||
keyword_count = sum(1 for kw in api_keywords if kw in content_lower)
|
||||
return min(keyword_count * 0.1, 2.0)
|
||||
|
||||
hybrid_strategy = HybridStrategy()
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
adaptive = AdaptiveCrawler(crawler)
|
||||
|
||||
# Crawl with hybrid scoring
|
||||
print("\nTesting hybrid strategy on technical documentation...")
|
||||
state = await adaptive.digest(
|
||||
start_url="https://docs.python.org/3/library/asyncio.html",
|
||||
query="async await coroutines api"
|
||||
)
|
||||
|
||||
# Analyze results with hybrid strategy
|
||||
print(f"\nHybrid Strategy Analysis:")
|
||||
total_score = 0
|
||||
for doc in adaptive.get_relevant_content(top_k=5):
|
||||
content = doc['content'] or ""
|
||||
score = hybrid_strategy.score_content(content, "async await api")
|
||||
total_score += score
|
||||
print(f" - {doc['url'][:50]}... Score: {score:.2f}")
|
||||
|
||||
print(f"\nAverage hybrid score: {total_score/5:.2f}")
|
||||
|
||||
|
||||
async def demo_performance_optimization():
|
||||
"""Demonstrate performance optimization with custom strategies"""
|
||||
|
||||
print("\n" + "="*60)
|
||||
print("EXAMPLE 3: Performance-Optimized Strategy")
|
||||
print("="*60)
|
||||
|
||||
class PerformanceOptimizedStrategy:
|
||||
"""Strategy that balances thoroughness with speed"""
|
||||
|
||||
def __init__(self):
|
||||
self.url_cache: Set[str] = set()
|
||||
self.domain_scores: Dict[str, float] = {}
|
||||
|
||||
def should_crawl_domain(self, url: str) -> bool:
|
||||
"""Implement domain-level filtering"""
|
||||
domain = url.split('/')[2] if url.startswith('http') else url
|
||||
|
||||
# Skip if we've already crawled many pages from this domain
|
||||
domain_count = sum(1 for cached in self.url_cache if domain in cached)
|
||||
if domain_count > 5:
|
||||
return False
|
||||
|
||||
# Skip low-scoring domains
|
||||
if domain in self.domain_scores and self.domain_scores[domain] < 0.3:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def update_domain_score(self, url: str, relevance: float):
|
||||
"""Track domain-level performance"""
|
||||
domain = url.split('/')[2] if url.startswith('http') else url
|
||||
|
||||
if domain not in self.domain_scores:
|
||||
self.domain_scores[domain] = relevance
|
||||
else:
|
||||
# Moving average
|
||||
self.domain_scores[domain] = (
|
||||
0.7 * self.domain_scores[domain] + 0.3 * relevance
|
||||
)
|
||||
|
||||
perf_strategy = PerformanceOptimizedStrategy()
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
config = AdaptiveConfig(
|
||||
confidence_threshold=0.7,
|
||||
max_pages=10,
|
||||
top_k_links=2 # Fewer links for speed
|
||||
)
|
||||
|
||||
adaptive = AdaptiveCrawler(crawler, config)
|
||||
|
||||
# Track performance
|
||||
import time
|
||||
start_time = time.time()
|
||||
|
||||
state = await adaptive.digest(
|
||||
start_url="https://httpbin.org",
|
||||
query="http methods headers"
|
||||
)
|
||||
|
||||
elapsed = time.time() - start_time
|
||||
|
||||
print(f"\nPerformance Results:")
|
||||
print(f" - Time elapsed: {elapsed:.2f} seconds")
|
||||
print(f" - Pages crawled: {len(state.crawled_urls)}")
|
||||
print(f" - Pages per second: {len(state.crawled_urls)/elapsed:.2f}")
|
||||
print(f" - Final confidence: {adaptive.confidence:.2%}")
|
||||
print(f" - Efficiency: {adaptive.confidence/len(state.crawled_urls):.2%} confidence per page")
|
||||
|
||||
|
||||
async def main():
|
||||
"""Run all demonstrations"""
|
||||
try:
|
||||
await demo_custom_strategies()
|
||||
await demo_performance_optimization()
|
||||
|
||||
print("\n" + "="*60)
|
||||
print("All custom strategy examples completed!")
|
||||
print("="*60)
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
206
docs/examples/adaptive_crawling/embedding_configuration.py
Normal file
@@ -0,0 +1,206 @@
|
||||
"""
|
||||
Advanced Embedding Configuration Example
|
||||
|
||||
This example demonstrates all configuration options available for the
|
||||
embedding strategy, including fine-tuning parameters for different use cases.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import os
|
||||
from crawl4ai import AsyncWebCrawler, AdaptiveCrawler, AdaptiveConfig
|
||||
|
||||
|
||||
async def test_configuration(name: str, config: AdaptiveConfig, url: str, query: str):
|
||||
"""Test a specific configuration"""
|
||||
print(f"\n{'='*60}")
|
||||
print(f"Configuration: {name}")
|
||||
print(f"{'='*60}")
|
||||
|
||||
async with AsyncWebCrawler(verbose=False) as crawler:
|
||||
adaptive = AdaptiveCrawler(crawler, config)
|
||||
result = await adaptive.digest(start_url=url, query=query)
|
||||
|
||||
print(f"Pages crawled: {len(result.crawled_urls)}")
|
||||
print(f"Final confidence: {adaptive.confidence:.1%}")
|
||||
print(f"Stopped reason: {result.metrics.get('stopped_reason', 'max_pages')}")
|
||||
|
||||
if result.metrics.get('is_irrelevant', False):
|
||||
print("⚠️ Query detected as irrelevant!")
|
||||
|
||||
return result
|
||||
|
||||
|
||||
async def main():
|
||||
"""Demonstrate various embedding configurations"""
|
||||
|
||||
print("EMBEDDING STRATEGY CONFIGURATION EXAMPLES")
|
||||
print("=" * 60)
|
||||
|
||||
# Base URL and query for testing
|
||||
test_url = "https://docs.python.org/3/library/asyncio.html"
|
||||
|
||||
# 1. Default Configuration
|
||||
config_default = AdaptiveConfig(
|
||||
strategy="embedding",
|
||||
max_pages=10
|
||||
)
|
||||
|
||||
await test_configuration(
|
||||
"Default Settings",
|
||||
config_default,
|
||||
test_url,
|
||||
"async programming patterns"
|
||||
)
|
||||
|
||||
# 2. Strict Coverage Requirements
|
||||
config_strict = AdaptiveConfig(
|
||||
strategy="embedding",
|
||||
max_pages=20,
|
||||
|
||||
# Stricter similarity requirements
|
||||
embedding_k_exp=5.0, # Default is 3.0, higher = stricter
|
||||
embedding_coverage_radius=0.15, # Default is 0.2, lower = stricter
|
||||
|
||||
# Higher validation threshold
|
||||
embedding_validation_min_score=0.6, # Default is 0.3
|
||||
|
||||
# More query variations for better coverage
|
||||
n_query_variations=15 # Default is 10
|
||||
)
|
||||
|
||||
await test_configuration(
|
||||
"Strict Coverage (Research/Academic)",
|
||||
config_strict,
|
||||
test_url,
|
||||
"comprehensive guide async await"
|
||||
)
|
||||
|
||||
# 3. Fast Exploration
|
||||
config_fast = AdaptiveConfig(
|
||||
strategy="embedding",
|
||||
max_pages=10,
|
||||
top_k_links=5, # Follow more links per page
|
||||
|
||||
# Relaxed requirements for faster convergence
|
||||
embedding_k_exp=1.0, # Lower = more lenient
|
||||
embedding_min_relative_improvement=0.05, # Stop earlier
|
||||
|
||||
# Lower quality thresholds
|
||||
embedding_quality_min_confidence=0.5, # Display lower confidence
|
||||
embedding_quality_max_confidence=0.85,
|
||||
|
||||
# Fewer query variations for speed
|
||||
n_query_variations=5
|
||||
)
|
||||
|
||||
await test_configuration(
|
||||
"Fast Exploration (Quick Overview)",
|
||||
config_fast,
|
||||
test_url,
|
||||
"async basics"
|
||||
)
|
||||
|
||||
# 4. Irrelevance Detection Focus
|
||||
config_irrelevance = AdaptiveConfig(
|
||||
strategy="embedding",
|
||||
max_pages=5,
|
||||
|
||||
# Aggressive irrelevance detection
|
||||
embedding_min_confidence_threshold=0.2, # Higher threshold (default 0.1)
|
||||
embedding_k_exp=5.0, # Strict similarity
|
||||
|
||||
# Quick stopping for irrelevant content
|
||||
embedding_min_relative_improvement=0.15
|
||||
)
|
||||
|
||||
await test_configuration(
|
||||
"Irrelevance Detection",
|
||||
config_irrelevance,
|
||||
test_url,
|
||||
"recipe for chocolate cake" # Irrelevant query
|
||||
)
|
||||
|
||||
# 5. High-Quality Knowledge Base
|
||||
config_quality = AdaptiveConfig(
|
||||
strategy="embedding",
|
||||
max_pages=30,
|
||||
|
||||
# Deduplication settings
|
||||
embedding_overlap_threshold=0.75, # More aggressive deduplication
|
||||
|
||||
# Quality focus
|
||||
embedding_validation_min_score=0.5,
|
||||
embedding_quality_scale_factor=1.0, # Linear quality mapping
|
||||
|
||||
# Balanced parameters
|
||||
embedding_k_exp=3.0,
|
||||
embedding_nearest_weight=0.8, # Focus on best matches
|
||||
embedding_top_k_weight=0.2
|
||||
)
|
||||
|
||||
await test_configuration(
|
||||
"High-Quality Knowledge Base",
|
||||
config_quality,
|
||||
test_url,
|
||||
"asyncio advanced patterns best practices"
|
||||
)
|
||||
|
||||
# 6. Custom Embedding Provider
|
||||
if os.getenv('OPENAI_API_KEY'):
|
||||
config_openai = AdaptiveConfig(
|
||||
strategy="embedding",
|
||||
max_pages=10,
|
||||
|
||||
# Use OpenAI embeddings
|
||||
embedding_llm_config={
|
||||
'provider': 'openai/text-embedding-3-small',
|
||||
'api_token': os.getenv('OPENAI_API_KEY')
|
||||
},
|
||||
|
||||
# OpenAI embeddings are high quality, can be stricter
|
||||
embedding_k_exp=4.0,
|
||||
n_query_variations=12
|
||||
)
|
||||
|
||||
await test_configuration(
|
||||
"OpenAI Embeddings",
|
||||
config_openai,
|
||||
test_url,
|
||||
"event-driven architecture patterns"
|
||||
)
|
||||
|
||||
# Parameter Guide
|
||||
print("\n" + "="*60)
|
||||
print("PARAMETER TUNING GUIDE")
|
||||
print("="*60)
|
||||
|
||||
print("\n📊 Key Parameters and Their Effects:")
|
||||
print("\n1. embedding_k_exp (default: 3.0)")
|
||||
print(" - Lower (1-2): More lenient, faster convergence")
|
||||
print(" - Higher (4-5): Stricter, better precision")
|
||||
|
||||
print("\n2. embedding_coverage_radius (default: 0.2)")
|
||||
print(" - Lower (0.1-0.15): Requires closer matches")
|
||||
print(" - Higher (0.25-0.3): Accepts broader matches")
|
||||
|
||||
print("\n3. n_query_variations (default: 10)")
|
||||
print(" - Lower (5-7): Faster, less comprehensive")
|
||||
print(" - Higher (15-20): Better coverage, slower")
|
||||
|
||||
print("\n4. embedding_min_confidence_threshold (default: 0.1)")
|
||||
print(" - Set to 0.15-0.2 for aggressive irrelevance detection")
|
||||
print(" - Set to 0.05 to crawl even barely relevant content")
|
||||
|
||||
print("\n5. embedding_validation_min_score (default: 0.3)")
|
||||
print(" - Higher (0.5-0.6): Requires strong validation")
|
||||
print(" - Lower (0.2): More permissive stopping")
|
||||
|
||||
print("\n💡 Tips:")
|
||||
print("- For research: High k_exp, more variations, strict validation")
|
||||
print("- For exploration: Low k_exp, fewer variations, relaxed thresholds")
|
||||
print("- For quality: Focus on overlap_threshold and validation scores")
|
||||
print("- For speed: Reduce variations, increase min_relative_improvement")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
109
docs/examples/adaptive_crawling/embedding_strategy.py
Normal file
@@ -0,0 +1,109 @@
"""
Embedding Strategy Example for Adaptive Crawling

This example demonstrates how to use the embedding-based strategy
for semantic understanding and intelligent crawling.
"""

import asyncio
import os
from crawl4ai import AsyncWebCrawler, AdaptiveCrawler, AdaptiveConfig


async def main():
    """Demonstrate embedding strategy for adaptive crawling"""

    # Configure embedding strategy
    config = AdaptiveConfig(
        strategy="embedding",  # Use embedding strategy
        embedding_model="sentence-transformers/all-MiniLM-L6-v2",  # Default model
        n_query_variations=10,  # Generate 10 semantic variations
        max_pages=15,
        top_k_links=3,
        min_gain_threshold=0.05,

        # Embedding-specific parameters
        embedding_k_exp=3.0,  # Higher = stricter similarity requirements
        embedding_min_confidence_threshold=0.1,  # Stop if <10% relevant
        embedding_validation_min_score=0.4  # Validation threshold
    )

    # Optional: Use OpenAI embeddings instead
    if os.getenv('OPENAI_API_KEY'):
        config.embedding_llm_config = {
            'provider': 'openai/text-embedding-3-small',
            'api_token': os.getenv('OPENAI_API_KEY')
        }
        print("Using OpenAI embeddings")
    else:
        print("Using sentence-transformers (local embeddings)")

    async with AsyncWebCrawler(verbose=True) as crawler:
        adaptive = AdaptiveCrawler(crawler, config)

        # Test 1: Relevant query with semantic understanding
        print("\n" + "="*50)
        print("TEST 1: Semantic Query Understanding")
        print("="*50)

        result = await adaptive.digest(
            start_url="https://docs.python.org/3/library/asyncio.html",
            query="concurrent programming event-driven architecture"
        )

        print("\nQuery Expansion:")
        print(f"Original query expanded to {len(result.expanded_queries)} variations")
        for i, q in enumerate(result.expanded_queries[:3], 1):
            print(f" {i}. {q}")
        print(" ...")

        print("\nResults:")
        adaptive.print_stats(detailed=False)

        # Test 2: Detecting irrelevant queries
        print("\n" + "="*50)
        print("TEST 2: Irrelevant Query Detection")
        print("="*50)

        # Reset crawler for new query
        adaptive = AdaptiveCrawler(crawler, config)

        result = await adaptive.digest(
            start_url="https://docs.python.org/3/library/asyncio.html",
            query="how to bake chocolate chip cookies"
        )

        if result.metrics.get('is_irrelevant', False):
            print("\n✅ Successfully detected irrelevant query!")
            print(f"Stopped after just {len(result.crawled_urls)} pages")
            print(f"Reason: {result.metrics.get('stopped_reason', 'unknown')}")
        else:
            print("\n❌ Failed to detect irrelevance")

        print(f"Final confidence: {adaptive.confidence:.1%}")

        # Test 3: Semantic gap analysis
        print("\n" + "="*50)
        print("TEST 3: Semantic Gap Analysis")
        print("="*50)

        # Show how embedding strategy identifies gaps
        adaptive = AdaptiveCrawler(crawler, config)

        result = await adaptive.digest(
            start_url="https://realpython.com",
            query="python decorators advanced patterns"
        )

        print(f"\nSemantic gaps identified: {len(result.semantic_gaps)}")
        print(f"Knowledge base embeddings shape: {result.kb_embeddings.shape if result.kb_embeddings is not None else 'None'}")

        # Show coverage metrics specific to embedding strategy
        print("\nEmbedding-specific metrics:")
        print(f" Average best similarity: {result.metrics.get('avg_best_similarity', 0):.3f}")
        print(f" Coverage score: {result.metrics.get('coverage_score', 0):.3f}")
        print(f" Validation confidence: {result.metrics.get('validation_confidence', 0):.2%}")


if __name__ == "__main__":
    asyncio.run(main())
167
docs/examples/adaptive_crawling/embedding_vs_statistical.py
Normal file
@@ -0,0 +1,167 @@
"""
Comparison: Embedding vs Statistical Strategy

This example demonstrates the differences between statistical and embedding
strategies for adaptive crawling, showing when to use each approach.
"""

import asyncio
import time
import os
from crawl4ai import AsyncWebCrawler, AdaptiveCrawler, AdaptiveConfig


async def crawl_with_strategy(url: str, query: str, strategy: str, **kwargs):
    """Helper function to crawl with a specific strategy"""
    config = AdaptiveConfig(
        strategy=strategy,
        max_pages=20,
        top_k_links=3,
        min_gain_threshold=0.05,
        **kwargs
    )

    async with AsyncWebCrawler(verbose=False) as crawler:
        adaptive = AdaptiveCrawler(crawler, config)

        start_time = time.time()
        result = await adaptive.digest(start_url=url, query=query)
        elapsed = time.time() - start_time

        return {
            'result': result,
            'crawler': adaptive,
            'elapsed': elapsed,
            'pages': len(result.crawled_urls),
            'confidence': adaptive.confidence
        }


async def main():
    """Compare embedding and statistical strategies"""

    # Test scenarios
    test_cases = [
        {
            'name': 'Technical Documentation (Specific Terms)',
            'url': 'https://docs.python.org/3/library/asyncio.html',
            'query': 'asyncio.create_task event_loop.run_until_complete'
        },
        {
            'name': 'Conceptual Query (Semantic Understanding)',
            'url': 'https://docs.python.org/3/library/asyncio.html',
            'query': 'concurrent programming patterns'
        },
        {
            'name': 'Ambiguous Query',
            'url': 'https://realpython.com',
            'query': 'python performance optimization'
        }
    ]

    # Configure embedding strategy
    embedding_config = {}
    if os.getenv('OPENAI_API_KEY'):
        embedding_config['embedding_llm_config'] = {
            'provider': 'openai/text-embedding-3-small',
            'api_token': os.getenv('OPENAI_API_KEY')
        }

    for test in test_cases:
        print("\n" + "="*70)
        print(f"TEST: {test['name']}")
        print(f"URL: {test['url']}")
        print(f"Query: '{test['query']}'")
        print("="*70)

        # Run statistical strategy
        print("\n📊 Statistical Strategy:")
        stat_result = await crawl_with_strategy(
            test['url'],
            test['query'],
            'statistical'
        )

        print(f" Pages crawled: {stat_result['pages']}")
        print(f" Time taken: {stat_result['elapsed']:.2f}s")
        print(f" Confidence: {stat_result['confidence']:.1%}")
        print(f" Sufficient: {'Yes' if stat_result['crawler'].is_sufficient else 'No'}")

        # Show term coverage
        if hasattr(stat_result['result'], 'term_frequencies'):
            query_terms = test['query'].lower().split()
            covered = sum(1 for term in query_terms
                          if term in stat_result['result'].term_frequencies)
            print(f" Term coverage: {covered}/{len(query_terms)} query terms found")

        # Run embedding strategy
        print("\n🧠 Embedding Strategy:")
        emb_result = await crawl_with_strategy(
            test['url'],
            test['query'],
            'embedding',
            **embedding_config
        )

        print(f" Pages crawled: {emb_result['pages']}")
        print(f" Time taken: {emb_result['elapsed']:.2f}s")
        print(f" Confidence: {emb_result['confidence']:.1%}")
        print(f" Sufficient: {'Yes' if emb_result['crawler'].is_sufficient else 'No'}")

        # Show semantic understanding
        if emb_result['result'].expanded_queries:
            print(f" Query variations: {len(emb_result['result'].expanded_queries)}")
            print(f" Semantic gaps: {len(emb_result['result'].semantic_gaps)}")

        # Compare results
        print("\n📈 Comparison:")
        efficiency_diff = ((stat_result['pages'] - emb_result['pages']) /
                           stat_result['pages'] * 100) if stat_result['pages'] > 0 else 0

        print(f" Efficiency: ", end="")
        if efficiency_diff > 0:
            print(f"Embedding used {efficiency_diff:.0f}% fewer pages")
        else:
            print(f"Statistical used {-efficiency_diff:.0f}% fewer pages")

        print(f" Speed: ", end="")
        if stat_result['elapsed'] < emb_result['elapsed']:
            print(f"Statistical was {emb_result['elapsed']/stat_result['elapsed']:.1f}x faster")
        else:
            print(f"Embedding was {stat_result['elapsed']/emb_result['elapsed']:.1f}x faster")

        print(f" Confidence difference: {abs(stat_result['confidence'] - emb_result['confidence'])*100:.0f} percentage points")

        # Recommendation
        print("\n💡 Recommendation:")
        if 'specific' in test['name'].lower() or all(len(term) > 5 for term in test['query'].split()):
            print(" → Statistical strategy is likely better for this use case (specific terms)")
        elif 'conceptual' in test['name'].lower() or 'semantic' in test['name'].lower():
            print(" → Embedding strategy is likely better for this use case (semantic understanding)")
        else:
            if emb_result['confidence'] > stat_result['confidence'] + 0.1:
                print(" → Embedding strategy achieved significantly better understanding")
            elif stat_result['elapsed'] < emb_result['elapsed'] / 2:
                print(" → Statistical strategy is much faster with similar results")
            else:
                print(" → Both strategies performed similarly; choose based on your priorities")

    # Summary recommendations
    print("\n" + "="*70)
    print("STRATEGY SELECTION GUIDE")
    print("="*70)
    print("\n✅ Use STATISTICAL strategy when:")
    print(" - Queries contain specific technical terms")
    print(" - Speed is critical")
    print(" - No API access available")
    print(" - Working with well-structured documentation")

    print("\n✅ Use EMBEDDING strategy when:")
    print(" - Queries are conceptual or ambiguous")
    print(" - Semantic understanding is important")
    print(" - Need to detect irrelevant content")
    print(" - Working with diverse content sources")


if __name__ == "__main__":
    asyncio.run(main())
232
docs/examples/adaptive_crawling/export_import_kb.py
Normal file
@@ -0,0 +1,232 @@
|
||||
"""
|
||||
Knowledge Base Export and Import
|
||||
|
||||
This example demonstrates how to export crawled knowledge bases and
|
||||
import them for reuse, sharing, or analysis.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
from pathlib import Path
|
||||
from crawl4ai import AsyncWebCrawler, AdaptiveCrawler, AdaptiveConfig
|
||||
|
||||
|
||||
async def build_knowledge_base():
|
||||
"""Build a knowledge base about web technologies"""
|
||||
print("="*60)
|
||||
print("PHASE 1: Building Knowledge Base")
|
||||
print("="*60)
|
||||
|
||||
async with AsyncWebCrawler(verbose=False) as crawler:
|
||||
adaptive = AdaptiveCrawler(crawler)
|
||||
|
||||
# Crawl information about HTTP
|
||||
print("\n1. Gathering HTTP protocol information...")
|
||||
await adaptive.digest(
|
||||
start_url="https://httpbin.org",
|
||||
query="http methods headers status codes"
|
||||
)
|
||||
print(f" - Pages crawled: {len(adaptive.state.crawled_urls)}")
|
||||
print(f" - Confidence: {adaptive.confidence:.2%}")
|
||||
|
||||
# Add more information about APIs
|
||||
print("\n2. Adding API documentation knowledge...")
|
||||
await adaptive.digest(
|
||||
start_url="https://httpbin.org/anything",
|
||||
query="rest api json response request"
|
||||
)
|
||||
print(f" - Total pages: {len(adaptive.state.crawled_urls)}")
|
||||
print(f" - Confidence: {adaptive.confidence:.2%}")
|
||||
|
||||
# Export the knowledge base
|
||||
export_path = "web_tech_knowledge.jsonl"
|
||||
print(f"\n3. Exporting knowledge base to {export_path}")
|
||||
adaptive.export_knowledge_base(export_path)
|
||||
|
||||
# Show export statistics
|
||||
export_size = Path(export_path).stat().st_size / 1024
|
||||
with open(export_path, 'r') as f:
|
||||
line_count = sum(1 for _ in f)
|
||||
|
||||
print(f" - Exported {line_count} documents")
|
||||
print(f" - File size: {export_size:.1f} KB")
|
||||
|
||||
return export_path
|
||||
|
||||
|
||||
async def analyze_knowledge_base(kb_path):
|
||||
"""Analyze the exported knowledge base"""
|
||||
print("\n" + "="*60)
|
||||
print("PHASE 2: Analyzing Exported Knowledge Base")
|
||||
print("="*60)
|
||||
|
||||
# Read and analyze JSONL
|
||||
documents = []
|
||||
with open(kb_path, 'r') as f:
|
||||
for line in f:
|
||||
documents.append(json.loads(line))
|
||||
|
||||
print(f"\nKnowledge base contains {len(documents)} documents:")
|
||||
|
||||
# Analyze document properties
|
||||
total_content_length = 0
|
||||
urls_by_domain = {}
|
||||
|
||||
for doc in documents:
|
||||
# Content analysis
|
||||
content_length = len(doc.get('content', ''))
|
||||
total_content_length += content_length
|
||||
|
||||
# URL analysis
|
||||
url = doc.get('url', '')
|
||||
domain = url.split('/')[2] if url.startswith('http') else 'unknown'
|
||||
urls_by_domain[domain] = urls_by_domain.get(domain, 0) + 1
|
||||
|
||||
# Show sample document
|
||||
if documents.index(doc) == 0:
|
||||
print(f"\nSample document structure:")
|
||||
print(f" - URL: {url}")
|
||||
print(f" - Content length: {content_length} chars")
|
||||
print(f" - Has metadata: {'metadata' in doc}")
|
||||
print(f" - Has links: {len(doc.get('links', []))} links")
|
||||
print(f" - Query: {doc.get('query', 'N/A')}")
|
||||
|
||||
print(f"\nContent statistics:")
|
||||
print(f" - Total content: {total_content_length:,} characters")
|
||||
print(f" - Average per document: {total_content_length/len(documents):,.0f} chars")
|
||||
|
||||
print(f"\nDomain distribution:")
|
||||
for domain, count in urls_by_domain.items():
|
||||
print(f" - {domain}: {count} pages")
|
||||
|
||||
|
||||
async def import_and_continue():
|
||||
"""Import a knowledge base and continue crawling"""
|
||||
print("\n" + "="*60)
|
||||
print("PHASE 3: Importing and Extending Knowledge Base")
|
||||
print("="*60)
|
||||
|
||||
kb_path = "web_tech_knowledge.jsonl"
|
||||
|
||||
async with AsyncWebCrawler(verbose=False) as crawler:
|
||||
# Create new adaptive crawler
|
||||
adaptive = AdaptiveCrawler(crawler)
|
||||
|
||||
# Import existing knowledge base
|
||||
print(f"\n1. Importing knowledge base from {kb_path}")
|
||||
adaptive.import_knowledge_base(kb_path)
|
||||
|
||||
print(f" - Imported {len(adaptive.state.knowledge_base)} documents")
|
||||
print(f" - Existing URLs: {len(adaptive.state.crawled_urls)}")
|
||||
|
||||
# Check current state
|
||||
print("\n2. Checking imported knowledge state:")
|
||||
adaptive.print_stats(detailed=False)
|
||||
|
||||
# Continue crawling with new query
|
||||
print("\n3. Extending knowledge with new query...")
|
||||
await adaptive.digest(
|
||||
start_url="https://httpbin.org/status/200",
|
||||
query="error handling retry timeout"
|
||||
)
|
||||
|
||||
print("\n4. Final knowledge base state:")
|
||||
adaptive.print_stats(detailed=False)
|
||||
|
||||
# Export extended knowledge base
|
||||
extended_path = "web_tech_knowledge_extended.jsonl"
|
||||
adaptive.export_knowledge_base(extended_path)
|
||||
print(f"\n5. Extended knowledge base exported to {extended_path}")
|
||||
|
||||
|
||||
async def share_knowledge_bases():
|
||||
"""Demonstrate sharing knowledge bases between projects"""
|
||||
print("\n" + "="*60)
|
||||
print("PHASE 4: Sharing Knowledge Between Projects")
|
||||
print("="*60)
|
||||
|
||||
# Simulate two different projects
|
||||
project_a_kb = "project_a_knowledge.jsonl"
|
||||
project_b_kb = "project_b_knowledge.jsonl"
|
||||
|
||||
async with AsyncWebCrawler(verbose=False) as crawler:
|
||||
# Project A: Security documentation
|
||||
print("\n1. Project A: Building security knowledge...")
|
||||
crawler_a = AdaptiveCrawler(crawler)
|
||||
await crawler_a.digest(
|
||||
start_url="https://httpbin.org/basic-auth/user/pass",
|
||||
query="authentication security headers"
|
||||
)
|
||||
crawler_a.export_knowledge_base(project_a_kb)
|
||||
print(f" - Exported {len(crawler_a.state.knowledge_base)} documents")
|
||||
|
||||
# Project B: API testing
|
||||
print("\n2. Project B: Building testing knowledge...")
|
||||
crawler_b = AdaptiveCrawler(crawler)
|
||||
await crawler_b.digest(
|
||||
start_url="https://httpbin.org/anything",
|
||||
query="testing endpoints mocking"
|
||||
)
|
||||
crawler_b.export_knowledge_base(project_b_kb)
|
||||
print(f" - Exported {len(crawler_b.state.knowledge_base)} documents")
|
||||
|
||||
# Merge knowledge bases
|
||||
print("\n3. Merging knowledge bases...")
|
||||
merged_crawler = AdaptiveCrawler(crawler)
|
||||
|
||||
# Import both knowledge bases
|
||||
merged_crawler.import_knowledge_base(project_a_kb)
|
||||
initial_size = len(merged_crawler.state.knowledge_base)
|
||||
|
||||
merged_crawler.import_knowledge_base(project_b_kb)
|
||||
final_size = len(merged_crawler.state.knowledge_base)
|
||||
|
||||
print(f" - Project A documents: {initial_size}")
|
||||
print(f" - Additional from Project B: {final_size - initial_size}")
|
||||
print(f" - Total merged documents: {final_size}")
|
||||
|
||||
# Export merged knowledge
|
||||
merged_kb = "merged_knowledge.jsonl"
|
||||
merged_crawler.export_knowledge_base(merged_kb)
|
||||
print(f"\n4. Merged knowledge base exported to {merged_kb}")
|
||||
|
||||
# Show combined coverage
|
||||
print("\n5. Combined knowledge coverage:")
|
||||
merged_crawler.print_stats(detailed=False)
|
||||
|
||||
|
||||
async def main():
|
||||
"""Run all examples"""
|
||||
try:
|
||||
# Build initial knowledge base
|
||||
kb_path = await build_knowledge_base()
|
||||
|
||||
# Analyze the export
|
||||
await analyze_knowledge_base(kb_path)
|
||||
|
||||
# Import and extend
|
||||
await import_and_continue()
|
||||
|
||||
# Demonstrate sharing
|
||||
await share_knowledge_bases()
|
||||
|
||||
print("\n" + "="*60)
|
||||
print("All examples completed successfully!")
|
||||
print("="*60)
|
||||
|
||||
finally:
|
||||
# Clean up generated files
|
||||
print("\nCleaning up generated files...")
|
||||
for file in [
|
||||
"web_tech_knowledge.jsonl",
|
||||
"web_tech_knowledge_extended.jsonl",
|
||||
"project_a_knowledge.jsonl",
|
||||
"project_b_knowledge.jsonl",
|
||||
"merged_knowledge.jsonl"
|
||||
]:
|
||||
Path(file).unlink(missing_ok=True)
|
||||
print("Cleanup complete.")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
@@ -5,7 +5,7 @@ prices, ratings, and other details using CSS selectors.
 """
 
 from crawl4ai import AsyncWebCrawler
-from crawl4ai.extraction_strategy import JsonCssExtractionStrategy
+from crawl4ai import JsonCssExtractionStrategy
 from crawl4ai.async_configs import BrowserConfig, CrawlerRunConfig
 import json
 
@@ -5,7 +5,7 @@ prices, ratings, and other details using CSS selectors.
 """
 
 from crawl4ai import AsyncWebCrawler, CacheMode
-from crawl4ai.extraction_strategy import JsonCssExtractionStrategy
+from crawl4ai import JsonCssExtractionStrategy
 from crawl4ai.async_configs import BrowserConfig, CrawlerRunConfig
 import json
 from playwright.async_api import Page, BrowserContext
@@ -5,7 +5,7 @@ prices, ratings, and other details using CSS selectors.
 """
 
 from crawl4ai import AsyncWebCrawler, CacheMode
-from crawl4ai.extraction_strategy import JsonCssExtractionStrategy
+from crawl4ai import JsonCssExtractionStrategy
 from crawl4ai.async_configs import BrowserConfig, CrawlerRunConfig
 import json
 
BIN
docs/examples/assets/instagram_grid_result.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 6.6 MiB
132
docs/examples/assets/virtual_scroll_append_only.html
Normal file
@@ -0,0 +1,132 @@
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title>Append-Only Scroll (Traditional Infinite Scroll)</title>
|
||||
<style>
|
||||
body {
|
||||
font-family: Arial, sans-serif;
|
||||
margin: 0;
|
||||
padding: 20px;
|
||||
background-color: #f5f5f5;
|
||||
}
|
||||
|
||||
h1 {
|
||||
color: #333;
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
.posts-container {
|
||||
max-width: 800px;
|
||||
margin: 0 auto;
|
||||
background: white;
|
||||
border: 1px solid #ddd;
|
||||
border-radius: 5px;
|
||||
padding: 20px;
|
||||
}
|
||||
|
||||
.post {
|
||||
background: #f9f9f9;
|
||||
padding: 15px;
|
||||
margin-bottom: 15px;
|
||||
border-radius: 5px;
|
||||
border: 1px solid #eee;
|
||||
}
|
||||
|
||||
.post-title {
|
||||
font-size: 18px;
|
||||
font-weight: bold;
|
||||
color: #2c3e50;
|
||||
margin-bottom: 10px;
|
||||
}
|
||||
|
||||
.post-content {
|
||||
color: #555;
|
||||
line-height: 1.6;
|
||||
}
|
||||
|
||||
.loading {
|
||||
text-align: center;
|
||||
padding: 20px;
|
||||
color: #888;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<h1>Traditional Infinite Scroll Demo</h1>
|
||||
<p style="text-align: center; color: #666;">This appends new content without removing old content</p>
|
||||
<div class="posts-container"></div>
|
||||
|
||||
<script>
|
||||
// Traditional infinite scroll - APPENDS content
|
||||
const container = document.querySelector('.posts-container');
|
||||
const totalPosts = 200;
|
||||
const postsPerPage = 20;
|
||||
let loadedPosts = 0;
|
||||
let isLoading = false;
|
||||
|
||||
// Generate fake post data
|
||||
function generatePost(index) {
|
||||
return {
|
||||
id: index,
|
||||
title: `Post Title #${index + 1}`,
|
||||
content: `This is the content of post ${index + 1}. In traditional infinite scroll, new content is appended to existing content. The DOM keeps growing. Post ID: ${index}`
|
||||
};
|
||||
}
|
||||
|
||||
// Load more posts - APPENDS to existing content
|
||||
function loadMorePosts() {
|
||||
if (isLoading || loadedPosts >= totalPosts) return;
|
||||
|
||||
isLoading = true;
|
||||
|
||||
// Show loading indicator
|
||||
const loadingDiv = document.createElement('div');
|
||||
loadingDiv.className = 'loading';
|
||||
loadingDiv.textContent = 'Loading more posts...';
|
||||
container.appendChild(loadingDiv);
|
||||
|
||||
// Simulate network delay
|
||||
setTimeout(() => {
|
||||
// Remove loading indicator
|
||||
container.removeChild(loadingDiv);
|
||||
|
||||
// Add new posts
|
||||
const fragment = document.createDocumentFragment();
|
||||
const endIndex = Math.min(loadedPosts + postsPerPage, totalPosts);
|
||||
|
||||
for (let i = loadedPosts; i < endIndex; i++) {
|
||||
const post = generatePost(i);
|
||||
const postElement = document.createElement('div');
|
||||
postElement.className = 'post';
|
||||
postElement.setAttribute('data-post-id', post.id);
|
||||
postElement.innerHTML = `
|
||||
<div class="post-title">${post.title}</div>
|
||||
<div class="post-content">${post.content}</div>
|
||||
`;
|
||||
fragment.appendChild(postElement);
|
||||
}
|
||||
|
||||
// APPEND new posts to existing ones
|
||||
container.appendChild(fragment);
|
||||
loadedPosts = endIndex;
|
||||
isLoading = false;
|
||||
|
||||
console.log(`Loaded ${loadedPosts} of ${totalPosts} posts`);
|
||||
}, 300);
|
||||
}
|
||||
|
||||
// Initial load
|
||||
loadMorePosts();
|
||||
|
||||
// Load more on scroll
|
||||
window.addEventListener('scroll', () => {
|
||||
const scrollBottom = window.innerHeight + window.scrollY;
|
||||
const threshold = document.body.offsetHeight - 500;
|
||||
|
||||
if (scrollBottom >= threshold) {
|
||||
loadMorePosts();
|
||||
}
|
||||
});
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
158
docs/examples/assets/virtual_scroll_instagram_grid.html
Normal file
@@ -0,0 +1,158 @@
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title>Instagram-like Grid Virtual Scroll</title>
|
||||
<style>
|
||||
body {
|
||||
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Helvetica, Arial, sans-serif;
|
||||
margin: 0;
|
||||
padding: 20px;
|
||||
background-color: #fafafa;
|
||||
}
|
||||
|
||||
h1 {
|
||||
text-align: center;
|
||||
color: #262626;
|
||||
font-weight: 300;
|
||||
}
|
||||
|
||||
.feed-container {
|
||||
max-width: 935px;
|
||||
margin: 0 auto;
|
||||
height: 800px;
|
||||
overflow-y: auto;
|
||||
background: white;
|
||||
border: 1px solid #dbdbdb;
|
||||
border-radius: 3px;
|
||||
}
|
||||
|
||||
.grid {
|
||||
display: grid;
|
||||
grid-template-columns: repeat(3, 1fr);
|
||||
gap: 28px;
|
||||
padding: 28px;
|
||||
}
|
||||
|
||||
.post {
|
||||
aspect-ratio: 1;
|
||||
background: #f0f0f0;
|
||||
border-radius: 3px;
|
||||
position: relative;
|
||||
overflow: hidden;
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
.post:hover .overlay {
|
||||
opacity: 1;
|
||||
}
|
||||
|
||||
.post img {
|
||||
width: 100%;
|
||||
height: 100%;
|
||||
object-fit: cover;
|
||||
}
|
||||
|
||||
.overlay {
|
||||
position: absolute;
|
||||
top: 0;
|
||||
left: 0;
|
||||
right: 0;
|
||||
bottom: 0;
|
||||
background: rgba(0, 0, 0, 0.3);
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
color: white;
|
||||
font-size: 14px;
|
||||
opacity: 0;
|
||||
transition: opacity 0.2s;
|
||||
}
|
||||
|
||||
.stats {
|
||||
display: flex;
|
||||
gap: 20px;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<h1>Instagram Grid Virtual Scroll</h1>
|
||||
<p style="text-align: center; color: #8e8e8e;">Grid layout with virtual scrolling - only visible rows are rendered</p>
|
||||
<div class="feed-container">
|
||||
<div class="grid" id="grid"></div>
|
||||
</div>
|
||||
|
||||
<script>
|
||||
// Instagram-like grid virtual scroll
|
||||
const grid = document.getElementById('grid');
|
||||
const container = document.querySelector('.feed-container');
|
||||
const totalPosts = 999; // Instagram style count
|
||||
const postsPerRow = 3;
|
||||
const rowsPerPage = 4; // 12 posts per page
|
||||
const postsPerPage = postsPerRow * rowsPerPage;
|
||||
let currentStartIndex = 0;
|
||||
|
||||
// Generate fake Instagram post data
|
||||
const allPosts = [];
|
||||
for (let i = 0; i < totalPosts; i++) {
|
||||
allPosts.push({
|
||||
id: i,
|
||||
likes: Math.floor(Math.random() * 10000),
|
||||
comments: Math.floor(Math.random() * 500),
|
||||
imageNumber: (i % 10) + 1 // Cycle through 10 placeholder images
|
||||
});
|
||||
}
|
||||
|
||||
// Render grid - REPLACES content for performance
|
||||
function renderGrid(startIndex) {
|
||||
const posts = [];
|
||||
const endIndex = Math.min(startIndex + postsPerPage, totalPosts);
|
||||
|
||||
for (let i = startIndex; i < endIndex; i++) {
|
||||
const post = allPosts[i];
|
||||
posts.push(`
|
||||
<div class="post" data-post-id="${post.id}">
|
||||
<img src="data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='400' height='400'%3E%3Crect width='400' height='400' fill='%23${Math.floor(Math.random()*16777215).toString(16)}'/%3E%3Ctext x='50%25' y='50%25' text-anchor='middle' dy='.3em' font-family='Arial' font-size='48' fill='white'%3E${post.id + 1}%3C/text%3E%3C/svg%3E" alt="Post ${post.id + 1}">
|
||||
<div class="overlay">
|
||||
<div class="stats">
|
||||
<span>❤️ ${post.likes.toLocaleString()}</span>
|
||||
<span>💬 ${post.comments}</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
`);
|
||||
}
|
||||
|
||||
// REPLACE grid content (virtual scroll)
|
||||
grid.innerHTML = posts.join('');
|
||||
currentStartIndex = startIndex;
|
||||
}
|
||||
|
||||
// Initial render
|
||||
renderGrid(0);
|
||||
|
||||
// Handle scroll
|
||||
let scrollTimeout;
|
||||
container.addEventListener('scroll', () => {
|
||||
clearTimeout(scrollTimeout);
|
||||
scrollTimeout = setTimeout(() => {
|
||||
const scrollTop = container.scrollTop;
|
||||
const scrollHeight = container.scrollHeight;
|
||||
const clientHeight = container.clientHeight;
|
||||
|
||||
// Calculate which "page" we should show
|
||||
const scrollPercentage = scrollTop / (scrollHeight - clientHeight);
|
||||
const targetIndex = Math.floor(scrollPercentage * (totalPosts - postsPerPage) / postsPerPage) * postsPerPage;
|
||||
|
||||
// When scrolled to bottom, show next page
|
||||
if (scrollTop + clientHeight >= scrollHeight - 100) {
|
||||
const nextIndex = currentStartIndex + postsPerPage;
|
||||
if (nextIndex < totalPosts) {
|
||||
renderGrid(nextIndex);
|
||||
container.scrollTop = 100; // Reset scroll for continuous experience
|
||||
}
|
||||
}
|
||||
}, 50);
|
||||
});
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
210
docs/examples/assets/virtual_scroll_news_feed.html
Normal file
@@ -0,0 +1,210 @@
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title>News Feed with Mixed Scroll Behavior</title>
|
||||
<style>
|
||||
body {
|
||||
font-family: Georgia, serif;
|
||||
margin: 0;
|
||||
padding: 20px;
|
||||
background-color: #f8f8f8;
|
||||
}
|
||||
|
||||
h1 {
|
||||
text-align: center;
|
||||
color: #1a1a1a;
|
||||
font-size: 32px;
|
||||
margin-bottom: 10px;
|
||||
}
|
||||
|
||||
.description {
|
||||
text-align: center;
|
||||
color: #666;
|
||||
margin-bottom: 20px;
|
||||
}
|
||||
|
||||
#newsContainer {
|
||||
max-width: 900px;
|
||||
margin: 0 auto;
|
||||
height: 700px;
|
||||
overflow-y: auto;
|
||||
background: white;
|
||||
box-shadow: 0 2px 10px rgba(0,0,0,0.1);
|
||||
padding: 20px;
|
||||
}
|
||||
|
||||
.article {
|
||||
margin-bottom: 30px;
|
||||
padding-bottom: 30px;
|
||||
border-bottom: 1px solid #e0e0e0;
|
||||
}
|
||||
|
||||
.article:last-child {
|
||||
border-bottom: none;
|
||||
}
|
||||
|
||||
.article-header {
|
||||
margin-bottom: 15px;
|
||||
}
|
||||
|
||||
.category {
|
||||
display: inline-block;
|
||||
background: #ff6b6b;
|
||||
color: white;
|
||||
padding: 4px 12px;
|
||||
font-size: 12px;
|
||||
text-transform: uppercase;
|
||||
border-radius: 3px;
|
||||
margin-bottom: 10px;
|
||||
}
|
||||
|
||||
.headline {
|
||||
font-size: 24px;
|
||||
font-weight: bold;
|
||||
color: #1a1a1a;
|
||||
margin: 10px 0;
|
||||
line-height: 1.3;
|
||||
}
|
||||
|
||||
.meta {
|
||||
color: #888;
|
||||
font-size: 14px;
|
||||
margin-bottom: 15px;
|
||||
}
|
||||
|
||||
.content {
|
||||
font-size: 16px;
|
||||
line-height: 1.8;
|
||||
color: #333;
|
||||
}
|
||||
|
||||
.featured {
|
||||
background: #fff9e6;
|
||||
padding: 20px;
|
||||
border-radius: 5px;
|
||||
margin-bottom: 30px;
|
||||
}
|
||||
|
||||
.featured .category {
|
||||
background: #ffa500;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<h1>📰 Dynamic News Feed</h1>
|
||||
<p class="description">Mixed behavior: Featured articles stay, regular articles use virtual scroll</p>
|
||||
<div id="newsContainer"></div>
|
||||
|
||||
<script>
|
||||
const container = document.getElementById('newsContainer');
|
||||
const totalArticles = 100;
|
||||
const articlesPerPage = 5;
|
||||
let currentRegularIndex = 0;
|
||||
|
||||
// Categories for variety
|
||||
        const categories = ['Politics', 'Technology', 'Business', 'Science', 'Sports', 'Entertainment'];

        // Generate article data
        const featuredArticles = [];
        const regularArticles = [];

        // 3 featured articles that always stay
        for (let i = 0; i < 3; i++) {
            featuredArticles.push({
                id: `featured-${i}`,
                category: 'Featured',
                headline: `Breaking: Major Story ${i + 1} That Stays Visible`,
                date: new Date().toLocaleDateString(),
                content: `This is featured article ${i + 1}. Featured articles remain in the DOM and are not replaced during scrolling. They provide important persistent content.`
            });
        }

        // Regular articles that get virtualized
        for (let i = 0; i < totalArticles; i++) {
            regularArticles.push({
                id: `article-${i}`,
                category: categories[i % categories.length],
                headline: `${categories[i % categories.length]} News: Article ${i + 1} of ${totalArticles}`,
                date: new Date(Date.now() - i * 86400000).toLocaleDateString(),
                content: `This is regular article ${i + 1}. These articles are replaced as you scroll to maintain performance. Only a subset is shown at any time. Article ID: ${i}`
            });
        }

        // Render articles - Featured stay, regular ones are replaced
        function renderArticles(regularStartIndex) {
            const html = [];

            // Always show featured articles
            featuredArticles.forEach(article => {
                html.push(`
                    <div class="article featured" data-article-id="${article.id}">
                        <div class="article-header">
                            <span class="category">${article.category}</span>
                            <h2 class="headline">${article.headline}</h2>
                            <div class="meta">📅 ${article.date}</div>
                        </div>
                        <div class="content">${article.content}</div>
                    </div>
                `);
            });

            // Add divider
            html.push('<div style="text-align: center; color: #999; margin: 20px 0;">— Latest News —</div>');

            // Show current page of regular articles (virtual scroll)
            const endIndex = Math.min(regularStartIndex + articlesPerPage, totalArticles);
            for (let i = regularStartIndex; i < endIndex; i++) {
                const article = regularArticles[i];
                html.push(`
                    <div class="article" data-article-id="${article.id}">
                        <div class="article-header">
                            <span class="category" style="background: ${getCategoryColor(article.category)}">${article.category}</span>
                            <h2 class="headline">${article.headline}</h2>
                            <div class="meta">📅 ${article.date}</div>
                        </div>
                        <div class="content">${article.content}</div>
                    </div>
                `);
            }

            container.innerHTML = html.join('');
            currentRegularIndex = regularStartIndex;
        }

        function getCategoryColor(category) {
            const colors = {
                'Politics': '#e74c3c',
                'Technology': '#3498db',
                'Business': '#2ecc71',
                'Science': '#9b59b6',
                'Sports': '#f39c12',
                'Entertainment': '#e91e63'
            };
            return colors[category] || '#95a5a6';
        }

        // Initial render
        renderArticles(0);

        // Handle scroll
        container.addEventListener('scroll', () => {
            const scrollTop = container.scrollTop;
            const scrollHeight = container.scrollHeight;
            const clientHeight = container.clientHeight;

            // When near bottom, load next page of regular articles
            if (scrollTop + clientHeight >= scrollHeight - 200) {
                const nextIndex = currentRegularIndex + articlesPerPage;
                if (nextIndex < totalArticles) {
                    renderArticles(nextIndex);
                    // Scroll to where regular articles start
                    const regularStart = document.querySelector('.article:not(.featured)');
                    if (regularStart) {
                        container.scrollTop = regularStart.offsetTop - 100;
                    }
                }
            }
        });
    </script>
</body>
</html>
122 docs/examples/assets/virtual_scroll_twitter_like.html Normal file
@@ -0,0 +1,122 @@
<!DOCTYPE html>
<html>
<head>
    <title>Twitter-like Virtual Scroll</title>
    <style>
        body {
            font-family: Arial, sans-serif;
            margin: 0;
            padding: 20px;
            background-color: #f0f2f5;
        }

        h1 {
            color: #1da1f2;
            text-align: center;
        }

        #timeline {
            max-width: 600px;
            margin: 0 auto;
            height: 600px;
            overflow-y: auto;
            background: white;
            border: 1px solid #e1e8ed;
            border-radius: 10px;
        }

        .tweet {
            padding: 15px;
            border-bottom: 1px solid #e1e8ed;
            min-height: 80px;
        }

        .tweet:hover {
            background-color: #f7f9fa;
        }

        .author {
            font-weight: bold;
            color: #14171a;
            margin-bottom: 5px;
        }

        .content {
            color: #14171a;
            line-height: 1.5;
        }

        .stats {
            color: #657786;
            font-size: 14px;
            margin-top: 10px;
        }
    </style>
</head>
<body>
    <h1>Virtual Scroll Demo - Twitter Style</h1>
    <p style="text-align: center; color: #666;">This simulates Twitter's timeline where content is replaced as you scroll</p>
    <div id="timeline"></div>

    <script>
        // Simulate Twitter-like virtual scrolling where DOM elements are replaced
        const timeline = document.getElementById('timeline');
        const totalTweets = 500;
        const tweetsPerPage = 10;
        let currentIndex = 0;

        // Generate fake tweet data
        const allTweets = [];
        for (let i = 0; i < totalTweets; i++) {
            allTweets.push({
                id: i,
                author: `User_${i + 1}`,
                content: `This is tweet #${i + 1} of ${totalTweets}. Virtual scrolling replaces DOM elements to maintain performance. Unique content ID: ${i}`,
                likes: Math.floor(Math.random() * 1000),
                retweets: Math.floor(Math.random() * 500)
            });
        }

        // Render tweets - REPLACES content
        function renderTweets(startIndex) {
            const tweets = [];
            const endIndex = Math.min(startIndex + tweetsPerPage, totalTweets);

            for (let i = startIndex; i < endIndex; i++) {
                const tweet = allTweets[i];
                tweets.push(`
                    <div class="tweet" data-tweet-id="${tweet.id}">
                        <div class="author">@${tweet.author}</div>
                        <div class="content">${tweet.content}</div>
                        <div class="stats">❤️ ${tweet.likes} | 🔁 ${tweet.retweets}</div>
                    </div>
                `);
            }

            // REPLACE entire content (virtual scroll behavior)
            timeline.innerHTML = tweets.join('');
            currentIndex = startIndex;
        }

        // Initial render
        renderTweets(0);

        // Handle scroll
        timeline.addEventListener('scroll', () => {
            const scrollTop = timeline.scrollTop;
            const scrollHeight = timeline.scrollHeight;
            const clientHeight = timeline.clientHeight;

            // When near bottom, load next page
            if (scrollTop + clientHeight >= scrollHeight - 100) {
                const nextIndex = currentIndex + tweetsPerPage;
                if (nextIndex < totalTweets) {
                    renderTweets(nextIndex);
                    // Small scroll adjustment for continuous scrolling
                    timeline.scrollTop = 50;
                }
            }
        });
    </script>
</body>
</html>
171 docs/examples/c4a_script/amazon_example/README.md Normal file
@@ -0,0 +1,171 @@
# Amazon R2D2 Product Search Example

A real-world demonstration of Crawl4AI's multi-step crawling with LLM-generated automation scripts.

## 🎯 What This Example Shows

This example demonstrates advanced Crawl4AI features:

- **LLM-Generated Scripts**: Automatically create C4A-Script from HTML snippets
- **Multi-Step Crawling**: Navigate through multiple pages using session persistence
- **Structured Data Extraction**: Extract product data using JSON CSS schemas
- **Visual Automation**: Watch the browser perform the search (headless=False)

## 🚀 How It Works

### 1. **Script Generation Phase**

The example uses `C4ACompiler.generate_script()` and `JsonCssExtractionStrategy.generate_schema()` to analyze Amazon's HTML and create two artifacts (both calls are sketched below):

- **Search Script**: Automates filling the search box and clicking search
- **Extraction Schema**: Defines how to extract product information
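
A minimal sketch of that generation step, mirroring the calls made in `amazon_r2d2_search.py`; the file paths and natural-language goals here are illustrative placeholders, not fixed values:

```python
from crawl4ai import JsonCssExtractionStrategy
from crawl4ai.script.c4a_compile import C4ACompiler

# Turn the saved search-bar HTML into a ready-to-run JS search script
search_script = C4ACompiler.generate_script(
    html=open("header.html").read(),
    query="Find the search box, type 'r2d2', then click the search button",
    mode="js",
)

# Turn one product card's HTML into a JSON CSS extraction schema
product_schema = JsonCssExtractionStrategy.generate_schema(
    html=open("product.html").read(),
    query="Extract title, price, rating, review count, and product URL",
)
```
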
### 2. **Crawling Workflow**

```
Homepage → Execute Search Script → Extract Products → Save Results
```

All steps use the same `session_id` to maintain browser state.
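
To make the multi-step idea concrete, here is a minimal, hedged sketch of two `arun()` calls sharing one session. The placeholder JS stands in for the generated search script, and `js_only` is assumed to be the standard flag for follow-up actions on the already-open tab:

```python
import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig

async def demo():
    search_js = "console.log('generated search script would run here')"  # placeholder

    async with AsyncWebCrawler() as crawler:
        # Call 1: open the homepage and keep the tab alive under a session_id
        await crawler.arun(
            url="https://www.amazon.com",
            config=CrawlerRunConfig(session_id="amazon_r2d2_session"),
        )

        # Call 2: reuse the same tab, run the search JS, wait for results
        result = await crawler.arun(
            url="https://www.amazon.com",
            config=CrawlerRunConfig(
                session_id="amazon_r2d2_session",
                js_code=search_js,
                js_only=True,                   # assumption: act on the already-open page
                wait_for=".s-search-results",
            ),
        )
        print(result.success)

asyncio.run(demo())
```
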
### 3. **Data Extraction**

Products are extracted with:

- Title, price, rating, reviews
- Delivery information
- Sponsored/Small Business badges
- Direct product URLs

## 📁 Files

- `amazon_r2d2_search.py` - Main example script
- `header.html` - Amazon search bar HTML (provided)
- `product.html` - Product card HTML (provided)
- **Generated files:**
  - `generated_search_script.js` - Auto-generated search automation
  - `generated_product_schema.json` - Auto-generated extraction rules
  - `extracted_products.json` - Final scraped data
  - `search_results_screenshot.png` - Visual proof of results

## 🏃 Running the Example

1. **Prerequisites**
   ```bash
   # Ensure Crawl4AI is installed
   pip install crawl4ai

   # Set up LLM API key (for script generation)
   export OPENAI_API_KEY="your-key-here"
   ```

2. **Run the scraper**
   ```bash
   python amazon_r2d2_search.py
   ```

3. **Watch the magic!**
   - Browser window opens (not headless)
   - Navigates to Amazon.com
   - Searches for "r2d2"
   - Extracts all products
   - Saves results to JSON

## 📊 Sample Output

```json
[
  {
    "title": "Death Star BB8 R2D2 Golf Balls with 20 Printed tees",
    "price": "29.95",
    "rating": "4.7",
    "reviews_count": "184",
    "delivery": "FREE delivery Thu, Jun 19",
    "url": "https://www.amazon.com/Death-Star-R2D2-Balls-Printed/dp/B081XSYZMS",
    "is_sponsored": true,
    "small_business": true
  },
  ...
]
```

## 🔍 Key Features Demonstrated

### Session Persistence
```python
# Same session_id across multiple arun() calls
config = CrawlerRunConfig(
    session_id="amazon_r2d2_session",
    # ... other settings
)
```

### LLM Script Generation
```python
# Generate automation from natural language + HTML
script = C4ACompiler.generate_script(
    html=header_html,
    query="Find search box, type 'r2d2', click search",
    mode="c4a"
)
```

### JSON CSS Extraction
```python
# Structured data extraction with CSS selectors
schema = {
    "baseSelector": "[data-component-type='s-search-result']",
    "fields": [
        {"name": "title", "selector": "h2 a span", "type": "text"},
        {"name": "price", "selector": ".a-price-whole", "type": "text"}
    ]
}
```

## 🛠️ Customization

### Search Different Products
Change the search term in the script generation:
```python
search_goal = """
...
3. Type "star wars lego" into the search box
...
"""
```

### Extract More Data
Add fields to the extraction schema:
```python
"fields": [
    # ... existing fields
    {"name": "prime", "selector": ".s-prime", "type": "exists"},
    {"name": "image_url", "selector": "img.s-image", "type": "attribute", "attribute": "src"}
]
```

### Use Different Sites
Adapt the approach for other e-commerce sites (see the sketch below) by:
1. Providing their HTML snippets
2. Adjusting the search goals
3. Updating the extraction schema
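
A rough sketch of those three swap points for another store; the file names, query text, and selectors below are hypothetical stand-ins, not part of this example:

```python
from crawl4ai import JsonCssExtractionStrategy
from crawl4ai.script.c4a_compile import C4ACompiler

# 1. Provide the other site's HTML snippets (hypothetical files)
header_html = open("othershop_header.html").read()
product_html = open("othershop_product.html").read()

# 2. Adjust the search goal in plain language
search_script = C4ACompiler.generate_script(
    html=header_html,
    query="Find the search field, type 'lego', submit the search",
    mode="js",
)

# 3. Regenerate the extraction schema from one of their product cards
schema = JsonCssExtractionStrategy.generate_schema(
    html=product_html,
    query="Extract product name, price, and product link",
)

# Then reuse the same crawl workflow, changing only wait_for to a selector
# that actually exists on the other site's results page.
```
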
## 🎓 Learning Points

1. **No Manual Scripting**: LLM generates all automation code
2. **Session Management**: Maintain state across page navigations
3. **Robust Extraction**: Handle dynamic content and multiple products
4. **Error Handling**: Graceful fallbacks if generation fails

## 🐛 Troubleshooting

- **"No products found"**: Check if Amazon's HTML structure changed
- **"Script generation failed"**: Ensure the LLM API key is configured
- **"Page timeout"**: Increase wait times in the config (see the sketch below)
- **"Session lost"**: Ensure the same session_id is used consistently
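
For the timeout case, a hedged sketch of the relevant knobs; the values are illustrative, and `page_timeout` is assumed to be the standard millisecond-based setting:

```python
from crawl4ai import CrawlerRunConfig

config = CrawlerRunConfig(
    session_id="amazon_r2d2_session",
    wait_for=".s-search-results",      # selector the example already waits on
    delay_before_return_html=6.0,      # raise from the example's 3.0 seconds
    page_timeout=90_000,               # assumption: overall page timeout in ms
)
```
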
## 📚 Next Steps

- Try searching for different products
- Add pagination to get more results (see the sketch below)
- Extract product detail pages
- Compare prices across different sellers
- Build a price monitoring system
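
For the pagination idea, a rough sketch of a follow-up call inside the example's existing `async with` block; the `.s-pagination-next` selector is an assumption about Amazon's current markup, and `js_only` is assumed to keep acting on the already-open results page:

```python
# Inside the existing `async with AsyncWebCrawler(...) as crawler:` block,
# after the first search call has produced `product_schema`:
next_page_config = CrawlerRunConfig(
    session_id="amazon_r2d2_session",                 # same tab as the search
    js_code="document.querySelector('.s-pagination-next')?.click();",
    js_only=True,
    wait_for=".s-search-results",
    extraction_strategy=JsonCssExtractionStrategy(schema=product_schema),
)
more_results = await crawler.arun(url="https://www.amazon.com", config=next_page_config)
```
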
---

This example shows the power of combining LLM intelligence with web automation: the generated scripts adapt to HTML changes, and natural-language instructions make automation accessible to everyone.
202 docs/examples/c4a_script/amazon_example/amazon_r2d2_search.py Normal file
@@ -0,0 +1,202 @@
#!/usr/bin/env python3
"""
Amazon R2D2 Product Search Example using Crawl4AI

This example demonstrates:
1. Using an LLM (via C4ACompiler) to generate automation scripts from HTML snippets
2. Multi-step crawling with session persistence
3. JSON CSS extraction for structured product data
4. Complete workflow: homepage → search → extract products

Requirements:
- Crawl4AI with generate_script support
- LLM API key (configured in environment)
"""

import asyncio
import json
import os
from pathlib import Path
from typing import List, Dict, Any

from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
from crawl4ai import JsonCssExtractionStrategy
from crawl4ai.script.c4a_compile import C4ACompiler


class AmazonR2D2Scraper:
    def __init__(self):
        self.base_dir = Path(__file__).parent
        self.search_script_path = self.base_dir / "generated_search_script.js"
        self.schema_path = self.base_dir / "generated_product_schema.json"
        self.results_path = self.base_dir / "extracted_products.json"
        self.session_id = "amazon_r2d2_session"

    async def generate_search_script(self) -> str:
        """Generate JavaScript for the Amazon search interaction"""
        print("🔧 Generating search script from header.html...")

        # Check if already generated
        if self.search_script_path.exists():
            print("✅ Using cached search script")
            return self.search_script_path.read_text()

        # Read the header HTML
        header_html = (self.base_dir / "header.html").read_text()

        # Generate script using LLM
        search_goal = """
        Find the search box and search button, then:
        1. Wait for the search box to be visible
        2. Click on the search box to focus it
        3. Clear any existing text
        4. Type "r2d2" into the search box
        5. Click the search submit button
        6. Wait for navigation to complete and search results to appear
        """

        try:
            script = C4ACompiler.generate_script(
                html=header_html,
                query=search_goal,
                mode="js"
            )

            # Save for future use
            self.search_script_path.write_text(script)
            print("✅ Search script generated and saved!")
            print(f"📄 Script:\n{script}")
            return script

        except Exception as e:
            print(f"❌ Error generating search script: {e}")
            raise  # the crawl cannot proceed without a search script

    async def generate_product_schema(self) -> Dict[str, Any]:
        """Generate JSON CSS extraction schema from product HTML"""
        print("\n🔧 Generating product extraction schema...")

        # Check if already generated
        if self.schema_path.exists():
            print("✅ Using cached extraction schema")
            return json.loads(self.schema_path.read_text())

        # Read the product HTML
        product_html = (self.base_dir / "product.html").read_text()

        # Generate extraction schema using LLM
        schema_goal = """
        Create a JSON CSS extraction schema to extract:
        - Product title (from the h2 element)
        - Price (the dollar amount)
        - Rating (star rating value)
        - Number of reviews
        - Delivery information
        - Product URL (from the main product link)
        - Whether it's sponsored
        - Small business badge if present

        The schema should handle multiple products on a search results page.
        """

        try:
            # Generate the extraction schema from the sample product HTML
            schema = JsonCssExtractionStrategy.generate_schema(
                html=product_html,
                query=schema_goal,
            )

            # Save for future use
            self.schema_path.write_text(json.dumps(schema, indent=2))
            print("✅ Extraction schema generated and saved!")
            print(f"📄 Schema fields: {[f['name'] for f in schema['fields']]}")
            return schema

        except Exception as e:
            print(f"❌ Error generating schema: {e}")
            raise  # extraction is impossible without a schema

    async def crawl_amazon(self):
        """Main crawling logic: search Amazon and extract products in one session"""
        print("\n🚀 Starting Amazon R2D2 product search...")

        # Generate scripts and schemas
        search_script = await self.generate_search_script()
        product_schema = await self.generate_product_schema()

        # Configure browser (headless=False to see the action)
        browser_config = BrowserConfig(
            headless=False,
            verbose=True,
            viewport_width=1920,
            viewport_height=1080
        )

        async with AsyncWebCrawler(config=browser_config) as crawler:
            print("\n📍 Step 1: Navigate to Amazon and search for R2D2")

            # Navigate to Amazon, run the generated search JS, then extract products
            search_config = CrawlerRunConfig(
                session_id=self.session_id,
                js_code=f"(() => {{ {search_script} }})()",  # Execute generated JS
                wait_for=".s-search-results",  # Wait for search results
                extraction_strategy=JsonCssExtractionStrategy(schema=product_schema),
                delay_before_return_html=3.0  # Give time for results to load
            )

            results = await crawler.arun(
                url="https://www.amazon.com",
                config=search_config
            )

            if not results.success:
                print("❌ Failed to search Amazon")
                print(f"Error: {results.error_message}")
                return

            print("✅ Search completed successfully!")
            print("✅ Product extraction completed!")

            # Extract and save results
            print("\n📍 Extracting product data")

            if results[0].extracted_content:
                products = json.loads(results[0].extracted_content)
                print(f"🔍 Found {len(products)} products in search results")

                print(f"✅ Extracted {len(products)} R2D2 products")

                # Save results
                self.results_path.write_text(
                    json.dumps(products, indent=2)
                )
                print(f"💾 Results saved to: {self.results_path}")

                # Print sample results
                print("\n📊 Sample Results:")
                for i, product in enumerate(products[:3], 1):
                    print(f"\n{i}. {product['title'][:60]}...")
                    print(f"   Price: ${product['price']}")
                    print(f"   Rating: {product['rating']} ({product['number_of_reviews']} reviews)")
                    print(f"   {'🏪 Small Business' if product['small_business_badge'] else ''}")
                    print(f"   {'📢 Sponsored' if product['sponsored'] else ''}")

            else:
                print("❌ No products extracted")


async def main():
    """Run the Amazon scraper"""
    scraper = AmazonR2D2Scraper()
    await scraper.crawl_amazon()

    print("\n🎉 Amazon R2D2 search example completed!")
    print("Check the generated files:")
    print("  - generated_search_script.js")
    print("  - generated_product_schema.json")
    print("  - extracted_products.json")
    print("  - search_results_screenshot.png")


if __name__ == "__main__":
    asyncio.run(main())
Some files were not shown because too many files have changed in this diff.