Merge branch 'main' of https://github.com/unclecode/crawl4ai
docs/examples/docker_example.py (new file, +357 lines)
@@ -0,0 +1,357 @@
import requests
import json
import time
import sys
import base64
import os
from typing import Dict, Any


class Crawl4AiTester:
    def __init__(self, base_url: str = "http://localhost:11235", api_token: str = None):
        self.base_url = base_url
        self.api_token = api_token or os.getenv('CRAWL4AI_API_TOKEN') or "test_api_code"  # Check environment variable as fallback
        self.headers = {'Authorization': f'Bearer {self.api_token}'} if self.api_token else {}

    def submit_and_wait(self, request_data: Dict[str, Any], timeout: int = 300) -> Dict[str, Any]:
        # Submit crawl job
        response = requests.post(f"{self.base_url}/crawl", json=request_data, headers=self.headers)
        if response.status_code == 403:
            raise Exception("API token is invalid or missing")
        task_id = response.json()["task_id"]
        print(f"Task ID: {task_id}")

        # Poll for result
        start_time = time.time()
        while True:
            if time.time() - start_time > timeout:
                raise TimeoutError(f"Task {task_id} did not complete within {timeout} seconds")

            result = requests.get(f"{self.base_url}/task/{task_id}", headers=self.headers)
            status = result.json()

            if status["status"] == "failed":
                print("Task failed:", status.get("error"))
                raise Exception(f"Task failed: {status.get('error')}")

            if status["status"] == "completed":
                return status

            time.sleep(2)

    def submit_sync(self, request_data: Dict[str, Any]) -> Dict[str, Any]:
        response = requests.post(f"{self.base_url}/crawl_sync", json=request_data, headers=self.headers, timeout=60)
        if response.status_code == 408:
            raise TimeoutError("Task did not complete within server timeout")
        response.raise_for_status()
        return response.json()

    def crawl_direct(self, request_data: Dict[str, Any]) -> Dict[str, Any]:
        """Directly crawl without using task queue"""
        response = requests.post(
            f"{self.base_url}/crawl_direct",
            json=request_data,
            headers=self.headers
        )
        response.raise_for_status()
        return response.json()


def test_docker_deployment(version="basic"):
    tester = Crawl4AiTester(
        base_url="http://localhost:11235",
        # base_url="https://api.crawl4ai.com"  # just for example
        # api_token="test"  # just for example
    )
    print(f"Testing Crawl4AI Docker {version} version")

    # Health check with timeout and retry
    max_retries = 5
    for i in range(max_retries):
        try:
            health = requests.get(f"{tester.base_url}/health", timeout=10)
            print("Health check:", health.json())
            break
        except requests.exceptions.RequestException as e:
            if i == max_retries - 1:
                print(f"Failed to connect after {max_retries} attempts")
                sys.exit(1)
            print(f"Waiting for service to start (attempt {i+1}/{max_retries})...")
            time.sleep(5)

    # Test cases based on version
    test_basic_crawl_direct(tester)
    test_basic_crawl(tester)
    test_basic_crawl(tester)
    test_basic_crawl_sync(tester)

    if version in ["full", "transformer"]:
        test_cosine_extraction(tester)

    test_js_execution(tester)
    test_css_selector(tester)
    test_structured_extraction(tester)
    test_llm_extraction(tester)
    test_llm_with_ollama(tester)
    test_screenshot(tester)


def test_basic_crawl(tester: Crawl4AiTester):
    print("\n=== Testing Basic Crawl ===")
    request = {
        "urls": "https://www.nbcnews.com/business",
        "priority": 10,
        "session_id": "test"
    }

    result = tester.submit_and_wait(request)
    print(f"Basic crawl result length: {len(result['result']['markdown'])}")
    assert result["result"]["success"]
    assert len(result["result"]["markdown"]) > 0


def test_basic_crawl_sync(tester: Crawl4AiTester):
    print("\n=== Testing Basic Crawl (Sync) ===")
    request = {
        "urls": "https://www.nbcnews.com/business",
        "priority": 10,
        "session_id": "test"
    }

    result = tester.submit_sync(request)
    print(f"Basic crawl result length: {len(result['result']['markdown'])}")
    assert result['status'] == 'completed'
    assert result['result']['success']
    assert len(result['result']['markdown']) > 0


def test_basic_crawl_direct(tester: Crawl4AiTester):
    print("\n=== Testing Basic Crawl (Direct) ===")
    request = {
        "urls": "https://www.nbcnews.com/business",
        "priority": 10,
        # "session_id": "test"
        "cache_mode": "bypass"  # or "enabled", "disabled", "read_only", "write_only"
    }

    result = tester.crawl_direct(request)
    print(f"Basic crawl result length: {len(result['result']['markdown'])}")
    assert result['result']['success']
    assert len(result['result']['markdown']) > 0


def test_js_execution(tester: Crawl4AiTester):
    print("\n=== Testing JS Execution ===")
    request = {
        "urls": "https://www.nbcnews.com/business",
        "priority": 8,
        "js_code": [
            "const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More')); loadMoreButton && loadMoreButton.click();"
        ],
        "wait_for": "article.tease-card:nth-child(10)",
        "crawler_params": {
            "headless": True
        }
    }

    result = tester.submit_and_wait(request)
    print(f"JS execution result length: {len(result['result']['markdown'])}")
    assert result["result"]["success"]


def test_css_selector(tester: Crawl4AiTester):
    print("\n=== Testing CSS Selector ===")
    request = {
        "urls": "https://www.nbcnews.com/business",
        "priority": 7,
        "css_selector": ".wide-tease-item__description",
        "crawler_params": {
            "headless": True
        },
        "extra": {"word_count_threshold": 10}
    }

    result = tester.submit_and_wait(request)
    print(f"CSS selector result length: {len(result['result']['markdown'])}")
    assert result["result"]["success"]


def test_structured_extraction(tester: Crawl4AiTester):
    print("\n=== Testing Structured Extraction ===")
    schema = {
        "name": "Coinbase Crypto Prices",
        "baseSelector": ".cds-tableRow-t45thuk",
        "fields": [
            {
                "name": "crypto",
                "selector": "td:nth-child(1) h2",
                "type": "text",
            },
            {
                "name": "symbol",
                "selector": "td:nth-child(1) p",
                "type": "text",
            },
            {
                "name": "price",
                "selector": "td:nth-child(2)",
                "type": "text",
            }
        ],
    }

    request = {
        "urls": "https://www.coinbase.com/explore",
        "priority": 9,
        "extraction_config": {
            "type": "json_css",
            "params": {
                "schema": schema
            }
        }
    }

    result = tester.submit_and_wait(request)
    extracted = json.loads(result["result"]["extracted_content"])
    print(f"Extracted {len(extracted)} items")
    print("Sample item:", json.dumps(extracted[0], indent=2))
    assert result["result"]["success"]
    assert len(extracted) > 0


def test_llm_extraction(tester: Crawl4AiTester):
    print("\n=== Testing LLM Extraction ===")
    schema = {
        "type": "object",
        "properties": {
            "model_name": {
                "type": "string",
                "description": "Name of the OpenAI model."
            },
            "input_fee": {
                "type": "string",
                "description": "Fee for input token for the OpenAI model."
            },
            "output_fee": {
                "type": "string",
                "description": "Fee for output token for the OpenAI model."
            }
        },
        "required": ["model_name", "input_fee", "output_fee"]
    }

    request = {
        "urls": "https://openai.com/api/pricing",
        "priority": 8,
        "extraction_config": {
            "type": "llm",
            "params": {
                "provider": "openai/gpt-4o-mini",
                "api_token": os.getenv("OPENAI_API_KEY"),
                "schema": schema,
                "extraction_type": "schema",
                "instruction": """From the crawled content, extract all mentioned model names along with their fees for input and output tokens."""
            }
        },
        "crawler_params": {"word_count_threshold": 1}
    }

    try:
        result = tester.submit_and_wait(request)
        extracted = json.loads(result["result"]["extracted_content"])
        print(f"Extracted {len(extracted)} model pricing entries")
        print("Sample entry:", json.dumps(extracted[0], indent=2))
        assert result["result"]["success"]
    except Exception as e:
        print(f"LLM extraction test failed (might be due to missing API key): {str(e)}")


def test_llm_with_ollama(tester: Crawl4AiTester):
    print("\n=== Testing LLM with Ollama ===")
    schema = {
        "type": "object",
        "properties": {
            "article_title": {
                "type": "string",
                "description": "The main title of the news article"
            },
            "summary": {
                "type": "string",
                "description": "A brief summary of the article content"
            },
            "main_topics": {
                "type": "array",
                "items": {"type": "string"},
                "description": "Main topics or themes discussed in the article"
            }
        }
    }

    request = {
        "urls": "https://www.nbcnews.com/business",
        "priority": 8,
        "extraction_config": {
            "type": "llm",
            "params": {
                "provider": "ollama/llama2",
                "schema": schema,
                "extraction_type": "schema",
                "instruction": "Extract the main article information including title, summary, and main topics."
            }
        },
        "extra": {"word_count_threshold": 1},
        "crawler_params": {"verbose": True}
    }

    try:
        result = tester.submit_and_wait(request)
        extracted = json.loads(result["result"]["extracted_content"])
        print("Extracted content:", json.dumps(extracted, indent=2))
        assert result["result"]["success"]
    except Exception as e:
        print(f"Ollama extraction test failed: {str(e)}")


def test_cosine_extraction(tester: Crawl4AiTester):
    print("\n=== Testing Cosine Extraction ===")
    request = {
        "urls": "https://www.nbcnews.com/business",
        "priority": 8,
        "extraction_config": {
            "type": "cosine",
            "params": {
                "semantic_filter": "business finance economy",
                "word_count_threshold": 10,
                "max_dist": 0.2,
                "top_k": 3
            }
        }
    }

    try:
        result = tester.submit_and_wait(request)
        extracted = json.loads(result["result"]["extracted_content"])
        print(f"Extracted {len(extracted)} text clusters")
        print("First cluster tags:", extracted[0]["tags"])
        assert result["result"]["success"]
    except Exception as e:
        print(f"Cosine extraction test failed: {str(e)}")


def test_screenshot(tester: Crawl4AiTester):
    print("\n=== Testing Screenshot ===")
    request = {
        "urls": "https://www.nbcnews.com/business",
        "priority": 5,
        "screenshot": True,
        "crawler_params": {
            "headless": True
        }
    }

    result = tester.submit_and_wait(request)
    print("Screenshot captured:", bool(result["result"]["screenshot"]))

    if result["result"]["screenshot"]:
        # Save screenshot
        screenshot_data = base64.b64decode(result["result"]["screenshot"])
        with open("test_screenshot.jpg", "wb") as f:
            f.write(screenshot_data)
        print("Screenshot saved as test_screenshot.jpg")

    assert result["result"]["success"]


if __name__ == "__main__":
    version = sys.argv[1] if len(sys.argv) > 1 else "basic"
    # version = "full"
    test_docker_deployment(version)
@@ -13,7 +13,9 @@ import re
 from typing import Dict, List
 from bs4 import BeautifulSoup
 from pydantic import BaseModel, Field
-from crawl4ai import AsyncWebCrawler
+from crawl4ai import AsyncWebCrawler, CacheMode
+from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator
+from crawl4ai.content_filter_strategy import BM25ContentFilter, PruningContentFilter
 from crawl4ai.extraction_strategy import (
     JsonCssExtractionStrategy,
     LLMExtractionStrategy,
@@ -30,7 +32,7 @@ print("Website: https://crawl4ai.com")
 async def simple_crawl():
     print("\n--- Basic Usage ---")
     async with AsyncWebCrawler(verbose=True) as crawler:
-        result = await crawler.arun(url="https://www.nbcnews.com/business")
+        result = await crawler.arun(url="https://www.nbcnews.com/business", cache_mode=CacheMode.BYPASS)
         print(result.markdown[:500])  # Print first 500 characters

 async def simple_example_with_running_js_code():
@@ -51,7 +53,7 @@ async def simple_example_with_running_js_code():
             url="https://www.nbcnews.com/business",
             js_code=js_code,
             # wait_for=wait_for,
-            bypass_cache=True,
+            cache_mode=CacheMode.BYPASS,
         )
         print(result.markdown[:500])  # Print first 500 characters

@@ -61,7 +63,7 @@ async def simple_example_with_css_selector():
         result = await crawler.arun(
             url="https://www.nbcnews.com/business",
             css_selector=".wide-tease-item__description",
-            bypass_cache=True,
+            cache_mode=CacheMode.BYPASS,
         )
         print(result.markdown[:500])  # Print first 500 characters

@@ -71,19 +73,20 @@ async def use_proxy():
         "Note: Replace 'http://your-proxy-url:port' with a working proxy to run this example."
     )
-    # Uncomment and modify the following lines to use a proxy
-    # async with AsyncWebCrawler(verbose=True, proxy="http://your-proxy-url:port") as crawler:
-    #     result = await crawler.arun(
-    #         url="https://www.nbcnews.com/business",
-    #         bypass_cache=True
-    #     )
-    #     print(result.markdown[:500])  # Print first 500 characters
+    async with AsyncWebCrawler(verbose=True, proxy="http://your-proxy-url:port") as crawler:
+        result = await crawler.arun(
+            url="https://www.nbcnews.com/business",
+            cache_mode=CacheMode.BYPASS
+        )
+        if result.success:
+            print(result.markdown[:500])  # Print first 500 characters

 async def capture_and_save_screenshot(url: str, output_path: str):
     async with AsyncWebCrawler(verbose=True) as crawler:
         result = await crawler.arun(
             url=url,
             screenshot=True,
-            bypass_cache=True
+            cache_mode=CacheMode.BYPASS
         )

         if result.success and result.screenshot:
@@ -132,48 +135,75 @@ async def extract_structured_data_using_llm(provider: str, api_token: str = None):
             {"model_name": "GPT-4", "input_fee": "US$10.00 / 1M tokens", "output_fee": "US$30.00 / 1M tokens"}.""",
             extra_args=extra_args
         ),
-        bypass_cache=True,
+        cache_mode=CacheMode.BYPASS,
     )
     print(result.extracted_content)

 async def extract_structured_data_using_css_extractor():
     print("\n--- Using JsonCssExtractionStrategy for Fast Structured Output ---")
     schema = {
-        "name": "Coinbase Crypto Prices",
-        "baseSelector": ".cds-tableRow-t45thuk",
-        "fields": [
-            {
-                "name": "crypto",
-                "selector": "td:nth-child(1) h2",
-                "type": "text",
-            },
-            {
-                "name": "symbol",
-                "selector": "td:nth-child(1) p",
-                "type": "text",
-            },
-            {
-                "name": "price",
-                "selector": "td:nth-child(2)",
-                "type": "text",
-            }
-        ],
-    }
+        "name": "KidoCode Courses",
+        "baseSelector": "section.charge-methodology .w-tab-content > div",
+        "fields": [
+            {
+                "name": "section_title",
+                "selector": "h3.heading-50",
+                "type": "text",
+            },
+            {
+                "name": "section_description",
+                "selector": ".charge-content",
+                "type": "text",
+            },
+            {
+                "name": "course_name",
+                "selector": ".text-block-93",
+                "type": "text",
+            },
+            {
+                "name": "course_description",
+                "selector": ".course-content-text",
+                "type": "text",
+            },
+            {
+                "name": "course_icon",
+                "selector": ".image-92",
+                "type": "attribute",
+                "attribute": "src"
+            }
+        ]
+    }

-    extraction_strategy = JsonCssExtractionStrategy(schema, verbose=True)
-
-    async with AsyncWebCrawler(verbose=True) as crawler:
+    async with AsyncWebCrawler(
+        headless=True,
+        verbose=True
+    ) as crawler:
+
+        # Create the JavaScript that handles clicking multiple times
+        js_click_tabs = """
+        (async () => {
+            const tabs = document.querySelectorAll("section.charge-methodology .tabs-menu-3 > div");
+
+            for(let tab of tabs) {
+                // scroll to the tab
+                tab.scrollIntoView();
+                tab.click();
+                // Wait for content to load and animations to complete
+                await new Promise(r => setTimeout(r, 500));
+            }
+        })();
+        """
+
         result = await crawler.arun(
-            url="https://www.coinbase.com/explore",
-            extraction_strategy=extraction_strategy,
-            bypass_cache=True,
+            url="https://www.kidocode.com/degrees/technology",
+            extraction_strategy=JsonCssExtractionStrategy(schema, verbose=True),
+            js_code=[js_click_tabs],
+            cache_mode=CacheMode.BYPASS
         )

         assert result.success, "Failed to crawl the page"

-        news_teasers = json.loads(result.extracted_content)
-        print(f"Successfully extracted {len(news_teasers)} news teasers")
-        print(json.dumps(news_teasers[0], indent=2))
+        companies = json.loads(result.extracted_content)
+        print(f"Successfully extracted {len(companies)} companies")
+        print(json.dumps(companies[0], indent=2))

 # Advanced Session-Based Crawling with Dynamic Content 🔄
 async def crawl_dynamic_content_pages_method_1():
@@ -213,7 +243,7 @@ async def crawl_dynamic_content_pages_method_1():
                 session_id=session_id,
                 css_selector="li.Box-sc-g0xbh4-0",
                 js=js_next_page if page > 0 else None,
-                bypass_cache=True,
+                cache_mode=CacheMode.BYPASS,
                 js_only=page > 0,
                 headless=False,
             )
@@ -282,7 +312,7 @@ async def crawl_dynamic_content_pages_method_2():
                 extraction_strategy=extraction_strategy,
                 js_code=js_next_page_and_wait if page > 0 else None,
                 js_only=page > 0,
-                bypass_cache=True,
+                cache_mode=CacheMode.BYPASS,
                 headless=False,
             )

@@ -343,7 +373,7 @@ async def crawl_dynamic_content_pages_method_3():
                 js_code=js_next_page if page > 0 else None,
                 wait_for=wait_for if page > 0 else None,
                 js_only=page > 0,
-                bypass_cache=True,
+                cache_mode=CacheMode.BYPASS,
                 headless=False,
             )

@@ -361,21 +391,21 @@ async def crawl_custom_browser_type():
     # Use Firefox
     start = time.time()
     async with AsyncWebCrawler(browser_type="firefox", verbose=True, headless=True) as crawler:
-        result = await crawler.arun(url="https://www.example.com", bypass_cache=True)
+        result = await crawler.arun(url="https://www.example.com", cache_mode=CacheMode.BYPASS)
         print(result.markdown[:500])
         print("Time taken: ", time.time() - start)

     # Use WebKit
     start = time.time()
     async with AsyncWebCrawler(browser_type="webkit", verbose=True, headless=True) as crawler:
-        result = await crawler.arun(url="https://www.example.com", bypass_cache=True)
+        result = await crawler.arun(url="https://www.example.com", cache_mode=CacheMode.BYPASS)
         print(result.markdown[:500])
         print("Time taken: ", time.time() - start)

     # Use Chromium (default)
     start = time.time()
     async with AsyncWebCrawler(verbose=True, headless=True) as crawler:
-        result = await crawler.arun(url="https://www.example.com", bypass_cache=True)
+        result = await crawler.arun(url="https://www.example.com", cache_mode=CacheMode.BYPASS)
         print(result.markdown[:500])
         print("Time taken: ", time.time() - start)

@@ -384,7 +414,7 @@ async def crawl_with_user_simultion():
         url = "YOUR-URL-HERE"
         result = await crawler.arun(
             url=url,
-            bypass_cache=True,
+            cache_mode=CacheMode.BYPASS,
             magic=True,  # Automatically detects and removes overlays, popups, and other elements that block content
             # simulate_user=True,  # Causes a series of random mouse movements and clicks to simulate user interaction
             # override_navigator=True  # Overrides the navigator object to make it look like a real user
@@ -408,7 +438,7 @@ async def speed_comparison():
         params={'formats': ['markdown', 'html']}
     )
     end = time.time()
-    print("Firecrawl (simulated):")
+    print("Firecrawl:")
     print(f"Time taken: {end - start:.2f} seconds")
     print(f"Content length: {len(scrape_status['markdown'])} characters")
     print(f"Images found: {scrape_status['markdown'].count('cldnry.s-nbcnews.com')}")
@@ -420,7 +450,7 @@ async def speed_comparison():
         result = await crawler.arun(
             url="https://www.nbcnews.com/business",
             word_count_threshold=0,
-            bypass_cache=True,
+            cache_mode=CacheMode.BYPASS,
             verbose=False,
         )
         end = time.time()
@@ -430,6 +460,26 @@ async def speed_comparison():
         print(f"Images found: {result.markdown.count('cldnry.s-nbcnews.com')}")
         print()

+        # Crawl4AI with advanced content filtering
+        start = time.time()
+        result = await crawler.arun(
+            url="https://www.nbcnews.com/business",
+            word_count_threshold=0,
+            markdown_generator=DefaultMarkdownGenerator(
+                content_filter=PruningContentFilter(threshold=0.48, threshold_type="fixed", min_word_threshold=0)
+                # content_filter=BM25ContentFilter(user_query=None, bm25_threshold=1.0)
+            ),
+            cache_mode=CacheMode.BYPASS,
+            verbose=False,
+        )
+        end = time.time()
+        print("Crawl4AI (Markdown Plus):")
+        print(f"Time taken: {end - start:.2f} seconds")
+        print(f"Content length: {len(result.markdown_v2.raw_markdown)} characters")
+        print(f"Fit Markdown: {len(result.markdown_v2.fit_markdown)} characters")
+        print(f"Images found: {result.markdown.count('cldnry.s-nbcnews.com')}")
+        print()
+
         # Crawl4AI with JavaScript execution
         start = time.time()
         result = await crawler.arun(
@@ -438,13 +488,18 @@ async def speed_comparison():
                 "const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More')); loadMoreButton && loadMoreButton.click();"
             ],
             word_count_threshold=0,
-            bypass_cache=True,
+            cache_mode=CacheMode.BYPASS,
+            markdown_generator=DefaultMarkdownGenerator(
+                content_filter=PruningContentFilter(threshold=0.48, threshold_type="fixed", min_word_threshold=0)
+                # content_filter=BM25ContentFilter(user_query=None, bm25_threshold=1.0)
+            ),
             verbose=False,
         )
         end = time.time()
         print("Crawl4AI (with JavaScript execution):")
         print(f"Time taken: {end - start:.2f} seconds")
         print(f"Content length: {len(result.markdown)} characters")
+        print(f"Fit Markdown: {len(result.markdown_v2.fit_markdown)} characters")
         print(f"Images found: {result.markdown.count('cldnry.s-nbcnews.com')}")

     print("\nNote on Speed Comparison:")
@@ -483,7 +538,7 @@ async def generate_knowledge_graph():
         url = "https://paulgraham.com/love.html"
         result = await crawler.arun(
             url=url,
-            bypass_cache=True,
+            cache_mode=CacheMode.BYPASS,
             extraction_strategy=extraction_strategy,
             # magic=True
         )
@@ -492,19 +547,50 @@ async def generate_knowledge_graph():
             f.write(result.extracted_content)

 async def fit_markdown_remove_overlay():
-    async with AsyncWebCrawler(headless=False) as crawler:
-        url = "https://janineintheworld.com/places-to-visit-in-central-mexico"
+    async with AsyncWebCrawler(
+        headless=True,  # Set to False to see what is happening
+        verbose=True,
+        user_agent_mode="random",
+        user_agent_generator_config={
+            "device_type": "mobile",
+            "os_type": "android"
+        },
+    ) as crawler:
         result = await crawler.arun(
-            url=url,
-            bypass_cache=True,
-            word_count_threshold=10,
-            remove_overlay_elements=True,
-            screenshot=True
+            url='https://www.kidocode.com/degrees/technology',
+            cache_mode=CacheMode.BYPASS,
+            markdown_generator=DefaultMarkdownGenerator(
+                content_filter=PruningContentFilter(threshold=0.48, threshold_type="fixed", min_word_threshold=0),
+                options={
+                    "ignore_links": True
+                }
+            ),
+            # markdown_generator=DefaultMarkdownGenerator(
+            #     content_filter=BM25ContentFilter(user_query=None, bm25_threshold=1.0),
+            #     options={
+            #         "ignore_links": True
+            #     }
+            # ),
         )
-        # Save markdown to file
-        with open(os.path.join(__location__, "mexico_places.md"), "w") as f:
-            f.write(result.fit_markdown)

+        if result.success:
+            print(len(result.markdown_v2.raw_markdown))
+            print(len(result.markdown_v2.markdown_with_citations))
+            print(len(result.markdown_v2.fit_markdown))
+
+            # Save clean html
+            with open(os.path.join(__location__, "output/cleaned_html.html"), "w") as f:
+                f.write(result.cleaned_html)
+
+            with open(os.path.join(__location__, "output/output_raw_markdown.md"), "w") as f:
+                f.write(result.markdown_v2.raw_markdown)
+
+            with open(os.path.join(__location__, "output/output_markdown_with_citations.md"), "w") as f:
+                f.write(result.markdown_v2.markdown_with_citations)
+
+            with open(os.path.join(__location__, "output/output_fit_markdown.md"), "w") as f:
+                f.write(result.markdown_v2.fit_markdown)
+
+        print("Done")

@@ -512,25 +598,25 @@ async def main():
     await simple_crawl()
     await simple_example_with_running_js_code()
     await simple_example_with_css_selector()
-    await use_proxy()
+    # await use_proxy()
     await capture_and_save_screenshot("https://www.example.com", os.path.join(__location__, "tmp/example_screenshot.jpg"))
     await extract_structured_data_using_css_extractor()

     # LLM extraction examples
-    await extract_structured_data_using_llm()
-    await extract_structured_data_using_llm("huggingface/meta-llama/Meta-Llama-3.1-8B-Instruct", os.getenv("HUGGINGFACE_API_KEY"))
+    # await extract_structured_data_using_llm()
+    # await extract_structured_data_using_llm("huggingface/meta-llama/Meta-Llama-3.1-8B-Instruct", os.getenv("HUGGINGFACE_API_KEY"))
     # await extract_structured_data_using_llm("ollama/llama3.2")
     await extract_structured_data_using_llm("openai/gpt-4o", os.getenv("OPENAI_API_KEY"))
+    await extract_structured_data_using_llm("ollama/llama3.2")

     # You can always pass custom headers to the extraction strategy
-    custom_headers = {
-        "Authorization": "Bearer your-custom-token",
-        "X-Custom-Header": "Some-Value"
-    }
-    await extract_structured_data_using_llm(extra_headers=custom_headers)
+    # custom_headers = {
+    #     "Authorization": "Bearer your-custom-token",
+    #     "X-Custom-Header": "Some-Value"
+    # }
+    # await extract_structured_data_using_llm(extra_headers=custom_headers)

-    # await crawl_dynamic_content_pages_method_1()
-    # await crawl_dynamic_content_pages_method_2()
+    await crawl_dynamic_content_pages_method_1()
+    await crawl_dynamic_content_pages_method_2()
     await crawl_dynamic_content_pages_method_3()

     await crawl_custom_browser_type()

docs/examples/v0.3.74.overview.py (new file, +277 lines)
@@ -0,0 +1,277 @@
import os, sys
# append the parent directory to the sys.path
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(parent_dir)
parent_parent_dir = os.path.dirname(parent_dir)
sys.path.append(parent_parent_dir)
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
__data__ = os.path.join(__location__, "__data")
import asyncio
from pathlib import Path
import aiohttp
import json
from crawl4ai import AsyncWebCrawler, CacheMode
from crawl4ai.content_filter_strategy import BM25ContentFilter

# 1. File Download Processing Example
async def download_example():
    """Example of downloading files from Python.org"""
    # downloads_path = os.path.join(os.getcwd(), "downloads")
    downloads_path = os.path.join(Path.home(), ".crawl4ai", "downloads")
    os.makedirs(downloads_path, exist_ok=True)

    print(f"Downloads will be saved to: {downloads_path}")

    async with AsyncWebCrawler(
        accept_downloads=True,
        downloads_path=downloads_path,
        verbose=True
    ) as crawler:
        result = await crawler.arun(
            url="https://www.python.org/downloads/",
            js_code="""
            // Find and click the first Windows installer link
            const downloadLink = document.querySelector('a[href$=".exe"]');
            if (downloadLink) {
                console.log('Found download link:', downloadLink.href);
                downloadLink.click();
            } else {
                console.log('No .exe download link found');
            }
            """,
            delay_before_return_html=1,  # Wait a moment to ensure the download starts
            cache_mode=CacheMode.BYPASS
        )

        if result.downloaded_files:
            print("\nDownload successful!")
            print("Downloaded files:")
            for file_path in result.downloaded_files:
                print(f"- {file_path}")
                print(f"  File size: {os.path.getsize(file_path) / (1024*1024):.2f} MB")
        else:
            print("\nNo files were downloaded")

# 2. Local File and Raw HTML Processing Example
async def local_and_raw_html_example():
    """Example of processing local files and raw HTML"""
    # Create a sample HTML file
    sample_file = os.path.join(__data__, "sample.html")
    with open(sample_file, "w") as f:
        f.write("""
        <html><body>
            <h1>Test Content</h1>
            <p>This is a test paragraph.</p>
        </body></html>
        """)

    async with AsyncWebCrawler(verbose=True) as crawler:
        # Process local file
        local_result = await crawler.arun(
            url=f"file://{os.path.abspath(sample_file)}"
        )

        # Process raw HTML
        raw_html = """
        <html><body>
            <h1>Raw HTML Test</h1>
            <p>This is a test of raw HTML processing.</p>
        </body></html>
        """
        raw_result = await crawler.arun(
            url=f"raw:{raw_html}"
        )

    # Clean up
    os.remove(sample_file)

    print("Local file content:", local_result.markdown)
    print("\nRaw HTML content:", raw_result.markdown)

# 3. Enhanced Markdown Generation Example
async def markdown_generation_example():
    """Example of enhanced markdown generation with citations and LLM-friendly features"""
    async with AsyncWebCrawler(verbose=True) as crawler:
        # Create a content filter (optional)
        content_filter = BM25ContentFilter(
            # user_query="History and cultivation",
            bm25_threshold=1.0
        )

        result = await crawler.arun(
            url="https://en.wikipedia.org/wiki/Apple",
            css_selector="main div#bodyContent",
            content_filter=content_filter,
            cache_mode=CacheMode.BYPASS
        )

        from crawl4ai import AsyncWebCrawler
        from crawl4ai.content_filter_strategy import BM25ContentFilter

        result = await crawler.arun(
            url="https://en.wikipedia.org/wiki/Apple",
            css_selector="main div#bodyContent",
            content_filter=BM25ContentFilter()
        )
        print(result.markdown_v2.fit_markdown)

        print("\nMarkdown Generation Results:")
        print(f"1. Original markdown length: {len(result.markdown)}")
        print(f"2. New markdown versions (markdown_v2):")
        print(f"   - Raw markdown length: {len(result.markdown_v2.raw_markdown)}")
        print(f"   - Citations markdown length: {len(result.markdown_v2.markdown_with_citations)}")
        print(f"   - References section length: {len(result.markdown_v2.references_markdown)}")
        if result.markdown_v2.fit_markdown:
            print(f"   - Filtered markdown length: {len(result.markdown_v2.fit_markdown)}")

        # Save examples to files
        output_dir = os.path.join(__data__, "markdown_examples")
        os.makedirs(output_dir, exist_ok=True)

        # Save different versions
        with open(os.path.join(output_dir, "1_raw_markdown.md"), "w") as f:
            f.write(result.markdown_v2.raw_markdown)

        with open(os.path.join(output_dir, "2_citations_markdown.md"), "w") as f:
            f.write(result.markdown_v2.markdown_with_citations)

        with open(os.path.join(output_dir, "3_references.md"), "w") as f:
            f.write(result.markdown_v2.references_markdown)

        if result.markdown_v2.fit_markdown:
            with open(os.path.join(output_dir, "4_filtered_markdown.md"), "w") as f:
                f.write(result.markdown_v2.fit_markdown)

        print(f"\nMarkdown examples saved to: {output_dir}")

        # Show a sample of citations and references
        print("\nSample of markdown with citations:")
        print(result.markdown_v2.markdown_with_citations[:500] + "...\n")
        print("Sample of references:")
        print('\n'.join(result.markdown_v2.references_markdown.split('\n')[:10]) + "...")

# 4. Browser Management Example
async def browser_management_example():
    """Example of using enhanced browser management features"""
    # Use the specified user directory path
    user_data_dir = os.path.join(Path.home(), ".crawl4ai", "browser_profile")
    os.makedirs(user_data_dir, exist_ok=True)

    print(f"Browser profile will be saved to: {user_data_dir}")

    async with AsyncWebCrawler(
        use_managed_browser=True,
        user_data_dir=user_data_dir,
        headless=False,
        verbose=True
    ) as crawler:
        result = await crawler.arun(
            url="https://crawl4ai.com",
            # session_id="persistent_session_1",
            cache_mode=CacheMode.BYPASS
        )
        # Use GitHub as an example - it's a good test for browser management
        # because it requires proper browser handling
        result = await crawler.arun(
            url="https://github.com/trending",
            # session_id="persistent_session_1",
            cache_mode=CacheMode.BYPASS
        )

        print("\nBrowser session result:", result.success)
        if result.success:
            print("Page title:", result.metadata.get('title', 'No title found'))

# 5. API Usage Example
async def api_example():
    """Example of using the new API endpoints"""
    api_token = os.getenv('CRAWL4AI_API_TOKEN') or "test_api_code"
    headers = {'Authorization': f'Bearer {api_token}'}
    async with aiohttp.ClientSession() as session:
        # Submit crawl job
        crawl_request = {
            "urls": ["https://news.ycombinator.com"],  # Hacker News as an example
            "extraction_config": {
                "type": "json_css",
                "params": {
                    "schema": {
                        "name": "Hacker News Articles",
                        "baseSelector": ".athing",
                        "fields": [
                            {
                                "name": "title",
                                "selector": ".title a",
                                "type": "text"
                            },
                            {
                                "name": "score",
                                "selector": ".score",
                                "type": "text"
                            },
                            {
                                "name": "url",
                                "selector": ".title a",
                                "type": "attribute",
                                "attribute": "href"
                            }
                        ]
                    }
                }
            },
            "crawler_params": {
                "headless": True,
                # "use_managed_browser": True
            },
            "cache_mode": "bypass",
            # "screenshot": True,
            # "magic": True
        }

        async with session.post(
            "http://localhost:11235/crawl",
            json=crawl_request,
            headers=headers
        ) as response:
            task_data = await response.json()
            task_id = task_data["task_id"]

        # Check task status
        while True:
            async with session.get(
                f"http://localhost:11235/task/{task_id}",
                headers=headers
            ) as status_response:
                result = await status_response.json()
                print(f"Task status: {result['status']}")

                if result["status"] == "completed":
                    print("Task completed!")
                    print("Results:")
                    news = json.loads(result["results"][0]['extracted_content'])
                    print(json.dumps(news[:4], indent=2))
                    break
                else:
                    await asyncio.sleep(1)

# Main execution
async def main():
    # print("Running Crawl4AI feature examples...")

    # print("\n1. Running Download Example:")
    # await download_example()

    # print("\n2. Running Markdown Generation Example:")
    # await markdown_generation_example()

    # print("\n3. Running Local and Raw HTML Example:")
    # await local_and_raw_html_example()

    # print("\n4. Running Browser Management Example:")
    await browser_management_example()

    # print("\n5. Running API Example:")
    await api_example()

if __name__ == "__main__":
    asyncio.run(main())
@@ -18,7 +18,7 @@ Let's see how we can customize the AsyncWebCrawler using hooks! In this example,
 import asyncio
 from crawl4ai import AsyncWebCrawler
 from crawl4ai.async_crawler_strategy import AsyncPlaywrightCrawlerStrategy
-from playwright.async_api import Page, Browser
+from playwright.async_api import Page, Browser, BrowserContext

 async def on_browser_created(browser: Browser):
     print("[HOOK] on_browser_created")
@@ -71,7 +71,11 @@ from crawl4ai.async_crawler_strategy import AsyncPlaywrightCrawlerStrategy
 async def main():
     print("\n🔗 Using Crawler Hooks: Let's see how we can customize the AsyncWebCrawler using hooks!")

-    crawler_strategy = AsyncPlaywrightCrawlerStrategy(verbose=True)
+    initial_cookies = [
+        {"name": "sessionId", "value": "abc123", "domain": ".example.com"},
+        {"name": "userId", "value": "12345", "domain": ".example.com"}
+    ]
+    crawler_strategy = AsyncPlaywrightCrawlerStrategy(verbose=True, cookies=initial_cookies)
     crawler_strategy.set_hook('on_browser_created', on_browser_created)
     crawler_strategy.set_hook('before_goto', before_goto)
     crawler_strategy.set_hook('after_goto', after_goto)

docs/md_v2/advanced/managed_browser.md (new file, +136 lines)
@@ -0,0 +1,136 @@
# Content Filtering in Crawl4AI

This guide explains how to use content filtering strategies in Crawl4AI to extract the most relevant information from crawled web pages. You'll learn how to use the built-in `BM25ContentFilter` and how to create your own custom content filtering strategies.

## Relevance Content Filter

The `RelevantContentFilter` is an abstract class that provides a common interface for content filtering strategies. Specific filtering algorithms, like `PruningContentFilter` or `BM25ContentFilter`, inherit from this class and implement the `filter_content` method, which takes HTML content as input and returns a list of filtered text blocks.

## Pruning Content Filter

The `PruningContentFilter` is a tree-shaking algorithm that analyzes the HTML DOM structure and removes less relevant nodes based on metrics like text density, link density, and tag importance. It evaluates each node with a composite scoring system and "prunes" nodes that fall below a given threshold.

### Usage

```python
import asyncio

from crawl4ai import AsyncWebCrawler
from crawl4ai.content_filter_strategy import PruningContentFilter

async def filter_content(url):
    async with AsyncWebCrawler() as crawler:
        content_filter = PruningContentFilter(
            min_word_threshold=5,
            threshold_type='dynamic',
            threshold=0.45
        )
        result = await crawler.arun(url=url, extraction_strategy=content_filter, fit_markdown=True)
        if result.success:
            print(f"Cleaned Markdown:\n{result.fit_markdown}")

asyncio.run(filter_content("https://en.wikipedia.org/wiki/Apple"))
```

### Parameters

- **`min_word_threshold`**: (Optional) Minimum number of words a node must contain to be considered relevant. Nodes with fewer words are automatically pruned.
- **`threshold_type`**: (Optional, default `'fixed'`) Controls how pruning thresholds are calculated:
  - `'fixed'`: Uses a constant threshold value for all nodes
  - `'dynamic'`: Adjusts the threshold based on node characteristics like tag importance and text/link ratios
- **`threshold`**: (Optional, default 0.48) Base threshold value for node pruning:
  - For a fixed threshold: nodes scoring below this value are removed
  - For a dynamic threshold: this value is adjusted based on node properties

### How It Works

The pruning algorithm evaluates each node using multiple metrics:
- Text density: ratio of actual text to overall node content
- Link density: proportion of the text that sits inside links
- Tag importance: weight based on HTML tag type (e.g., article, p, div)
- Content quality: metrics like text length and structural importance

Nodes scoring below the threshold are removed, effectively "shaking" less relevant content out of the DOM tree. The result is a cleaner document containing only the most relevant content blocks; a toy sketch of such a composite score follows the list below.

The algorithm is particularly effective for:
- Removing boilerplate content
- Eliminating navigation menus and sidebars
- Preserving main article content
- Maintaining document structure while removing noise
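
To make the composite-scoring idea concrete, here is a toy sketch. The weights, formula, and names below are hypothetical illustrations of combining text density, link density, and tag weight — not `PruningContentFilter`'s actual internals:

```python
from bs4 import Tag

# Hypothetical tag weights: semantic containers score higher than page chrome.
TAG_WEIGHTS = {"article": 1.5, "p": 1.2, "section": 1.0, "div": 0.8, "aside": 0.4, "nav": 0.2}

def node_score(node: Tag) -> float:
    text = node.get_text(strip=True)
    if not text:
        return 0.0
    markup_len = len(str(node))
    link_text_len = sum(len(a.get_text(strip=True)) for a in node.find_all("a"))
    text_density = len(text) / max(markup_len, 1)      # share of text vs. markup
    link_density = link_text_len / max(len(text), 1)   # share of the text inside links
    tag_weight = TAG_WEIGHTS.get(node.name, 0.5)
    return tag_weight * text_density * (1.0 - min(link_density, 1.0))

# A fixed-threshold pruner would then drop any node with node_score(node) < threshold.
```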

## BM25 Algorithm

The `BM25ContentFilter` uses the BM25 algorithm, a ranking function from information retrieval that estimates how relevant a document is to a given search query. In Crawl4AI, it identifies and extracts the text chunks most relevant to the page's metadata or a user-specified query; the standard scoring function is shown below for reference.
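
As general information-retrieval background (not a description of Crawl4AI's exact implementation), BM25 scores a text chunk $D$ against query terms $q_1, \dots, q_n$ as:

$$
\operatorname{score}(D, Q) = \sum_{i=1}^{n} \operatorname{IDF}(q_i) \cdot \frac{f(q_i, D)\,(k_1 + 1)}{f(q_i, D) + k_1 \left(1 - b + b \,\frac{|D|}{\operatorname{avgdl}}\right)}
$$

where $f(q_i, D)$ is the frequency of term $q_i$ in $D$, $|D|$ is the chunk length, $\operatorname{avgdl}$ is the average chunk length, and $k_1$ and $b$ are free parameters (commonly $k_1 \in [1.2, 2.0]$ and $b = 0.75$). Chunks scoring below the configured `bm25_threshold` are discarded.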

### Usage

To use the `BM25ContentFilter`, initialize it and pass it as the `extraction_strategy` parameter to the crawler's `arun` method.

```python
import asyncio

from crawl4ai import AsyncWebCrawler
from crawl4ai.content_filter_strategy import BM25ContentFilter

async def filter_content(url, query=None):
    async with AsyncWebCrawler() as crawler:
        content_filter = BM25ContentFilter(user_query=query)
        # Set the fit_markdown flag to True to trigger BM25 filtering
        result = await crawler.arun(url=url, extraction_strategy=content_filter, fit_markdown=True)
        if result.success:
            print(f"Filtered Content (JSON):\n{result.extracted_content}")
            print(f"\nFiltered Markdown:\n{result.fit_markdown}")  # New field in CrawlResult object
            print(f"\nFiltered HTML:\n{result.fit_html}")  # New field in CrawlResult object; tags may be re-organized by internal parsing
        else:
            print("Error:", result.error_message)

# Example usage:
asyncio.run(filter_content("https://en.wikipedia.org/wiki/Apple", "fruit nutrition health"))  # with a query
asyncio.run(filter_content("https://en.wikipedia.org/wiki/Apple"))  # without a query; page metadata is used as the query
```

### Parameters

- **`user_query`**: (Optional) A string representing the search query. If not provided, the filter extracts relevant metadata (title, description, keywords) from the page and uses that as the query.
- **`bm25_threshold`**: (Optional, default 1.0) A float controlling the relevance cutoff. Higher values filter more strictly and return only the most relevant text chunks; lower values are more lenient.

## Fit Markdown Flag

Setting the `fit_markdown` flag to `True` in the `arun` method activates BM25 content filtering during the crawl. The flag instructs the scraper to extract and clean the HTML, primarily to prepare input for a Large Language Model that cannot ingest large amounts of raw page data. Besides improving the quality of the extracted content, it adds the filtered output to two new attributes on the returned `CrawlResult` object: `fit_markdown` and `fit_html`. A minimal sketch follows.
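
A minimal sketch, reusing the `BM25ContentFilter` call pattern from the usage example above (the URL is just an example):

```python
import asyncio

from crawl4ai import AsyncWebCrawler
from crawl4ai.content_filter_strategy import BM25ContentFilter

async def fit_markdown_demo():
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(
            url="https://en.wikipedia.org/wiki/Apple",
            extraction_strategy=BM25ContentFilter(),
            fit_markdown=True,  # populates result.fit_markdown and result.fit_html
        )
        if result.success:
            # The filtered variants sit alongside the regular markdown field
            print(len(result.markdown), len(result.fit_markdown))

asyncio.run(fit_markdown_demo())
```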

## Custom Content Filtering Strategies

You can create your own custom filtering strategies by inheriting from the `RelevantContentFilter` class and implementing the `filter_content` method. This lets you tailor the filtering logic to your specific needs.

```python
from crawl4ai import AsyncWebCrawler
from crawl4ai.content_filter_strategy import RelevantContentFilter
from bs4 import BeautifulSoup, Tag
from typing import List

class MyCustomFilter(RelevantContentFilter):
    def filter_content(self, html: str) -> List[str]:
        soup = BeautifulSoup(html, 'lxml')
        # Implement custom filtering logic here.
        # Example: extract all paragraphs within divs with class "article-body"
        filtered_paragraphs = []
        for tag in soup.select("div.article-body p"):
            if isinstance(tag, Tag):
                filtered_paragraphs.append(str(tag))  # Add the cleaned HTML element
        return filtered_paragraphs


async def custom_filter_demo(url: str):
    async with AsyncWebCrawler() as crawler:
        custom_filter = MyCustomFilter()
        result = await crawler.arun(url, extraction_strategy=custom_filter)
        if result.success:
            print(result.extracted_content)
```

This example demonstrates extracting paragraphs from a specific div class. You can customize the logic to implement different filtering strategies, use regular expressions, analyze text density, or apply other relevant techniques.

## Conclusion

Content filtering strategies provide a powerful way to refine the output of your crawls. By using `BM25ContentFilter`, `PruningContentFilter`, or custom strategies, you can focus on the most pertinent information and improve the efficiency of your data processing pipeline.
@@ -30,7 +30,7 @@ Let's start with a basic example of session-based crawling:

 ```python
 import asyncio
-from crawl4ai import AsyncWebCrawler
+from crawl4ai import AsyncWebCrawler, CacheMode

 async def basic_session_crawl():
     async with AsyncWebCrawler(verbose=True) as crawler:
@@ -43,7 +43,7 @@ async def basic_session_crawl():
                 session_id=session_id,
                 js_code="document.querySelector('.load-more-button').click();" if page > 0 else None,
                 css_selector=".content-item",
-                bypass_cache=True
+                cache_mode=CacheMode.BYPASS
             )

             print(f"Page {page + 1}: Found {result.extracted_content.count('.content-item')} items")
@@ -102,7 +102,7 @@ async def advanced_session_crawl_with_hooks():
                 session_id=session_id,
                 css_selector="li.commit-item",
                 js_code=js_next_page if page > 0 else None,
-                bypass_cache=True,
+                cache_mode=CacheMode.BYPASS,
                 js_only=page > 0
             )

@@ -174,7 +174,7 @@ async def integrated_js_and_wait_crawl():
                 extraction_strategy=extraction_strategy,
                 js_code=js_next_page_and_wait if page > 0 else None,
                 js_only=page > 0,
-                bypass_cache=True
+                cache_mode=CacheMode.BYPASS
             )

             commits = json.loads(result.extracted_content)
@@ -241,7 +241,7 @@ async def wait_for_parameter_crawl():
                 js_code=js_next_page if page > 0 else None,
                 wait_for=wait_for if page > 0 else None,
                 js_only=page > 0,
-                bypass_cache=True
+                cache_mode=CacheMode.BYPASS
             )

             commits = json.loads(result.extracted_content)

@@ -75,7 +75,7 @@ async def crawl_dynamic_content():
             js_code=js_next_page if page > 0 else None,
             wait_for=wait_for if page > 0 else None,
             js_only=page > 0,
-            bypass_cache=True
+            cache_mode=CacheMode.BYPASS
         )

         if result.success:
@@ -8,11 +8,26 @@ The following parameters can be passed to the `arun()` method. They are organized
 await crawler.arun(
     url="https://example.com",     # Required: URL to crawl
     verbose=True,                  # Enable detailed logging
-    bypass_cache=False,            # Skip cache for this request
+    cache_mode=CacheMode.ENABLED,  # Control cache behavior
     warmup=True                    # Whether to run warmup check
 )
 ```

+## Cache Control
+
+```python
+from crawl4ai import CacheMode
+
+await crawler.arun(
+    cache_mode=CacheMode.ENABLED,  # Normal caching (read/write)
+    # Other cache modes:
+    # cache_mode=CacheMode.DISABLED    # No caching at all
+    # cache_mode=CacheMode.READ_ONLY   # Only read from cache
+    # cache_mode=CacheMode.WRITE_ONLY  # Only write to cache
+    # cache_mode=CacheMode.BYPASS      # Skip cache for this operation
+)
+```
+
 ## Content Processing Parameters

 ### Text Processing
@@ -162,14 +177,13 @@ await crawler.arun(

 ## Parameter Interactions and Notes

-1. **Magic Mode Combinations**
+1. **Cache and Performance Setup**
    ```python
-   # Full anti-detection setup
+   # Optimal caching for repeated crawls
    await crawler.arun(
-       magic=True,
-       headless=False,
-       simulate_user=True,
-       override_navigator=True
+       cache_mode=CacheMode.ENABLED,
+       word_count_threshold=10,
+       process_iframes=False
    )
    ```

@@ -179,7 +193,8 @@ await crawler.arun(
    await crawler.arun(
        js_code="window.scrollTo(0, document.body.scrollHeight);",
        wait_for="css:.lazy-content",
-       delay_before_return_html=2.0
+       delay_before_return_html=2.0,
+       cache_mode=CacheMode.WRITE_ONLY  # Cache results after dynamic load
    )
    ```

@@ -192,7 +207,8 @@ await crawler.arun(
        extraction_strategy=my_strategy,
        chunking_strategy=my_chunking,
        process_iframes=True,
-       remove_overlay_elements=True
+       remove_overlay_elements=True,
+       cache_mode=CacheMode.ENABLED
    )
    ```

@@ -201,7 +217,7 @@
 1. **Performance Optimization**
    ```python
    await crawler.arun(
-       bypass_cache=False,            # Use cache when possible
+       cache_mode=CacheMode.ENABLED,  # Use full caching
        word_count_threshold=10,       # Filter out noise
        process_iframes=False          # Skip iframes if not needed
    )
@@ -212,7 +228,8 @@
    await crawler.arun(
        magic=True,                      # Enable anti-detection
        delay_before_return_html=1.0,    # Wait for dynamic content
-       page_timeout=60000               # Longer timeout for slow pages
+       page_timeout=60000,              # Longer timeout for slow pages
+       cache_mode=CacheMode.WRITE_ONLY  # Cache results after successful crawl
    )
    ```
@@ -221,6 +238,7 @@
    await crawler.arun(
        remove_overlay_elements=True,    # Remove popups
        excluded_tags=['nav', 'aside'],  # Remove unnecessary elements
-       keep_data_attributes=False       # Remove data attributes
+       keep_data_attributes=False,      # Remove data attributes
+       cache_mode=CacheMode.ENABLED     # Use cache for faster processing
    )
    ```

@@ -13,7 +13,7 @@ AsyncWebCrawler(

     # Cache Settings
     always_by_pass_cache: bool = False,  # Always bypass cache
-    base_directory: str = str(Path.home()),  # Base directory for cache
+    base_directory: str = str(os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home())),  # Base directory for cache

     # Network Settings
     proxy: str = None,  # Simple proxy URL

@@ -20,6 +20,7 @@ class CrawlResult(BaseModel):
     fit_html: Optional[str] = None      # Most relevant HTML content
     markdown: Optional[str] = None      # HTML converted to markdown
     fit_markdown: Optional[str] = None  # Most relevant markdown content
+    downloaded_files: Optional[List[str]] = None  # Downloaded files

     # Extracted Data
     extracted_content: Optional[str] = None  # Content from extraction strategy

@@ -32,4 +32,5 @@
 | async_webcrawler.py | warmup | `kwargs.get("warmup", True)` | AsyncWebCrawler | Initialize crawler with warmup request |
 | async_webcrawler.py | session_id | `kwargs.get("session_id", None)` | AsyncWebCrawler | Session identifier for browser reuse |
 | async_webcrawler.py | only_text | `kwargs.get("only_text", False)` | AsyncWebCrawler | Extract only text content |
 | async_webcrawler.py | bypass_cache | `kwargs.get("bypass_cache", False)` | AsyncWebCrawler | Skip cache and force fresh crawl |
+| async_webcrawler.py | cache_mode | `kwargs.get("cache_mode", CacheMode.ENABLED)` | AsyncWebCrawler | Cache handling mode for request |

docs/md_v2/basic/cache-modes.md (new file, +79 lines)
@@ -0,0 +1,79 @@
# Crawl4AI Cache System and Migration Guide

## Overview
Starting from version X.X.X, Crawl4AI introduces a new caching system that replaces the old boolean flags with a more intuitive `CacheMode` enum. This change simplifies cache control and makes the behavior more predictable.

## Old vs New Approach

### Old Way (Deprecated)
The old system used multiple boolean flags:
- `bypass_cache`: Skip cache entirely
- `disable_cache`: Disable all caching
- `no_cache_read`: Don't read from cache
- `no_cache_write`: Don't write to cache

### New Way (Recommended)
The new system uses a single `CacheMode` enum:
- `CacheMode.ENABLED`: Normal caching (read/write)
- `CacheMode.DISABLED`: No caching at all
- `CacheMode.READ_ONLY`: Only read from cache
- `CacheMode.WRITE_ONLY`: Only write to cache
- `CacheMode.BYPASS`: Skip cache for this operation

## Migration Example

### Old Code (Deprecated)
```python
import asyncio
from crawl4ai import AsyncWebCrawler

async def use_proxy():
    async with AsyncWebCrawler(verbose=True) as crawler:
        result = await crawler.arun(
            url="https://www.nbcnews.com/business",
            bypass_cache=True  # Old way
        )
        print(len(result.markdown))

async def main():
    await use_proxy()

if __name__ == "__main__":
    asyncio.run(main())
```

### New Code (Recommended)
```python
import asyncio
from crawl4ai import AsyncWebCrawler, CacheMode  # Import CacheMode

async def use_proxy():
    async with AsyncWebCrawler(verbose=True) as crawler:
        result = await crawler.arun(
            url="https://www.nbcnews.com/business",
            cache_mode=CacheMode.BYPASS  # New way
        )
        print(len(result.markdown))

async def main():
    await use_proxy()

if __name__ == "__main__":
    asyncio.run(main())
```

## Common Migration Patterns

A small helper that automates this mapping is sketched after the table.

| Old Flag | New Mode |
|----------|----------|
| `bypass_cache=True` | `cache_mode=CacheMode.BYPASS` |
| `disable_cache=True` | `cache_mode=CacheMode.DISABLED` |
| `no_cache_read=True` | `cache_mode=CacheMode.WRITE_ONLY` |
| `no_cache_write=True` | `cache_mode=CacheMode.READ_ONLY` |
|
||||
|
||||
## Suppressing Deprecation Warnings
|
||||
If you need time to migrate, you can temporarily suppress deprecation warnings:
|
||||
```python
|
||||
# In your config.py
|
||||
SHOW_DEPRECATION_WARNINGS = False
|
||||
```
|
||||
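
If you prefer not to rely on a library-level switch, Python's standard `warnings` module achieves a similar effect process-wide. This is a generic Python sketch, not a Crawl4AI-specific API:

```python
import warnings

# Silence DeprecationWarning for the whole process; narrow the filter
# (e.g., by module or message pattern) to target only migration warnings.
warnings.filterwarnings("ignore", category=DeprecationWarning)
```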
136
docs/md_v2/basic/content_filtering.md
Normal file
@@ -0,0 +1,136 @@
# Content Filtering in Crawl4AI

This guide explains how to use content filtering strategies in Crawl4AI to extract the most relevant information from crawled web pages. You'll learn how to use the built-in `PruningContentFilter` and `BM25ContentFilter`, and how to create your own custom content filtering strategies.

## Relevant Content Filter

The `RelevantContentFilter` is an abstract class that provides a common interface for content filtering strategies. Specific filtering algorithms, like `PruningContentFilter` or `BM25ContentFilter`, inherit from this class and implement the `filter_content` method. This method takes the HTML content as input and returns a list of filtered text blocks.

## Pruning Content Filter

The `PruningContentFilter` is a tree-shaking algorithm that analyzes the HTML DOM structure and removes less relevant nodes based on various metrics like text density, link density, and tag importance. It evaluates each node using a composite scoring system and "prunes" nodes that fall below a certain threshold.

### Usage

```python
from crawl4ai import AsyncWebCrawler
from crawl4ai.content_filter_strategy import PruningContentFilter

async def filter_content(url):
    async with AsyncWebCrawler() as crawler:
        content_filter = PruningContentFilter(
            min_word_threshold=5,
            threshold_type='dynamic',
            threshold=0.45
        )
        result = await crawler.arun(url=url, extraction_strategy=content_filter, fit_markdown=True)
        if result.success:
            print(f"Cleaned Markdown:\n{result.fit_markdown}")
```

### Parameters

- **`min_word_threshold`**: (Optional) Minimum number of words a node must contain to be considered relevant. Nodes with fewer words are automatically pruned.

- **`threshold_type`**: (Optional, default 'fixed') Controls how pruning thresholds are calculated:
  - `'fixed'`: Uses a constant threshold value for all nodes
  - `'dynamic'`: Adjusts threshold based on node characteristics like tag importance and text/link ratios

- **`threshold`**: (Optional, default 0.48) Base threshold value for node pruning:
  - For fixed threshold: Nodes scoring below this value are removed
  - For dynamic threshold: This value is adjusted based on node properties

### How It Works

The pruning algorithm evaluates each node using multiple metrics:

- Text density: Ratio of actual text to overall node content
- Link density: Proportion of text within links
- Tag importance: Weight based on HTML tag type (e.g., article, p, div)
- Content quality: Metrics like text length and structural importance

Nodes scoring below the threshold are removed, effectively "shaking" less relevant content from the DOM tree. This results in a cleaner document containing only the most relevant content blocks. A toy sketch of this kind of composite scoring appears after the list below.

The algorithm is particularly effective for:

- Removing boilerplate content
- Eliminating navigation menus and sidebars
- Preserving main article content
- Maintaining document structure while removing noise
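
To make the idea concrete, here is a toy version of such a composite score. The weights and formula are illustrative assumptions, not the library's actual implementation:

```python
# Toy composite score for a DOM node (illustrative only; the real
# PruningContentFilter uses its own metrics and weights).
def node_score(text_len: int, total_len: int, link_text_len: int, tag_weight: float) -> float:
    text_density = text_len / max(total_len, 1)      # actual text vs. overall content
    link_density = link_text_len / max(text_len, 1)  # share of text inside links
    return tag_weight * text_density * (1.0 - min(link_density, 1.0))

# A <p> full of prose scores high; a <div> of navigation links scores near zero
print(node_score(text_len=400, total_len=500, link_text_len=20, tag_weight=1.0))  # ~0.76
print(node_score(text_len=60, total_len=300, link_text_len=55, tag_weight=0.5))  # ~0.008
```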

## BM25 Algorithm

The `BM25ContentFilter` uses the BM25 algorithm, a ranking function used in information retrieval to estimate the relevance of documents to a given search query. In Crawl4AI, this algorithm helps to identify and extract text chunks that are most relevant to the page's metadata or a user-specified query.

### Usage

To use the `BM25ContentFilter`, initialize it and then pass it as the `extraction_strategy` parameter to the `arun` method of the crawler.

```python
import asyncio

from crawl4ai import AsyncWebCrawler
from crawl4ai.content_filter_strategy import BM25ContentFilter

async def filter_content(url, query=None):
    async with AsyncWebCrawler() as crawler:
        content_filter = BM25ContentFilter(user_query=query)
        result = await crawler.arun(url=url, extraction_strategy=content_filter, fit_markdown=True) # Set fit_markdown flag to True to trigger BM25 filtering
        if result.success:
            print(f"Filtered Content (JSON):\n{result.extracted_content}")
            print(f"\nFiltered Markdown:\n{result.fit_markdown}") # New field in CrawlResult object
            print(f"\nFiltered HTML:\n{result.fit_html}") # New field in CrawlResult object. Note that raw HTML may have tags re-organized due to internal parsing.
        else:
            print("Error:", result.error_message)

# Example usage:
asyncio.run(filter_content("https://en.wikipedia.org/wiki/Apple", "fruit nutrition health")) # With a query
asyncio.run(filter_content("https://en.wikipedia.org/wiki/Apple")) # Without a query, page metadata is used as the query.
```

### Parameters

- **`user_query`**: (Optional) A string representing the search query. If not provided, the filter extracts relevant metadata (title, description, keywords) from the page and uses that as the query.
- **`bm25_threshold`**: (Optional, default 1.0) A float value that controls the threshold for relevance. Higher values result in stricter filtering, returning only the most relevant text chunks. Lower values result in more lenient filtering.
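
For background, the textbook BM25 term score that this family of filters builds on can be sketched as follows; the free parameters `k1` and `b` and the smoothing are standard defaults, and the filter's internal tuning and preprocessing may differ:

```python
import math

# Textbook BM25 term score (reference sketch, not the filter's internals)
def bm25_term_score(tf: float, df: int, n_docs: int, doc_len: float,
                    avg_doc_len: float, k1: float = 1.5, b: float = 0.75) -> float:
    idf = math.log((n_docs - df + 0.5) / (df + 0.5) + 1)  # smoothed IDF
    norm = tf * (k1 + 1) / (tf + k1 * (1 - b + b * doc_len / avg_doc_len))
    return idf * norm

# A term that is frequent in a shorter-than-average chunk scores higher
print(bm25_term_score(tf=3, df=10, n_docs=1000, doc_len=80, avg_doc_len=120))
```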
## Fit Markdown Flag

Setting the `fit_markdown` flag to `True` in the `arun` method activates content filtering during the crawl. The `fit_markdown` parameter instructs the scraper to extract and clean the HTML, primarily to prepare input for a Large Language Model that cannot process large amounts of data. Setting this flag not only improves the quality of the extracted content but also adds the filtered content to two new attributes in the returned `CrawlResult` object: `fit_markdown` and `fit_html`.

## Custom Content Filtering Strategies

You can create your own custom filtering strategies by inheriting from the `RelevantContentFilter` class and implementing the `filter_content` method. This allows you to tailor the filtering logic to your specific needs.

```python
from typing import List

from bs4 import BeautifulSoup, Tag
from crawl4ai import AsyncWebCrawler
from crawl4ai.content_filter_strategy import RelevantContentFilter

class MyCustomFilter(RelevantContentFilter):
    def filter_content(self, html: str) -> List[str]:
        soup = BeautifulSoup(html, 'lxml')
        # Implement custom filtering logic here
        # Example: extract all paragraphs within divs with class "article-body"
        filtered_paragraphs = []
        for tag in soup.select("div.article-body p"):
            if isinstance(tag, Tag):
                filtered_paragraphs.append(str(tag)) # Add the cleaned HTML element.
        return filtered_paragraphs

async def custom_filter_demo(url: str):
    async with AsyncWebCrawler() as crawler:
        custom_filter = MyCustomFilter()
        result = await crawler.arun(url, extraction_strategy=custom_filter)
        if result.success:
            print(result.extracted_content)
```

This example demonstrates extracting paragraphs from a specific div class. You can customize this logic to implement different filtering strategies, use regular expressions, analyze text density, or apply other relevant techniques.

## Conclusion

Content filtering strategies provide a powerful way to refine the output of your crawls. By using `PruningContentFilter`, `BM25ContentFilter`, or custom strategies of your own, you can focus on the most pertinent information and improve the efficiency of your data processing pipeline.
@@ -7,66 +7,325 @@ Crawl4AI provides official Docker images for easy deployment and scalability. Th

Pull and run the basic version:

```bash
# Basic run without security
docker pull unclecode/crawl4ai:basic
docker run -p 11235:11235 unclecode/crawl4ai:basic

# Run with API security enabled
docker run -p 11235:11235 -e CRAWL4AI_API_TOKEN=your_secret_token unclecode/crawl4ai:basic
```

## Running with Docker Compose 🐳

### Use Docker Compose (From Local Dockerfile or Docker Hub)

Crawl4AI provides flexibility to use Docker Compose for managing your containerized services. You can either build the image locally from the provided `Dockerfile` or use the pre-built image from Docker Hub.

### **Option 1: Using Docker Compose to Build Locally**

If you want to build the image locally, use the provided `docker-compose.local.yml` file.

```bash
docker-compose -f docker-compose.local.yml up -d
```

This will:
1. Build the Docker image from the provided `Dockerfile`.
2. Start the container and expose it on `http://localhost:11235`.

---

### **Option 2: Using Docker Compose with Pre-Built Image from Hub**

If you prefer using the pre-built image on Docker Hub, use the `docker-compose.hub.yml` file.

```bash
docker-compose -f docker-compose.hub.yml up -d
```

This will:
1. Pull the pre-built image `unclecode/crawl4ai:basic` (or `all`, depending on your configuration).
2. Start the container and expose it on `http://localhost:11235`.

---

### **Stopping the Running Services**

To stop the services started via Docker Compose, you can use:

```bash
docker-compose -f docker-compose.local.yml down
# OR
docker-compose -f docker-compose.hub.yml down
```

If the containers don’t stop and the application is still running, check the running containers:

```bash
docker ps
```

Find the `CONTAINER ID` of the running service and stop it forcefully:

```bash
docker stop <CONTAINER_ID>
```

---

### **Debugging with Docker Compose**

- **Check Logs**: To view the container logs:
  ```bash
  docker-compose -f docker-compose.local.yml logs -f
  ```

- **Remove Orphaned Containers**: If the service is still running unexpectedly:
  ```bash
  docker-compose -f docker-compose.local.yml down --remove-orphans
  ```

- **Manually Remove Network**: If the network is still in use:
  ```bash
  docker network ls
  docker network rm crawl4ai_default
  ```

---

### Why Use Docker Compose?

Docker Compose is the recommended way to deploy Crawl4AI because:

1. It simplifies multi-container setups.
2. It allows you to define environment variables, resources, and ports in a single file.
3. It makes it easier to switch between local development and production-ready images.

For example, your `docker-compose.yml` could include API keys, token settings, and memory limits, making deployment quick and consistent.

## API Security 🔒

### Understanding CRAWL4AI_API_TOKEN

The `CRAWL4AI_API_TOKEN` provides optional security for your Crawl4AI instance:

- If `CRAWL4AI_API_TOKEN` is set: All API endpoints (except `/health`) require authentication
- If `CRAWL4AI_API_TOKEN` is not set: The API is publicly accessible

```bash
# Secured Instance
docker run -p 11235:11235 -e CRAWL4AI_API_TOKEN=your_secret_token unclecode/crawl4ai:all

# Unsecured Instance
docker run -p 11235:11235 unclecode/crawl4ai:all
```
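
The same check works from the shell. A quick curl sketch using the endpoints shown in this guide (the token value is a placeholder):

```bash
# Health check requires no token
curl http://localhost:11235/health

# Authenticated crawl request against a secured instance
curl -X POST http://localhost:11235/crawl \
  -H "Authorization: Bearer your_secret_token" \
  -H "Content-Type: application/json" \
  -d '{"urls": "https://example.com", "priority": 10}'
```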

### Making API Calls

For secured instances, include the token in all requests:

```python
import requests

# Test health endpoint
health = requests.get("http://localhost:11235/health")
print("Health check:", health.json())

# Setup headers if token is being used
api_token = "your_secret_token" # Same token set in CRAWL4AI_API_TOKEN
headers = {"Authorization": f"Bearer {api_token}"} if api_token else {}

# Making authenticated requests
response = requests.post(
    "http://localhost:11235/crawl",
    headers=headers,
    json={
        "urls": "https://example.com",
        "priority": 10
    }
)

# Checking task status
task_id = response.json()["task_id"]
print("Task ID:", task_id)
status = requests.get(
    f"http://localhost:11235/task/{task_id}",
    headers=headers
)
```
## Available Images 🏷️

- `unclecode/crawl4ai:basic` - Basic web crawling capabilities
- `unclecode/crawl4ai:all` - Full installation with all features
- `unclecode/crawl4ai:gpu` - GPU-enabled version for ML features

### Using with Docker Compose

In your `docker-compose.yml`:

```yaml
services:
  crawl4ai:
    image: unclecode/crawl4ai:all
    environment:
      - CRAWL4AI_API_TOKEN=${CRAWL4AI_API_TOKEN:-} # Optional
    # ... other configuration
```

Then either:

1. Set in `.env` file:
```env
CRAWL4AI_API_TOKEN=your_secret_token
```

2. Or set via command line:
```bash
CRAWL4AI_API_TOKEN=your_secret_token docker-compose up
```

> **Security Note**: If you enable the API token, make sure to keep it secure and never commit it to version control. The token will be required for all API endpoints except the health check endpoint (`/health`).

## Configuration Options 🔧

### Environment Variables

You can configure the service using environment variables:

```bash
# Basic configuration
docker run -p 11235:11235 \
    -e MAX_CONCURRENT_TASKS=5 \
    -e OPENAI_API_KEY=your_key \
    unclecode/crawl4ai:all
```

### Volume Mounting

Mount a directory for persistent data:

```bash
# With security and LLM support
docker run -p 11235:11235 \
    -v $(pwd)/data:/app/data \
    -e CRAWL4AI_API_TOKEN=your_secret_token \
    -e OPENAI_API_KEY=sk-... \
    -e ANTHROPIC_API_KEY=sk-ant-... \
    unclecode/crawl4ai:all
```

### Resource Limits

Control container resources:

```bash
docker run -p 11235:11235 \
    --memory=4g \
    --cpus=2 \
    unclecode/crawl4ai:all
```

### Using Docker Compose (Recommended) 🐳

Create a `docker-compose.yml`:

```yaml
version: '3.8'

services:
  crawl4ai:
    image: unclecode/crawl4ai:all
    ports:
      - "11235:11235"
    environment:
      - CRAWL4AI_API_TOKEN=${CRAWL4AI_API_TOKEN:-} # Optional API security
      - MAX_CONCURRENT_TASKS=5
      # LLM Provider Keys
      - OPENAI_API_KEY=${OPENAI_API_KEY:-}
      - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
    volumes:
      - /dev/shm:/dev/shm
    deploy:
      resources:
        limits:
          memory: 4G
        reservations:
          memory: 1G
```

You can run it in two ways:

1. Using environment variables directly:
```bash
CRAWL4AI_API_TOKEN=secret123 OPENAI_API_KEY=sk-... docker-compose up
```

2. Using a `.env` file (recommended):
Create a `.env` file in the same directory:
```env
# API Security (optional)
CRAWL4AI_API_TOKEN=your_secret_token

# LLM Provider Keys
OPENAI_API_KEY=sk-...
ANTHROPIC_API_KEY=sk-ant-...

# Other Configuration
MAX_CONCURRENT_TASKS=5
```

Then simply run:
```bash
docker-compose up
```

### Testing the Deployment 🧪

```python
import requests

# For unsecured instances
def test_unsecured():
    # Health check
    health = requests.get("http://localhost:11235/health")
    print("Health check:", health.json())

    # Basic crawl
    response = requests.post(
        "http://localhost:11235/crawl",
        json={
            "urls": "https://www.nbcnews.com/business",
            "priority": 10
        }
    )
    task_id = response.json()["task_id"]
    print("Task ID:", task_id)

# For secured instances
def test_secured(api_token):
    headers = {"Authorization": f"Bearer {api_token}"}

    # Basic crawl with authentication
    response = requests.post(
        "http://localhost:11235/crawl",
        headers=headers,
        json={
            "urls": "https://www.nbcnews.com/business",
            "priority": 10
        }
    )
    task_id = response.json()["task_id"]
    print("Task ID:", task_id)
```

### LLM Extraction Example 🤖

When you've configured your LLM provider keys (via environment variables or `.env`), you can use LLM extraction:

```python
request = {
    "urls": "https://example.com",
    "extraction_config": {
        "type": "llm",
        "params": {
            "provider": "openai/gpt-4",
            "instruction": "Extract main topics from the page"
        }
    }
}

# Make the request (add headers if using API security)
response = requests.post("http://localhost:11235/crawl", json=request)
```

> **Note**: Remember to add `.env` to your `.gitignore` to keep your API keys secure!
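
As with the other `/crawl` calls in this guide, the response is a queued task. A minimal polling sketch reusing the `/task/{task_id}` endpoint from the earlier examples (the exact shape of the completed payload is an assumption here):

```python
import time

task_id = response.json()["task_id"]

while True:
    status = requests.get(f"http://localhost:11235/task/{task_id}").json()
    if status["status"] in ("completed", "failed"):
        break
    time.sleep(2)  # brief pause between polls

if status["status"] == "completed":
    # Field names below are assumptions based on this guide's examples
    print(status.get("result", {}).get("extracted_content"))
```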

## Usage Examples 📝

### Basic Crawling

148
docs/md_v2/basic/file-download.md
Normal file
@@ -0,0 +1,148 @@
# Download Handling in Crawl4AI

This guide explains how to use Crawl4AI to handle file downloads during crawling. You'll learn how to trigger downloads, specify download locations, and access downloaded files.

## Enabling Downloads

By default, Crawl4AI does not download files. To enable downloads, set the `accept_downloads` parameter to `True` in either the `AsyncWebCrawler` constructor or the `arun` method.

```python
import asyncio
from crawl4ai import AsyncWebCrawler

async def main():
    async with AsyncWebCrawler(accept_downloads=True) as crawler: # Globally enable downloads
        ... # your crawling logic

asyncio.run(main())
```

Or, enable it for a specific crawl:

```python
async def main():
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(url="...", accept_downloads=True)
        # ...
```

## Specifying Download Location

You can specify the download directory using the `downloads_path` parameter. If not provided, Crawl4AI creates a "downloads" directory inside the `.crawl4ai` folder in your home directory.

```python
import os
from pathlib import Path

# ... inside your crawl function:

downloads_path = os.path.join(os.getcwd(), "my_downloads") # Custom download path
os.makedirs(downloads_path, exist_ok=True)

result = await crawler.arun(url="...", downloads_path=downloads_path, accept_downloads=True)

# ...
```

If you are setting it globally, provide the path to the `AsyncWebCrawler`:

```python
async def crawl_with_downloads(url: str, download_path: str):
    async with AsyncWebCrawler(
        accept_downloads=True,
        downloads_path=download_path, # or set it on arun
        verbose=True
    ) as crawler:
        result = await crawler.arun(url=url) # downloads are already enabled globally
        # ...
```

## Triggering Downloads

Downloads are typically triggered by user interactions on a web page (e.g., clicking a download button). You can simulate these actions with the `js_code` parameter, injecting JavaScript code to be executed within the browser context. The `wait_for` parameter can also be crucial, allowing sufficient time for downloads to initiate before the crawler proceeds.

```python
result = await crawler.arun(
    url="https://www.python.org/downloads/",
    js_code="""
        // Find and click the first Windows installer link
        const downloadLink = document.querySelector('a[href$=".exe"]');
        if (downloadLink) {
            downloadLink.click();
        }
    """,
    wait_for=5 # Wait for 5 seconds for the download to start
)
```

## Accessing Downloaded Files

Downloaded file paths are stored in the `downloaded_files` attribute of the returned `CrawlResult` object. This is a list of strings, with each string representing the absolute path to a downloaded file.

```python
if result.downloaded_files:
    print("Downloaded files:")
    for file_path in result.downloaded_files:
        print(f"- {file_path}")
        # Perform operations with downloaded files, e.g., check file size
        file_size = os.path.getsize(file_path)
        print(f"- File size: {file_size} bytes")
else:
    print("No files downloaded.")
```
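
Since these are plain paths on disk, the standard library is enough for post-processing. A small sketch (pure Python, independent of Crawl4AI):

```python
import hashlib
import os

def summarize_download(file_path: str) -> str:
    """Return a short size-and-checksum summary for a downloaded file."""
    digest = hashlib.sha256()
    with open(file_path, "rb") as f:
        for chunk in iter(lambda: f.read(8192), b""):
            digest.update(chunk)
    size = os.path.getsize(file_path)
    return f"{os.path.basename(file_path)}: {size} bytes, sha256={digest.hexdigest()[:16]}"

# e.g., for file_path in result.downloaded_files: print(summarize_download(file_path))
```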

## Example: Downloading Multiple Files

```python
import asyncio
import os
from pathlib import Path
from crawl4ai import AsyncWebCrawler

async def download_multiple_files(url: str, download_path: str):
    async with AsyncWebCrawler(
        accept_downloads=True,
        downloads_path=download_path,
        verbose=True
    ) as crawler:
        result = await crawler.arun(
            url=url,
            js_code="""
                // Trigger multiple downloads (example)
                const downloadLinks = document.querySelectorAll('a[download]'); // Or a more specific selector
                for (const link of downloadLinks) {
                    link.click();
                    await new Promise(r => setTimeout(r, 2000)); // Add a small delay between clicks if needed
                }
            """,
            wait_for=10 # Adjust the timeout to match the expected time for all downloads to start
        )

        if result.downloaded_files:
            print("Downloaded files:")
            for file in result.downloaded_files:
                print(f"- {file}")
        else:
            print("No files downloaded.")

# Example usage
download_path = os.path.join(Path.home(), ".crawl4ai", "downloads")
os.makedirs(download_path, exist_ok=True) # Create directory if it doesn't exist

asyncio.run(download_multiple_files("https://www.python.org/downloads/windows/", download_path))
```

## Important Considerations

- **Browser Context:** Downloads are managed within the browser context. Ensure your `js_code` correctly targets the download triggers on the specific web page.
- **Waiting:** Use `wait_for` to manage the timing of the crawl process if the download might not occur immediately.
- **Error Handling:** Implement proper error handling to gracefully manage failed downloads or incorrect file paths.
- **Security:** Downloaded files should be scanned for potential security threats before use.

This guide provides a foundation for handling downloads with Crawl4AI. You can adapt these techniques to manage downloads in various scenarios and integrate them into more complex crawling workflows.
@@ -58,6 +58,51 @@ crawl4ai-download-models

This is optional but will boost the performance and speed of the crawler. You only need to do this once after installation.

## Playwright Installation Note for Ubuntu

If you encounter issues with Playwright installation on Ubuntu, you may need to install additional dependencies:

```bash
sudo apt-get install -y \
    libwoff1 \
    libopus0 \
    libwebp7 \
    libwebpdemux2 \
    libenchant-2-2 \
    libgudev-1.0-0 \
    libsecret-1-0 \
    libhyphen0 \
    libgdk-pixbuf2.0-0 \
    libegl1 \
    libnotify4 \
    libxslt1.1 \
    libevent-2.1-7 \
    libgles2 \
    libxcomposite1 \
    libatk1.0-0 \
    libatk-bridge2.0-0 \
    libepoxy0 \
    libgtk-3-0 \
    libharfbuzz-icu0 \
    libgstreamer-gl1.0-0 \
    libgstreamer-plugins-bad1.0-0 \
    gstreamer1.0-plugins-good \
    gstreamer1.0-plugins-bad \
    libxt6 \
    libxaw7 \
    xvfb \
    fonts-noto-color-emoji \
    libfontconfig \
    libfreetype6 \
    xfonts-cyrillic \
    xfonts-scalable \
    fonts-liberation \
    fonts-ipafont-gothic \
    fonts-wqy-zenhei \
    fonts-tlwg-loma-otf \
    fonts-freefont-ttf
```
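
Alternatively, recent Playwright releases can resolve their own system dependencies; assuming the Playwright CLI installed alongside Crawl4AI:

```bash
# Let Playwright install browsers together with the required system packages
python -m playwright install --with-deps chromium
```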

## Option 2: Using Docker (Coming Soon)

Docker support for Crawl4AI is currently in progress and will be available soon. This will allow you to run Crawl4AI in a containerized environment, ensuring consistency across different systems.
235
docs/md_v2/basic/prefix-based-input.md
Normal file
@@ -0,0 +1,235 @@
# Prefix-Based Input Handling in Crawl4AI

This guide will walk you through using the Crawl4AI library to crawl web pages, local HTML files, and raw HTML strings. We'll demonstrate these capabilities using a Wikipedia page as an example.

## Table of Contents

- [Prefix-Based Input Handling in Crawl4AI](#prefix-based-input-handling-in-crawl4ai)
  - [Table of Contents](#table-of-contents)
  - [Crawling a Web URL](#crawling-a-web-url)
  - [Crawling a Local HTML File](#crawling-a-local-html-file)
  - [Crawling Raw HTML Content](#crawling-raw-html-content)
  - [Complete Example](#complete-example)
  - [**How It Works**](#how-it-works)
  - [**Running the Example**](#running-the-example)
  - [Conclusion](#conclusion)

---

### Crawling a Web URL

To crawl a live web page, provide the URL starting with `http://` or `https://`.

```python
import asyncio
from crawl4ai import AsyncWebCrawler

async def crawl_web():
    async with AsyncWebCrawler(verbose=True) as crawler:
        result = await crawler.arun(url="https://en.wikipedia.org/wiki/apple", bypass_cache=True)
        if result.success:
            print("Markdown Content:")
            print(result.markdown)
        else:
            print(f"Failed to crawl: {result.error_message}")

asyncio.run(crawl_web())
```

### Crawling a Local HTML File

To crawl a local HTML file, prefix the file path with `file://`.

```python
import asyncio
from crawl4ai import AsyncWebCrawler

async def crawl_local_file():
    local_file_path = "/path/to/apple.html" # Replace with your file path
    file_url = f"file://{local_file_path}"

    async with AsyncWebCrawler(verbose=True) as crawler:
        result = await crawler.arun(url=file_url, bypass_cache=True)
        if result.success:
            print("Markdown Content from Local File:")
            print(result.markdown)
        else:
            print(f"Failed to crawl local file: {result.error_message}")

asyncio.run(crawl_local_file())
```

### Crawling Raw HTML Content

To crawl raw HTML content, prefix the HTML string with `raw:`.

```python
import asyncio
from crawl4ai import AsyncWebCrawler

async def crawl_raw_html():
    raw_html = "<html><body><h1>Hello, World!</h1></body></html>"
    raw_html_url = f"raw:{raw_html}"

    async with AsyncWebCrawler(verbose=True) as crawler:
        result = await crawler.arun(url=raw_html_url, bypass_cache=True)
        if result.success:
            print("Markdown Content from Raw HTML:")
            print(result.markdown)
        else:
            print(f"Failed to crawl raw HTML: {result.error_message}")

asyncio.run(crawl_raw_html())
```
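
Under the hood, the three input styles differ only by their prefix. A minimal sketch of that dispatch rule (illustrative, not the library's internal code):

```python
# Illustrative prefix dispatch (not Crawl4AI's internal implementation)
def classify_input(url: str) -> str:
    if url.startswith(("http://", "https://")):
        return "web"
    if url.startswith("file://"):
        return "local file"
    if url.startswith("raw:"):
        return "raw HTML"
    raise ValueError(f"Unrecognized input style: {url[:40]!r}")

print(classify_input("https://en.wikipedia.org/wiki/apple"))  # web
print(classify_input("file:///path/to/apple.html"))           # local file
print(classify_input("raw:<html><body>Hi</body></html>"))     # raw HTML
```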

---

## Complete Example

Below is a comprehensive script that:

1. **Crawls the Wikipedia page for "Apple".**
2. **Saves the HTML content to a local file (`apple.html`).**
3. **Crawls the local HTML file and verifies the markdown length matches the original crawl.**
4. **Crawls the raw HTML content from the saved file and verifies consistency.**

```python
import os
import sys
import asyncio
from pathlib import Path

# Adjust the parent directory to include the crawl4ai module
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(parent_dir)

from crawl4ai import AsyncWebCrawler

async def main():
    # Define the URL to crawl
    wikipedia_url = "https://en.wikipedia.org/wiki/apple"

    # Define the path to save the HTML file
    # Save the file in the same directory as the script
    script_dir = Path(__file__).parent
    html_file_path = script_dir / "apple.html"

    async with AsyncWebCrawler(verbose=True) as crawler:
        print("\n=== Step 1: Crawling the Wikipedia URL ===")
        # Crawl the Wikipedia URL
        result = await crawler.arun(url=wikipedia_url, bypass_cache=True)

        # Check if crawling was successful
        if not result.success:
            print(f"Failed to crawl {wikipedia_url}: {result.error_message}")
            return

        # Save the HTML content to a local file
        with open(html_file_path, 'w', encoding='utf-8') as f:
            f.write(result.html)
        print(f"Saved HTML content to {html_file_path}")

        # Store the length of the generated markdown
        web_crawl_length = len(result.markdown)
        print(f"Length of markdown from web crawl: {web_crawl_length}\n")

        print("=== Step 2: Crawling from the Local HTML File ===")
        # Construct the file URL with 'file://' prefix
        file_url = f"file://{html_file_path.resolve()}"

        # Crawl the local HTML file
        local_result = await crawler.arun(url=file_url, bypass_cache=True)

        # Check if crawling was successful
        if not local_result.success:
            print(f"Failed to crawl local file {file_url}: {local_result.error_message}")
            return

        # Store the length of the generated markdown from local file
        local_crawl_length = len(local_result.markdown)
        print(f"Length of markdown from local file crawl: {local_crawl_length}")

        # Compare the lengths
        assert web_crawl_length == local_crawl_length, (
            f"Markdown length mismatch: Web crawl ({web_crawl_length}) != Local file crawl ({local_crawl_length})"
        )
        print("✅ Markdown length matches between web crawl and local file crawl.\n")

        print("=== Step 3: Crawling Using Raw HTML Content ===")
        # Read the HTML content from the saved file
        with open(html_file_path, 'r', encoding='utf-8') as f:
            raw_html_content = f.read()

        # Prefix the raw HTML content with 'raw:'
        raw_html_url = f"raw:{raw_html_content}"

        # Crawl using the raw HTML content
        raw_result = await crawler.arun(url=raw_html_url, bypass_cache=True)

        # Check if crawling was successful
        if not raw_result.success:
            print(f"Failed to crawl raw HTML content: {raw_result.error_message}")
            return

        # Store the length of the generated markdown from raw HTML
        raw_crawl_length = len(raw_result.markdown)
        print(f"Length of markdown from raw HTML crawl: {raw_crawl_length}")

        # Compare the lengths
        assert web_crawl_length == raw_crawl_length, (
            f"Markdown length mismatch: Web crawl ({web_crawl_length}) != Raw HTML crawl ({raw_crawl_length})"
        )
        print("✅ Markdown length matches between web crawl and raw HTML crawl.\n")

        print("All tests passed successfully!")

    # Clean up by removing the saved HTML file
    if html_file_path.exists():
        os.remove(html_file_path)
        print(f"Removed the saved HTML file: {html_file_path}")

# Run the main function
if __name__ == "__main__":
    asyncio.run(main())
```

### **How It Works**

1. **Step 1: Crawl the Web URL**
   - Crawls `https://en.wikipedia.org/wiki/apple`.
   - Saves the HTML content to `apple.html`.
   - Records the length of the generated markdown.

2. **Step 2: Crawl from the Local HTML File**
   - Uses the `file://` prefix to crawl `apple.html`.
   - Ensures the markdown length matches the original web crawl.

3. **Step 3: Crawl Using Raw HTML Content**
   - Reads the HTML from `apple.html`.
   - Prefixes it with `raw:` and crawls.
   - Verifies the markdown length matches the previous results.

4. **Cleanup**
   - Deletes the `apple.html` file after testing.

### **Running the Example**

1. **Save the Script:**
   - Save the above code as `test_crawl4ai.py` in your project directory.

2. **Execute the Script:**
   - Run the script using:
     ```bash
     python test_crawl4ai.py
     ```

3. **Observe the Output:**
   - The script will print logs detailing each step.
   - Assertions ensure consistency across different crawling methods.
   - Upon success, it confirms that all markdown lengths match.

---

## Conclusion

With the new prefix-based input handling in **Crawl4AI**, you can effortlessly crawl web URLs, local HTML files, and raw HTML strings using a unified `url` parameter. This enhancement simplifies the API usage and provides greater flexibility for diverse crawling scenarios.
@@ -8,7 +8,7 @@ First, let's import the necessary modules and create an instance of `AsyncWebCra

```python
import asyncio
-from crawl4ai import AsyncWebCrawler
+from crawl4ai import AsyncWebCrawler, CacheMode

async def main():
    async with AsyncWebCrawler(verbose=True) as crawler:
@@ -42,7 +42,7 @@ async def capture_and_save_screenshot(url: str, output_path: str):
        result = await crawler.arun(
            url=url,
            screenshot=True,
-           bypass_cache=True
+           cache_mode=CacheMode.BYPASS
        )

        if result.success and result.screenshot:
@@ -62,15 +62,15 @@ Crawl4AI supports multiple browser engines. Here's how to use different browsers
```python
# Use Firefox
async with AsyncWebCrawler(browser_type="firefox", verbose=True, headless=True) as crawler:
-    result = await crawler.arun(url="https://www.example.com", bypass_cache=True)
+    result = await crawler.arun(url="https://www.example.com", cache_mode=CacheMode.BYPASS)

# Use WebKit
async with AsyncWebCrawler(browser_type="webkit", verbose=True, headless=True) as crawler:
-    result = await crawler.arun(url="https://www.example.com", bypass_cache=True)
+    result = await crawler.arun(url="https://www.example.com", cache_mode=CacheMode.BYPASS)

# Use Chromium (default)
async with AsyncWebCrawler(verbose=True, headless=True) as crawler:
-    result = await crawler.arun(url="https://www.example.com", bypass_cache=True)
+    result = await crawler.arun(url="https://www.example.com", cache_mode=CacheMode.BYPASS)
```

### User Simulation 🎭
@@ -81,7 +81,7 @@ Simulate real user behavior to avoid detection:
async with AsyncWebCrawler(verbose=True, headless=True) as crawler:
    result = await crawler.arun(
        url="YOUR-URL-HERE",
-       bypass_cache=True,
+       cache_mode=CacheMode.BYPASS,
        simulate_user=True, # Causes random mouse movements and clicks
        override_navigator=True # Makes the browser appear more like a real user
    )
@@ -99,7 +99,7 @@ async def main():
        print(f"First crawl result: {result1.markdown[:100]}...")

        # Force to crawl again
-       result2 = await crawler.arun(url="https://www.nbcnews.com/business", bypass_cache=True)
+       result2 = await crawler.arun(url="https://www.nbcnews.com/business", cache_mode=CacheMode.BYPASS)
        print(f"Second crawl result: {result2.markdown[:100]}...")

asyncio.run(main())
@@ -189,7 +189,7 @@ extraction_strategy = LLMExtractionStrategy(
async with AsyncWebCrawler() as crawler:
    result = await crawler.arun(
        url="https://paulgraham.com/love.html",
-       bypass_cache=True,
+       cache_mode=CacheMode.BYPASS,
        extraction_strategy=extraction_strategy
    )
```
@@ -239,7 +239,7 @@ async def crawl_dynamic_content():
                js_code=js_next_page if page > 0 else None,
                wait_for=wait_for if page > 0 else None,
                js_only=page > 0,
-               bypass_cache=True,
+               cache_mode=CacheMode.BYPASS,
                headless=False,
            )

@@ -254,7 +254,7 @@ Remove overlay elements and fit content appropriately:
async with AsyncWebCrawler(headless=False) as crawler:
    result = await crawler.arun(
        url="your-url-here",
-       bypass_cache=True,
+       cache_mode=CacheMode.BYPASS,
        word_count_threshold=10,
        remove_overlay_elements=True,
        screenshot=True
@@ -282,7 +282,7 @@ async with AsyncWebCrawler() as crawler:
    result = await crawler.arun(
        url="https://www.nbcnews.com/business",
        word_count_threshold=0,
-       bypass_cache=True,
+       cache_mode=CacheMode.BYPASS,
        verbose=False,
    )
    end = time.time()

@@ -12,7 +12,9 @@ from crawl4ai import AsyncWebCrawler

async def main():
    async with AsyncWebCrawler() as crawler:
-       result = await crawler.arun(url="https://example.com")
+       result = await crawler.arun(
+           url="https://example.com"
+       )
        print(result.markdown) # Print clean markdown content

if __name__ == "__main__":
@@ -24,7 +26,7 @@ if __name__ == "__main__":
The `arun()` method returns a `CrawlResult` object with several useful properties. Here's a quick overview (see [CrawlResult](../api/crawl-result.md) for complete details):

```python
-result = await crawler.arun(url="https://example.com")
+result = await crawler.arun(url="https://example.com", fit_markdown=True)

# Different content formats
print(result.html) # Raw HTML
@@ -81,7 +83,7 @@ Here's a more comprehensive example showing common usage patterns:

```python
import asyncio
-from crawl4ai import AsyncWebCrawler
+from crawl4ai import AsyncWebCrawler, CacheMode

async def main():
    async with AsyncWebCrawler(verbose=True) as crawler:
@@ -97,7 +99,7 @@ async def main():
                remove_overlay_elements=True,

                # Cache control
-               bypass_cache=False # Use cache if available
+               cache_mode=CacheMode.ENABLED # Use cache if available
            )

        if result.success:

28
docs/md_v2/blog/index.md
Normal file
@@ -0,0 +1,28 @@
# Crawl4AI Blog

Welcome to the Crawl4AI blog! Here you'll find detailed release notes, technical deep dives, and news about the project.

## Latest Release

### [0.4.0 - Major Content Filtering Update](releases/0.4.0.md)
*December 1, 2024*

Introducing significant improvements to content filtering, multi-threaded environment handling, and user-agent generation. This release features the new PruningContentFilter, enhanced thread safety, and improved test coverage.

[Read full release notes →](releases/0.4.0.md)

## Project History

Want to see how we got here? Check out our [complete changelog](https://github.com/unclecode/crawl4ai/blob/main/CHANGELOG.md) covering all previous versions and the evolution of Crawl4AI.

## Categories

- [Technical Deep Dives](/blog/technical) - Coming soon
- [Tutorials & Guides](/blog/tutorials) - Coming soon
- [Community Updates](/blog/community) - Coming soon

## Stay Updated

- Star us on [GitHub](https://github.com/unclecode/crawl4ai)
- Follow [@unclecode](https://twitter.com/unclecode) on Twitter
- Join our community discussions on GitHub
62
docs/md_v2/blog/releases/0.4.0.md
Normal file
@@ -0,0 +1,62 @@
# Release Summary for Version 0.4.0 (December 1, 2024)

## Overview
The 0.4.0 release introduces significant improvements to content filtering, multi-threaded environment handling, user-agent generation, and test coverage. Key highlights include the introduction of the PruningContentFilter, designed to automatically identify and extract the most valuable parts of an HTML document, as well as enhancements to the BM25ContentFilter to extend its versatility and effectiveness.

## Major Features and Enhancements

### 1. PruningContentFilter
- Introduced a new unsupervised content filtering strategy that scores and prunes less relevant nodes in an HTML document based on metrics like text and link density.
- Focuses on retaining the most valuable parts of the content, making it highly effective for extracting relevant information from complex web pages.
- Fully documented with updated README and expanded user guides.

### 2. User-Agent Generator
- Added a user-agent generator utility that resolves compatibility issues and supports customizable user-agent strings.
- By default, the generator randomizes user agents for each request, adding diversity, but users can customize it for tailored scenarios.

### 3. Enhanced Thread Safety
- Improved handling of multi-threaded environments by adding better thread locks for parallel processing, ensuring consistency and stability when running multiple threads.

### 4. Extended Content Filtering Strategies
- Users now have access to both the PruningContentFilter for unsupervised extraction and the BM25ContentFilter for supervised filtering based on user queries.
- Enhanced BM25ContentFilter with improved capabilities to process page titles, meta tags, and descriptions, allowing for more effective classification and clustering of text chunks.

### 5. Documentation Updates
- Updated examples and tutorials to promote the use of the PruningContentFilter alongside the BM25ContentFilter, providing clear instructions for selecting the appropriate filter for each use case.

### 6. Unit Test Enhancements
- Added unit tests for PruningContentFilter to ensure accuracy and reliability.
- Enhanced BM25ContentFilter tests to cover additional edge cases and performance metrics, particularly for malformed HTML inputs.

## Revised Change Logs for Version 0.4.0

### PruningContentFilter (Dec 01, 2024)
- Introduced the PruningContentFilter to optimize content extraction by pruning less relevant HTML nodes.
- **Affected Files:**
  - **crawl4ai/content_filter_strategy.py**: Added a scoring-based pruning algorithm.
  - **README.md**: Updated to include PruningContentFilter usage.
  - **docs/md_v2/basic/content_filtering.md**: Expanded user documentation, detailing the use and benefits of PruningContentFilter.

### Unit Tests for PruningContentFilter (Dec 01, 2024)
- Added comprehensive unit tests for PruningContentFilter to ensure correctness and efficiency.
- **Affected Files:**
  - **tests/async/test_content_filter_prune.py**: Created tests covering different pruning scenarios to ensure stability and correctness.

### Enhanced BM25ContentFilter Tests (Dec 01, 2024)
- Expanded tests to cover additional extraction scenarios and performance metrics, improving robustness.
- **Affected Files:**
  - **tests/async/test_content_filter_bm25.py**: Added tests for edge cases, including malformed HTML inputs.

### Documentation and Example Updates (Dec 01, 2024)
- Revised examples to illustrate the use of PruningContentFilter alongside existing content filtering methods.
- **Affected Files:**
  - **docs/examples/quickstart_async.py**: Enhanced example clarity and usability for new users.

## Experimental Features
- The PruningContentFilter is still under experimental development, and we continue to gather feedback for further refinements.

## Conclusion
This release significantly enhances the content extraction capabilities of Crawl4AI with the introduction of the PruningContentFilter, improved supervised filtering with BM25ContentFilter, and robust multi-threaded handling. Additionally, the user-agent generator provides much-needed versatility, resolving compatibility issues faced by many users.

Users are encouraged to experiment with the new content filtering methods to determine which best suits their needs.
@@ -113,4 +113,4 @@ Here’s a clear and focused outline for the **Media Handling: Images, Videos, a

---

-This outline provides users with a complete guide to handling images, videos, and audio in Crawl4AI, using metadata to enhance relevance and precision in multimedia extraction.
+This outline provides users with a complete guide to handling images, videos, and audio in Crawl4AI, using metadata to enhance relevance and precision in multimedia extraction.
@@ -183,4 +183,4 @@ Here’s a detailed outline for the **JSON-CSS Extraction Strategy** video, cove

---

-This outline covers each JSON-CSS Extraction option in Crawl4AI, with practical examples and schema configurations, making it a thorough guide for users.
+This outline covers each JSON-CSS Extraction option in Crawl4AI, with practical examples and schema configurations, making it a thorough guide for users.
@@ -52,7 +52,7 @@ Here’s a comprehensive outline for the **LLM Extraction Strategy** video, cove
            extraction_type="schema",
            instruction="Extract model names and fees for input and output tokens from the page."
        ),
-       bypass_cache=True
+       cache_mode=CacheMode.BYPASS
    )
    print(result.extracted_content)
```
@@ -98,7 +98,7 @@ Here’s a comprehensive outline for the **LLM Extraction Strategy** video, cove
    result = await crawler.arun(
        url="https://example.com/some-article",
        extraction_strategy=extraction_strategy,
-       bypass_cache=True
+       cache_mode=CacheMode.BYPASS
    )
    print(result.extracted_content)
```
@@ -150,4 +150,4 @@ Here’s a comprehensive outline for the **LLM Extraction Strategy** video, cove

---

-This outline explains LLM Extraction in Crawl4AI, with examples showing how to extract structured data using custom schemas and instructions. It demonstrates flexibility with multiple providers, ensuring practical application for different use cases.
+This outline explains LLM Extraction in Crawl4AI, with examples showing how to extract structured data using custom schemas and instructions. It demonstrates flexibility with multiple providers, ensuring practical application for different use cases.
@@ -55,7 +55,7 @@ Here’s a structured outline for the **Cosine Similarity Strategy** video, cove
    result = await crawler.arun(
        url=url,
        extraction_strategy=extraction_strategy,
-       bypass_cache=True
+       cache_mode=CacheMode.BYPASS
    )
    print(result.extracted_content)
```
@@ -103,7 +103,7 @@ Here’s a structured outline for the **Cosine Similarity Strategy** video, cove
    result = await crawler.arun(
        url=url,
        extraction_strategy=extraction_strategy,
-       bypass_cache=True
+       cache_mode=CacheMode.BYPASS
    )
    print(result.extracted_content)
```
@@ -133,4 +133,4 @@ Here’s a structured outline for the **Cosine Similarity Strategy** video, cove

---

-This outline covers Cosine Similarity Strategy’s speed and effectiveness, providing examples that showcase its potential for clustering various content types efficiently.
+This outline covers Cosine Similarity Strategy’s speed and effectiveness, providing examples that showcase its potential for clustering various content types efficiently.
@@ -26,7 +26,7 @@ Here's a condensed outline of the **Installation and Setup** video content:
- Walk through a simple test script to confirm the setup:
  ```python
  import asyncio
- from crawl4ai import AsyncWebCrawler
+ from crawl4ai import AsyncWebCrawler, CacheMode

  async def main():
      async with AsyncWebCrawler(verbose=True) as crawler:
@@ -1093,7 +1093,7 @@ Here’s a comprehensive outline for the **LLM Extraction Strategy** video, cove
            extraction_type="schema",
            instruction="Extract model names and fees for input and output tokens from the page."
        ),
-       bypass_cache=True
+       cache_mode=CacheMode.BYPASS
    )
    print(result.extracted_content)
```
@@ -1139,7 +1139,7 @@ Here’s a comprehensive outline for the **LLM Extraction Strategy** video, cove
    result = await crawler.arun(
        url="https://example.com/some-article",
        extraction_strategy=extraction_strategy,
-       bypass_cache=True
+       cache_mode=CacheMode.BYPASS
    )
    print(result.extracted_content)
```
@@ -1248,7 +1248,7 @@ Here’s a structured outline for the **Cosine Similarity Strategy** video, cove
    result = await crawler.arun(
        url=url,
        extraction_strategy=extraction_strategy,
-       bypass_cache=True
+       cache_mode=CacheMode.BYPASS
    )
    print(result.extracted_content)
```
@@ -1296,7 +1296,7 @@ Here’s a structured outline for the **Cosine Similarity Strategy** video, cove
    result = await crawler.arun(
        url=url,
        extraction_strategy=extraction_strategy,
-       bypass_cache=True
+       cache_mode=CacheMode.BYPASS
    )
    print(result.extracted_content)
```