refactor(dispatcher): migrate to modular dispatcher system with enhanced monitoring

Reorganize dispatcher functionality into separate components:
- Create dedicated dispatcher classes (MemoryAdaptive, Semaphore)
- Add RateLimiter for smart request throttling
- Implement CrawlerMonitor for real-time progress tracking
- Move dispatcher config from CrawlerRunConfig to separate classes

BREAKING CHANGE: Dispatcher configuration moved from CrawlerRunConfig to dedicated dispatcher classes. Users need to update their configuration approach for multi-URL crawling.
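A minimal before/after sketch using the names documented in this commit (adapt to your own config):

```python
# Before: throttling/concurrency options lived on CrawlerRunConfig
run_config = CrawlerRunConfig(
    enable_rate_limiting=True,
    rate_limit_config=RateLimitConfig(base_delay=(1.0, 2.0), max_delay=30.0, max_retries=2),
    memory_threshold_percent=70.0,
    max_session_permit=10,
)
results = await crawler.arun_many(urls, config=run_config)

# After: the same knobs move to a dedicated dispatcher passed to arun_many()
dispatcher = MemoryAdaptiveDispatcher(
    memory_threshold_percent=70.0,
    max_session_permit=10,
    rate_limiter=RateLimiter(base_delay=(1.0, 2.0), max_delay=30.0, max_retries=2),
)
results = await crawler.arun_many(urls, config=run_config, dispatcher=dispatcher)
```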
UncleCode
2025-01-11 21:10:27 +08:00
parent 3865342c93
commit 825c78a048
19 changed files with 1742 additions and 484 deletions

View File

@@ -1,67 +1,121 @@
import asyncio
import time
from rich import print
from rich.table import Table
from crawl4ai import (
    AsyncWebCrawler, BrowserConfig, CrawlerRunConfig,
    MemoryAdaptiveDispatcher, SemaphoreDispatcher,
    RateLimiter, CrawlerMonitor, DisplayMode, CacheMode
)

async def memory_adaptive(urls, browser_config, run_config):
    """Memory adaptive crawler with monitoring"""
    start = time.perf_counter()
    async with AsyncWebCrawler(config=browser_config) as crawler:
        dispatcher = MemoryAdaptiveDispatcher(
            memory_threshold_percent=70.0,
            max_session_permit=10,
            monitor=CrawlerMonitor(
                max_visible_rows=15,
                display_mode=DisplayMode.DETAILED
            )
        )
        results = await crawler.arun_many(urls, config=run_config, dispatcher=dispatcher)
    duration = time.perf_counter() - start
    return len(results), duration

async def memory_adaptive_with_rate_limit(urls, browser_config, run_config):
    """Memory adaptive crawler with rate limiting"""
    start = time.perf_counter()
    async with AsyncWebCrawler(config=browser_config) as crawler:
        dispatcher = MemoryAdaptiveDispatcher(
            memory_threshold_percent=70.0,
            max_session_permit=10,
            rate_limiter=RateLimiter(
                base_delay=(1.0, 2.0),
                max_delay=30.0,
                max_retries=2
            ),
            monitor=CrawlerMonitor(
                max_visible_rows=15,
                display_mode=DisplayMode.DETAILED
            )
        )
        results = await crawler.arun_many(urls, config=run_config, dispatcher=dispatcher)
    duration = time.perf_counter() - start
    return len(results), duration

async def semaphore(urls, browser_config, run_config):
    """Basic semaphore crawler"""
    start = time.perf_counter()
    async with AsyncWebCrawler(config=browser_config) as crawler:
        dispatcher = SemaphoreDispatcher(
            semaphore_count=5,
            monitor=CrawlerMonitor(
                max_visible_rows=15,
                display_mode=DisplayMode.DETAILED
            )
        )
        results = await crawler.arun_many(urls, config=run_config, dispatcher=dispatcher)
    duration = time.perf_counter() - start
    return len(results), duration

async def semaphore_with_rate_limit(urls, browser_config, run_config):
    """Semaphore crawler with rate limiting"""
    start = time.perf_counter()
    async with AsyncWebCrawler(config=browser_config) as crawler:
        dispatcher = SemaphoreDispatcher(
            semaphore_count=5,
            rate_limiter=RateLimiter(
                base_delay=(1.0, 2.0),
                max_delay=30.0,
                max_retries=2
            ),
            monitor=CrawlerMonitor(
                max_visible_rows=15,
                display_mode=DisplayMode.DETAILED
            )
        )
        results = await crawler.arun_many(urls, config=run_config, dispatcher=dispatcher)
    duration = time.perf_counter() - start
    return len(results), duration

def create_performance_table(results):
    """Creates a rich table showing performance results"""
    table = Table(title="Crawler Strategy Performance Comparison")
    table.add_column("Strategy", style="cyan")
    table.add_column("URLs Crawled", justify="right", style="green")
    table.add_column("Time (seconds)", justify="right", style="yellow")
    table.add_column("URLs/second", justify="right", style="magenta")

    sorted_results = sorted(results.items(), key=lambda x: x[1][1])
    for strategy, (urls_crawled, duration) in sorted_results:
        urls_per_second = urls_crawled / duration
        table.add_row(
            strategy,
            str(urls_crawled),
            f"{duration:.2f}",
            f"{urls_per_second:.2f}"
        )
    return table

async def main():
    urls = [f"https://example.com/page{i}" for i in range(1, 20)]
    browser_config = BrowserConfig(headless=True, verbose=False)
    run_config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS)

    results = {
        "Memory Adaptive": await memory_adaptive(urls, browser_config, run_config),
        "Memory Adaptive + Rate Limit": await memory_adaptive_with_rate_limit(urls, browser_config, run_config),
        "Semaphore": await semaphore(urls, browser_config, run_config),
        "Semaphore + Rate Limit": await semaphore_with_rate_limit(urls, browser_config, run_config),
    }

    table = create_performance_table(results)
    print("\nPerformance Summary:")
    print(table)

if __name__ == "__main__":
    asyncio.run(main())

View File

@@ -0,0 +1,264 @@
# Optimized Multi-URL Crawling
> **Note**: We're developing a new **executor module** that uses a sophisticated algorithm to dynamically manage multi-URL crawling, optimizing for speed and memory usage. The approaches in this document remain fully valid, but keep an eye on **Crawl4AI**'s upcoming releases for this powerful feature! Follow [@unclecode](https://twitter.com/unclecode) on X and check the changelogs to stay updated.
Crawl4AI's **AsyncWebCrawler** can handle multiple URLs in a single run, which can greatly reduce overhead and speed up crawling. This guide shows how to:
1. **Sequentially** crawl a list of URLs using the **same** session, avoiding repeated browser creation.
2. **Parallel**-crawl subsets of URLs in batches, again reusing the same browser.
When the entire process finishes, you close the browser once—**minimizing** memory and resource usage.
---
## 1. Why Avoid Simple Loops per URL?
If you naively do:
```python
for url in urls:
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(url)
```
You end up:
1. Spinning up a **new** browser for each URL
2. Closing it immediately after the single crawl
3. Potentially using a lot of CPU/memory for short-lived browser instances
4. Missing out on session reusability if you have login or ongoing states
**Better** approaches ensure you **create** the browser once, then crawl multiple URLs with minimal overhead.
---
## 2. Sequential Crawling with Session Reuse
### 2.1 Overview
1. **One** `AsyncWebCrawler` instance for **all** URLs.
2. **One** session (via `session_id`) so we can preserve local storage or cookies across URLs if needed.
3. The crawler is only closed at the **end**.
**This** is the simplest pattern if your workload is moderate (dozens to a few hundred URLs).
### 2.2 Example Code
```python
import asyncio
from typing import List
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig
from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator
async def crawl_sequential(urls: List[str]):
    print("\n=== Sequential Crawling with Session Reuse ===")

    browser_config = BrowserConfig(
        headless=True,
        # For better performance in Docker or low-memory environments:
        extra_args=["--disable-gpu", "--disable-dev-shm-usage", "--no-sandbox"],
    )
    crawl_config = CrawlerRunConfig(
        markdown_generator=DefaultMarkdownGenerator()
    )

    # Create the crawler (opens the browser)
    crawler = AsyncWebCrawler(config=browser_config)
    await crawler.start()
    try:
        session_id = "session1"  # Reuse the same session across all URLs
        for url in urls:
            result = await crawler.arun(
                url=url,
                config=crawl_config,
                session_id=session_id
            )
            if result.success:
                print(f"Successfully crawled: {url}")
                # E.g. check markdown length
                print(f"Markdown length: {len(result.markdown_v2.raw_markdown)}")
            else:
                print(f"Failed: {url} - Error: {result.error_message}")
    finally:
        # After all URLs are done, close the crawler (and the browser)
        await crawler.close()

async def main():
    urls = [
        "https://example.com/page1",
        "https://example.com/page2",
        "https://example.com/page3"
    ]
    await crawl_sequential(urls)

if __name__ == "__main__":
    asyncio.run(main())
```
**Why It's Good**:
- **One** browser launch.
- Minimal memory usage.
- If the site requires login, you can log in once in `session_id` context and preserve auth across all URLs.
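For example, you could perform the login once and then keep crawling in the same session. A sketch only: the login URL, form selectors, and `js_code` snippet below are placeholders you would adapt to your site:

```python
# One-time login inside the shared session (hypothetical form selectors)
login_config = CrawlerRunConfig(
    js_code="document.querySelector('#username').value='me';"
            "document.querySelector('#password').value='secret';"
            "document.querySelector('form').submit();",
)
await crawler.arun(url="https://example.com/login", config=login_config, session_id="session1")

# Later requests with session_id="session1" reuse the authenticated state
result = await crawler.arun(url="https://example.com/account", config=crawl_config, session_id="session1")
```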
---
## 3. Parallel Crawling with Browser Reuse
### 3.1 Overview
To speed up crawling further, you can crawl multiple URLs in **parallel** (batches or a concurrency limit). The crawler still uses **one** browser, but spawns different sessions (or the same, depending on your logic) for each task.
### 3.2 Example Code
For this example make sure to install the [psutil](https://pypi.org/project/psutil/) package.
```bash
pip install psutil
```
Then you can run the following code:
```python
import os
import sys
import psutil
import asyncio
__location__ = os.path.dirname(os.path.abspath(__file__))
__output__ = os.path.join(__location__, "output")
# Append parent directory to system path
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(parent_dir)
from typing import List
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
async def crawl_parallel(urls: List[str], max_concurrent: int = 3):
    print("\n=== Parallel Crawling with Browser Reuse + Memory Check ===")

    # We'll keep track of peak memory usage across all tasks
    peak_memory = 0
    process = psutil.Process(os.getpid())

    def log_memory(prefix: str = ""):
        nonlocal peak_memory
        current_mem = process.memory_info().rss  # in bytes
        if current_mem > peak_memory:
            peak_memory = current_mem
        print(f"{prefix} Current Memory: {current_mem // (1024 * 1024)} MB, Peak: {peak_memory // (1024 * 1024)} MB")

    # Minimal browser config
    browser_config = BrowserConfig(
        headless=True,
        verbose=False,  # corrected from 'verbos=False'
        extra_args=["--disable-gpu", "--disable-dev-shm-usage", "--no-sandbox"],
    )
    crawl_config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS)

    # Create the crawler instance
    crawler = AsyncWebCrawler(config=browser_config)
    await crawler.start()
    try:
        # We'll chunk the URLs in batches of 'max_concurrent'
        success_count = 0
        fail_count = 0
        for i in range(0, len(urls), max_concurrent):
            batch = urls[i : i + max_concurrent]
            tasks = []
            for j, url in enumerate(batch):
                # Unique session_id per concurrent sub-task
                session_id = f"parallel_session_{i + j}"
                task = crawler.arun(url=url, config=crawl_config, session_id=session_id)
                tasks.append(task)

            # Check memory usage prior to launching tasks
            log_memory(prefix=f"Before batch {i//max_concurrent + 1}: ")

            # Gather results
            results = await asyncio.gather(*tasks, return_exceptions=True)

            # Check memory usage after tasks complete
            log_memory(prefix=f"After batch {i//max_concurrent + 1}: ")

            # Evaluate results
            for url, result in zip(batch, results):
                if isinstance(result, Exception):
                    print(f"Error crawling {url}: {result}")
                    fail_count += 1
                elif result.success:
                    success_count += 1
                else:
                    fail_count += 1

        print("\nSummary:")
        print(f" - Successfully crawled: {success_count}")
        print(f" - Failed: {fail_count}")
    finally:
        print("\nClosing crawler...")
        await crawler.close()

        # Final memory log
        log_memory(prefix="Final: ")
        print(f"\nPeak memory usage (MB): {peak_memory // (1024 * 1024)}")

async def main():
    urls = [
        "https://example.com/page1",
        "https://example.com/page2",
        "https://example.com/page3",
        "https://example.com/page4"
    ]
    await crawl_parallel(urls, max_concurrent=2)

if __name__ == "__main__":
    asyncio.run(main())
```
**Notes**:
- We **reuse** the same `AsyncWebCrawler` instance for all parallel tasks, launching **one** browser.
- Each parallel sub-task might get its own `session_id` so they don't share cookies/localStorage (unless that's desired).
- We limit concurrency to `max_concurrent=2` or 3 to avoid saturating CPU/memory.
---
## 4. Performance Tips
1. **Extra Browser Args**
   - `--disable-gpu`, `--no-sandbox` can help in Docker or restricted environments.
   - `--disable-dev-shm-usage` avoids using `/dev/shm`, which can be small on some systems.
2. **Session Reuse**
   - If your site requires a login or you want to maintain local data across URLs, share the **same** `session_id`.
   - If you want isolation (each URL fresh), create unique sessions.
3. **Batching**
   - If you have **many** URLs (like thousands), you can do parallel crawling in chunks (like `max_concurrent=5`).
   - Use `arun_many()` for a built-in approach if you prefer, but the example above is often more flexible.
4. **Cache**
   - If your pages share many resources or you're re-crawling the same domain repeatedly, consider setting `cache_mode=CacheMode.ENABLED` in `CrawlerRunConfig` (see the sketch after this list).
   - If you need fresh data each time, keep `cache_mode=CacheMode.BYPASS`.
5. **Hooks**
   - You can set up global hooks for each crawler (like to block images) or per-run if you want.
   - Keep them consistent if you're reusing sessions.
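For instance, tips 1 and 4 combine into a single configuration. A minimal sketch using the same `BrowserConfig`/`CrawlerRunConfig` APIs shown above:

```python
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode

browser_config = BrowserConfig(
    headless=True,
    # Tip 1: flags that help in Docker or otherwise restricted environments
    extra_args=["--disable-gpu", "--disable-dev-shm-usage", "--no-sandbox"],
)
# Tip 4: reuse cached responses when re-crawling the same domain repeatedly
run_config = CrawlerRunConfig(cache_mode=CacheMode.ENABLED)

async with AsyncWebCrawler(config=browser_config) as crawler:
    result = await crawler.arun("https://example.com", config=run_config)
```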
---
## 5. Summary
- **One** `AsyncWebCrawler` + multiple calls to `.arun()` is far more efficient than launching a new crawler per URL.
- **Sequential** approach with a shared session is simple and memory-friendly for moderate sets of URLs.
- **Parallel** approach can speed up large crawls by concurrency, but keep concurrency balanced to avoid overhead.
- Close the crawler once at the end, ensuring the browser is only opened/closed once.
For even more advanced memory optimizations or dynamic concurrency patterns, see future sections on hooking or distributed crawling. The patterns above suffice for the majority of multi-URL scenarios—**giving you speed, simplicity, and minimal resource usage**. Enjoy your optimized crawling!

View File

@@ -1,264 +1,205 @@
# Advanced Multi-URL Crawling with Dispatchers
> **Heads Up**: Crawl4AI supports advanced dispatchers for **parallel** or **throttled** crawling, providing dynamic rate limiting and memory usage checks. The built-in `arun_many()` function uses these dispatchers to handle concurrency efficiently.

## 1. Introduction
When crawling many URLs:
- **Basic**: Use `arun()` in a loop (simple but less efficient)
- **Better**: Use `arun_many()`, which efficiently handles multiple URLs with proper concurrency control
- **Best**: Customize dispatcher behavior for your specific needs (memory management, rate limits, etc.)

**Why Dispatchers?**
- **Adaptive**: Memory-based dispatchers can pause or slow down based on system resources
- **Rate-limiting**: Built-in rate limiting with exponential backoff for 429/503 responses
- **Real-time Monitoring**: Live dashboard of ongoing tasks, memory usage, and performance
- **Flexibility**: Choose between memory-adaptive or semaphore-based concurrency

## 2. Core Components

### 2.1 Rate Limiter
```python
class RateLimiter:
    def __init__(
        base_delay: Tuple[float, float] = (1.0, 3.0),  # Random delay range between requests
        max_delay: float = 60.0,                       # Maximum backoff delay
        max_retries: int = 3,                          # Retries before giving up
        rate_limit_codes: List[int] = [429, 503]       # Status codes triggering backoff
    )
```
The RateLimiter provides:
- Random delays between requests
- Exponential backoff on rate limit responses
- Domain-specific rate limiting
- Automatic retry handling

### 2.2 Crawler Monitor
The CrawlerMonitor provides real-time visibility into crawling operations:
```python
monitor = CrawlerMonitor(
    max_visible_rows=15,               # Maximum rows in live display
    display_mode=DisplayMode.DETAILED  # DETAILED or AGGREGATED view
)
```
**Display Modes**:
1. **DETAILED**: Shows individual task status, memory usage, and timing
2. **AGGREGATED**: Displays summary statistics and overall progress

## 3. Available Dispatchers

### 3.1 MemoryAdaptiveDispatcher (Default)
Automatically manages concurrency based on system memory usage:
```python
dispatcher = MemoryAdaptiveDispatcher(
    memory_threshold_percent=70.0,  # Pause if memory exceeds this
    check_interval=1.0,             # How often to check memory
    max_session_permit=10,          # Maximum concurrent tasks
    rate_limiter=RateLimiter(       # Optional rate limiting
        base_delay=(1.0, 2.0),
        max_delay=30.0,
        max_retries=2
    ),
    monitor=CrawlerMonitor(         # Optional monitoring
        max_visible_rows=15,
        display_mode=DisplayMode.DETAILED
    )
)
```

### 3.2 SemaphoreDispatcher
Provides simple concurrency control with a fixed limit:
```python
dispatcher = SemaphoreDispatcher(
    semaphore_count=5,              # Fixed number of concurrent tasks
    rate_limiter=RateLimiter(       # Optional rate limiting
        base_delay=(0.5, 1.0),
        max_delay=10.0
    ),
    monitor=CrawlerMonitor(         # Optional monitoring
        max_visible_rows=15,
        display_mode=DisplayMode.DETAILED
    )
)
```

## 4. Usage Examples

### 4.1 Simple Usage (Default MemoryAdaptiveDispatcher)
```python
async with AsyncWebCrawler(config=browser_config) as crawler:
    results = await crawler.arun_many(urls, config=run_config)
```

### 4.2 Memory Adaptive with Rate Limiting
```python
async def crawl_with_memory_adaptive(urls):
    browser_config = BrowserConfig(headless=True, verbose=False)
    run_config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS)
    dispatcher = MemoryAdaptiveDispatcher(
        memory_threshold_percent=70.0,
        max_session_permit=10,
        rate_limiter=RateLimiter(
            base_delay=(1.0, 2.0),
            max_delay=30.0,
            max_retries=2
        ),
        monitor=CrawlerMonitor(
            max_visible_rows=15,
            display_mode=DisplayMode.DETAILED
        )
    )
    async with AsyncWebCrawler(config=browser_config) as crawler:
        results = await crawler.arun_many(
            urls,
            config=run_config,
            dispatcher=dispatcher
        )
        return results
```

### 4.3 Semaphore with Rate Limiting
```python
async def crawl_with_semaphore(urls):
    browser_config = BrowserConfig(headless=True, verbose=False)
    run_config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS)
    dispatcher = SemaphoreDispatcher(
        semaphore_count=5,
        rate_limiter=RateLimiter(
            base_delay=(0.5, 1.0),
            max_delay=10.0
        ),
        monitor=CrawlerMonitor(
            max_visible_rows=15,
            display_mode=DisplayMode.DETAILED
        )
    )
    async with AsyncWebCrawler(config=browser_config) as crawler:
        results = await crawler.arun_many(
            urls,
            config=run_config,
            dispatcher=dispatcher
        )
        return results
```

## 5. Dispatch Results
Each crawl result includes dispatch information:
```python
@dataclass
class DispatchResult:
    task_id: str
    memory_usage: float
    peak_memory: float
    start_time: datetime
    end_time: datetime
    error_message: str = ""
```
Access via `result.dispatch_result`:
```python
for result in results:
    if result.success:
        dr = result.dispatch_result
        print(f"URL: {result.url}")
        print(f"Memory: {dr.memory_usage:.1f}MB")
        print(f"Duration: {dr.end_time - dr.start_time}")
```

## 6. Summary
1. **Two Dispatcher Types**:
   - MemoryAdaptiveDispatcher (default): Dynamic concurrency based on memory
   - SemaphoreDispatcher: Fixed concurrency limit
2. **Optional Components**:
   - RateLimiter: Smart request pacing and backoff
   - CrawlerMonitor: Real-time progress visualization
3. **Key Benefits**:
   - Automatic memory management
   - Built-in rate limiting
   - Live progress monitoring
   - Flexible concurrency control

Choose the dispatcher that best fits your needs:
- **MemoryAdaptiveDispatcher**: For large crawls or limited resources
- **SemaphoreDispatcher**: For simple, fixed-concurrency scenarios

View File

@@ -1,7 +1,3 @@
# `arun()` Parameter Guide (New Approach)
In Crawl4AI's **latest** configuration model, nearly all parameters that once went directly to `arun()` are now part of **`CrawlerRunConfig`**. When calling `arun()`, you provide:

docs/md_v2/api/arun_many.md (new file, +100 lines)
View File

@@ -0,0 +1,100 @@
# `arun_many(...)` Reference
> **Note**: This function is very similar to [`arun()`](./arun.md) but focused on **concurrent** or **batch** crawling. If you're unfamiliar with `arun()` usage, please read that doc first, then review this for differences.
## Function Signature
```python
async def arun_many(
    urls: Union[List[str], List[Any]],
    config: Optional[CrawlerRunConfig] = None,
    dispatcher: Optional[BaseDispatcher] = None,
    ...
) -> List[CrawlResult]:
    """
    Crawl multiple URLs concurrently or in batches.

    :param urls: A list of URLs (or tasks) to crawl.
    :param config: (Optional) A default `CrawlerRunConfig` applying to each crawl.
    :param dispatcher: (Optional) A concurrency controller (e.g. MemoryAdaptiveDispatcher).
    ...
    :return: A list of `CrawlResult` objects, one per URL.
    """
```
## Differences from `arun()`
1. **Multiple URLs**:
   - Instead of crawling a single URL, you pass a list of them (strings or tasks).
   - The function returns a **list** of `CrawlResult`, in the same order as `urls`.

2. **Concurrency & Dispatchers**:
   - The **`dispatcher`** param allows advanced concurrency control.
   - If omitted, a default dispatcher (like `MemoryAdaptiveDispatcher`) is used internally.
   - Dispatchers handle concurrency, rate limiting, and memory-based adaptive throttling (see [Multi-URL Crawling](../advanced/multi-url-crawling.md)).

3. **Parallel Execution**:
   - `arun_many()` can run multiple requests concurrently under the hood.
   - Each `CrawlResult` might also include a **`dispatch_result`** with concurrency details (like memory usage, start/end times).
### Basic Example
```python
# Minimal usage: The default dispatcher will be used
results = await crawler.arun_many(
    urls=["https://site1.com", "https://site2.com"],
    config=my_run_config
)
for res in results:
    if res.success:
        print(res.url, "crawled OK!")
    else:
        print("Failed:", res.url, "-", res.error_message)
```
### With a Custom Dispatcher
```python
dispatcher = MemoryAdaptiveDispatcher(
    memory_threshold_percent=70.0,
    max_session_permit=10
)

results = await crawler.arun_many(
    urls=["https://site1.com", "https://site2.com", "https://site3.com"],
    config=my_run_config,
    dispatcher=dispatcher
)
```
**Key Points**:
- Each URL is processed by the same or separate sessions, depending on the dispatcher's strategy.
- `dispatch_result` in each `CrawlResult` (if using concurrency) can hold memory and timing info.
- If you need to handle authentication or session IDs, pass them in each individual task or within your run config.
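A sketch of the run-config route, assuming a session established beforehand and that your version exposes `session_id` on `CrawlerRunConfig` (the session name here is illustrative):

```python
run_config = CrawlerRunConfig(
    cache_mode=CacheMode.BYPASS,
    session_id="auth_session",  # every URL in the batch reuses this session
)
results = await crawler.arun_many(urls, config=run_config)
```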
### Return Value
A **list** of [`CrawlResult`](./crawl-result.md) objects, one per URL. You can iterate to check `result.success` or read each item's `extracted_content`, `markdown`, or `dispatch_result`.
---
## Dispatcher Reference
- **`MemoryAdaptiveDispatcher`**: Dynamically manages concurrency based on system memory usage.
- **`SemaphoreDispatcher`**: Fixed concurrency limit, simpler but less adaptive.
For advanced usage or custom settings, see [Multi-URL Crawling with Dispatchers](../advanced/multi-url-crawling.md).
---
## Common Pitfalls
1. **Large Lists**: If you pass thousands of URLs, be mindful of memory or rate-limits. A dispatcher can help.
2. **Session Reuse**: If you need specialized logins or persistent contexts, ensure your dispatcher or tasks handle sessions accordingly.
3. **Error Handling**: Each `CrawlResult` might fail for different reasons—always check `result.success` or the `error_message` before proceeding (see the sketch below).
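One pattern for the last pitfall: collect the failures and re-run only those URLs (a sketch; the retry policy is up to you):

```python
results = await crawler.arun_many(urls, config=run_config)
failed_urls = [r.url for r in results if not r.success]
if failed_urls:
    # One retry pass over just the failures
    retry_results = await crawler.arun_many(failed_urls, config=run_config)
```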
---
## Conclusion
Use `arun_many()` when you want to **crawl multiple URLs** simultaneously or in controlled parallel tasks. If you need advanced concurrency features (like memory-based adaptive throttling or complex rate-limiting), provide a **dispatcher**. Each result is a standard `CrawlResult`, possibly augmented with concurrency stats (`dispatch_result`) for deeper inspection. For more details on concurrency logic and dispatchers, see the [Advanced Multi-URL Crawling](../advanced/multi-url-crawling.md) docs.

View File

@@ -130,51 +130,88 @@ For **backward** compatibility, `arun()` can still accept direct arguments like
---
## 4. Batch Processing: `arun_many()`
```python
async def arun_many(
    self,
    urls: List[str],
    config: Optional[CrawlerRunConfig] = None,
    # Legacy parameters maintained for backwards compatibility...
) -> List[CrawlResult]:
    """
    Process multiple URLs with intelligent rate limiting and resource monitoring.
    """
```

### 4.1 Resource-Aware Crawling
The `arun_many()` method now uses an intelligent dispatcher that:
- Monitors system memory usage
- Implements adaptive rate limiting
- Provides detailed progress monitoring
- Manages concurrent crawls efficiently

### 4.2 Example Usage
```python
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, RateLimitConfig
from crawl4ai.dispatcher import DisplayMode

# Configure browser
browser_cfg = BrowserConfig(headless=True)

# Configure crawler with rate limiting
run_cfg = CrawlerRunConfig(
    # Enable rate limiting
    enable_rate_limiting=True,
    rate_limit_config=RateLimitConfig(
        base_delay=(1.0, 2.0),       # Random delay between 1-2 seconds
        max_delay=30.0,              # Maximum delay after rate limit hits
        max_retries=2,               # Number of retries before giving up
        rate_limit_codes=[429, 503]  # Status codes that trigger rate limiting
    ),
    # Resource monitoring
    memory_threshold_percent=70.0,   # Pause if memory exceeds this
    check_interval=0.5,              # How often to check resources
    max_session_permit=3,            # Maximum concurrent crawls
    display_mode=DisplayMode.DETAILED.value  # Show detailed progress
)

urls = [
    "https://example.com/page1",
    "https://example.com/page2",
    "https://example.com/page3"
]

async with AsyncWebCrawler(config=browser_cfg) as crawler:
    results = await crawler.arun_many(urls, config=run_cfg)
    for result in results:
        print(f"URL: {result.url}, Success: {result.success}")
```

### 4.3 Key Features
1. **Rate Limiting**
   - Automatic delay between requests
   - Exponential backoff on rate limit detection
   - Domain-specific rate limiting
   - Configurable retry strategy
2. **Resource Monitoring**
   - Memory usage tracking
   - Adaptive concurrency based on system load
   - Automatic pausing when resources are constrained
3. **Progress Monitoring**
   - Detailed or aggregated progress display
   - Real-time status updates
   - Memory usage statistics
4. **Error Handling**
   - Graceful handling of rate limits
   - Automatic retries with backoff
   - Detailed error reporting
---

View File

@@ -26,6 +26,7 @@ class CrawlResult(BaseModel):
```python
    response_headers: Optional[dict] = None
    status_code: Optional[int] = None
    ssl_certificate: Optional[SSLCertificate] = None
    dispatch_result: Optional[DispatchResult] = None
    ...
```
@@ -262,7 +263,31 @@ if result.metadata:
---
## 6. `dispatch_result` (optional)
A `DispatchResult` object providing additional concurrency and resource usage information when crawling URLs in parallel (e.g., via `arun_many()` with custom dispatchers). It contains:
- **`task_id`**: A unique identifier for the parallel task.
- **`memory_usage`** (float): The memory (in MB) used at the time of completion.
- **`peak_memory`** (float): The peak memory usage (in MB) recorded during the task's execution.
- **`start_time`** / **`end_time`** (datetime): Time range for this crawling task.
- **`error_message`** (str): Any dispatcher- or concurrency-related error encountered.
```python
# Example usage:
for result in results:
    if result.success and result.dispatch_result:
        dr = result.dispatch_result
        print(f"URL: {result.url}, Task ID: {dr.task_id}")
        print(f"Memory: {dr.memory_usage:.1f} MB (Peak: {dr.peak_memory:.1f} MB)")
        print(f"Duration: {dr.end_time - dr.start_time}")
```
> **Note**: This field is typically populated when using `arun_many(...)` alongside a **dispatcher** (e.g., `MemoryAdaptiveDispatcher` or `SemaphoreDispatcher`). If no concurrency or dispatcher is used, `dispatch_result` may remain `None`.
---
## 7. Example: Accessing Everything
```python
async def handle_result(result: CrawlResult):
    ...
```
@@ -306,7 +331,7 @@ async def handle_result(result: CrawlResult):
---
## 8. Key Points & Future
1. **`markdown_v2` vs `markdown`**
- Right now, `markdown_v2` is the more robust container (`MarkdownGenerationResult`), providing **raw_markdown**, **markdown_with_citations**, references, plus possible **fit_markdown**.

View File

@@ -157,7 +157,32 @@ Use these for link-level content filtering (often to keep crawls “internal”
---
### G) **Rate Limiting & Resource Management**
| **Parameter** | **Type / Default** | **What It Does** |
|------------------------------|----------------------------------------|---------------------------------------------------------------------------------------------------------------------------|
| **`enable_rate_limiting`** | `bool` (default: `False`) | Enable intelligent rate limiting for multiple URLs |
| **`rate_limit_config`** | `RateLimitConfig` (default: `None`) | Configuration for rate limiting behavior |
The `RateLimitConfig` class has these fields:
| **Field** | **Type / Default** | **What It Does** |
|--------------------|----------------------------------------|---------------------------------------------------------------------------------------------------------------------------|
| **`base_delay`** | `Tuple[float, float]` (1.0, 3.0) | Random delay range between requests to the same domain |
| **`max_delay`** | `float` (60.0) | Maximum delay after rate limit detection |
| **`max_retries`** | `int` (3) | Number of retries before giving up on rate-limited requests |
| **`rate_limit_codes`** | `List[int]` ([429, 503]) | HTTP status codes that trigger rate limiting behavior |
Additional resource-management parameters on `CrawlerRunConfig`:
| **Parameter** | **Type / Default** | **What It Does** |
|-------------------------------|----------------------------------------|---------------------------------------------------------------------------------------------------------------------------|
| **`memory_threshold_percent`** | `float` (70.0) | Maximum memory usage before pausing new crawls |
| **`check_interval`** | `float` (1.0) | How often to check system resources (in seconds) |
| **`max_session_permit`** | `int` (20) | Maximum number of concurrent crawl sessions |
| **`display_mode`** | `str` (`None`, "DETAILED", "AGGREGATED") | How to display progress information |
---
### H) **Debug & Logging**
| **Parameter** | **Type / Default** | **What It Does** |
|----------------|--------------------|---------------------------------------------------------------------------|
@@ -170,7 +195,7 @@ Use these for link-level content filtering (often to keep crawls “internal”
```python
import asyncio
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode, RateLimitConfig

async def main():
    # Configure the browser
    ...
```
@@ -190,7 +215,18 @@ async def main():
```python
        excluded_tags=["script", "style"],
        exclude_external_links=True,
        wait_for="css:.article-loaded",
        screenshot=True,
        enable_rate_limiting=True,
        rate_limit_config=RateLimitConfig(
            base_delay=(1.0, 3.0),
            max_delay=60.0,
            max_retries=3,
            rate_limit_codes=[429, 503]
        ),
        memory_threshold_percent=70.0,
        check_interval=1.0,
        max_session_permit=20,
        display_mode="DETAILED"
    )

    async with AsyncWebCrawler(config=browser_cfg) as crawler:
        ...
```
@@ -223,4 +259,3 @@ if __name__ == "__main__":
- **Use** `BrowserConfig` for **global** browser settings: engine, headless, proxy, user agent.
- **Use** `CrawlerRunConfig` for each crawl's **context**: how to filter content, handle caching, wait for dynamic elements, or run JS.
- **Pass** both configs to `AsyncWebCrawler` (the `BrowserConfig`) and then to `arun()` (the `CrawlerRunConfig`).

View File

@@ -116,6 +116,12 @@ class CrawlerRunConfig:
```python
    wait_for=None,
    screenshot=False,
    pdf=False,
    enable_rate_limiting=False,
    rate_limit_config=None,
    memory_threshold_percent=70.0,
    check_interval=1.0,
    max_session_permit=20,
    display_mode=None,
    verbose=True,
    # ... other advanced parameters omitted
):
```
@@ -156,6 +162,58 @@ class CrawlerRunConfig:
   - Logs additional runtime details.
   - Overlaps with the browser's verbosity if also set to `True` in `BrowserConfig`.

9. **`enable_rate_limiting`**:
   - If `True`, enables rate limiting for batch processing.
   - Requires `rate_limit_config` to be set.

10. **`rate_limit_config`**:
    - A `RateLimitConfig` object controlling rate limiting behavior.
    - See below for details.

11. **`memory_threshold_percent`**:
    - The memory threshold (as a percentage) to monitor.
    - If exceeded, the crawler will pause or slow down.

12. **`check_interval`**:
    - The interval (in seconds) to check system resources.
    - Affects how often memory and CPU usage are monitored.

13. **`max_session_permit`**:
    - The maximum number of concurrent crawl sessions.
    - Helps prevent overwhelming the system.

14. **`display_mode`**:
    - The display mode for progress information (`DETAILED` or `AGGREGATED`).
    - Affects how much information is printed during the crawl.
### Rate Limiting & Resource Management
For batch processing with `arun_many()`, you can enable intelligent rate limiting:
```python
from crawl4ai import RateLimitConfig

config = CrawlerRunConfig(
    enable_rate_limiting=True,
    rate_limit_config=RateLimitConfig(
        base_delay=(1.0, 3.0),       # Random delay range
        max_delay=60.0,              # Max delay after rate limits
        max_retries=3,               # Retries before giving up
        rate_limit_codes=[429, 503]  # Status codes to watch
    ),
    memory_threshold_percent=70.0,   # Memory threshold
    check_interval=1.0,              # Resource check interval
    max_session_permit=20,           # Max concurrent crawls
    display_mode="DETAILED"          # Progress display mode
)
```
This configuration:
- Implements intelligent rate limiting per domain
- Monitors system resources
- Provides detailed progress information
- Manages concurrent crawls efficiently
**Minimal Example**:

@@ -164,7 +222,14 @@
```python
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig

crawl_conf = CrawlerRunConfig(
    js_code="document.querySelector('button#loadMore')?.click()",
    wait_for="css:.loaded-content",
    screenshot=True,
    enable_rate_limiting=True,
    rate_limit_config=RateLimitConfig(
        base_delay=(1.0, 3.0),
        max_delay=60.0,
        max_retries=3,
        rate_limit_codes=[429, 503]
    )
)

async with AsyncWebCrawler() as crawler:
    ...
```
@@ -205,7 +270,14 @@ async def main():
```python
async def main():
    ...
    # 3) Crawler run config: skip cache, use extraction
    run_conf = CrawlerRunConfig(
        extraction_strategy=extraction,
        cache_mode=CacheMode.BYPASS,
        enable_rate_limiting=True,
        rate_limit_config=RateLimitConfig(
            base_delay=(1.0, 3.0),
            max_delay=60.0,
            max_retries=3,
            rate_limit_codes=[429, 503]
        )
    )

    async with AsyncWebCrawler(config=browser_conf) as crawler:
        ...
```

View File

@@ -1,7 +1,3 @@
# Getting Started with Crawl4AI
Welcome to **Crawl4AI**, an open-source LLM-friendly Web Crawler & Scraper. In this tutorial, you'll:
@@ -254,7 +250,39 @@ if __name__ == "__main__":
---
## 7. Multi-URL Concurrency (Preview)
If you need to crawl multiple URLs in **parallel**, you can use `arun_many()`. By default, Crawl4AI employs a **MemoryAdaptiveDispatcher**, automatically adjusting concurrency based on system resources. Here's a quick glimpse:
```python
import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, CacheMode

async def quick_parallel_example():
    urls = [
        "https://example.com/page1",
        "https://example.com/page2",
        "https://example.com/page3"
    ]
    run_conf = CrawlerRunConfig(cache_mode=CacheMode.BYPASS)

    async with AsyncWebCrawler() as crawler:
        results = await crawler.arun_many(urls, config=run_conf)
        for res in results:
            if res.success:
                print(f"[OK] {res.url}, length: {len(res.markdown_v2.raw_markdown)}")
            else:
                print(f"[ERROR] {res.url} => {res.error_message}")

if __name__ == "__main__":
    asyncio.run(quick_parallel_example())
```
For more advanced concurrency (e.g., a **semaphore-based** approach, **adaptive memory usage throttling**, or customized rate limiting), see [Advanced Multi-URL Crawling](../advanced/multi-url-crawling.md).
## 8. Dynamic Content Example
Some sites require multiple “page clicks” or dynamic JavaScript updates. Below is an example showing how to **click** a “Next Page” button and wait for new commits to load on GitHub, using **`BrowserConfig`** and **`CrawlerRunConfig`**:
@@ -343,7 +371,7 @@ if __name__ == "__main__":
---
## 9. Next Steps
Congratulations! You have: