Restructure deep crawling code into a dedicated module with improved organization:

- Move deep crawl logic from async_deep_crawl.py to deep_crawling/
- Create separate files for BFS strategy, filters, and scorers
- Improve code organization and maintainability
- Add optimized implementations for URL filtering and scoring
- Rename DeepCrawlHandler to DeepCrawlDecorator for clarity

BREAKING CHANGE: DeepCrawlStrategy and BreadthFirstSearchStrategy imports need to be updated to the new package structure
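For consumers of the old module, the migration looks roughly as follows. This is a sketch: the new BFS import is taken from the example file below, while the old crawl4ai.async_deep_crawl path and symbol name are inferred from the commit message and should be verified against your installed version.

# Before (old flat module; path and symbol assumed from the commit message):
#   from crawl4ai.async_deep_crawl import BreadthFirstSearchStrategy

# After (new deep_crawling package, as used in the example below):
from crawl4ai.deep_crawling.bfs_strategy import BFSDeepCrawlStrategy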
import asyncio
import time

from crawl4ai import CrawlerRunConfig, AsyncWebCrawler, CacheMode
from crawl4ai.deep_crawling.bfs_strategy import BFSDeepCrawlStrategy


async def main():
    """Example deep crawl of a documentation site."""
    # Breadth-first deep crawl: two levels deep, same-domain links only.
    config = CrawlerRunConfig(
        deep_crawl_strategy=BFSDeepCrawlStrategy(
            max_depth=2,
            include_external=False
        ),
        stream=False,
        verbose=True,
        cache_mode=CacheMode.BYPASS
    )

    async with AsyncWebCrawler() as crawler:
        # Batch mode: arun() returns the full list of results at once.
        start_time = time.perf_counter()
        print("\nStarting deep crawl in batch mode:")
        results = await crawler.arun(
            url="https://docs.crawl4ai.com",
            config=config
        )
        print(f"Crawled {len(results)} pages")
        print(f"Example page: {results[0].url}")
        print(f"Duration: {time.perf_counter() - start_time:.2f} seconds\n")

        # Streaming mode: results are yielded as each page finishes.
        print("Starting deep crawl in streaming mode:")
        config.stream = True
        start_time = time.perf_counter()
        async for result in await crawler.arun(
            url="https://docs.crawl4ai.com",
            config=config
        ):
            print(f"→ {result.url} (Depth: {result.metadata.get('depth', 0)})")
        print(f"Duration: {time.perf_counter() - start_time:.2f} seconds")


if __name__ == "__main__":
    asyncio.run(main())