Add Async Version, JsonCss Extrator
This commit is contained in:
81
tests/async/test_basic_crawling.py
Normal file
81
tests/async/test_basic_crawling.py
Normal file
@@ -0,0 +1,81 @@
|
||||
import os
|
||||
import sys
|
||||
import pytest
|
||||
import asyncio
|
||||
import time
|
||||
|
||||
# Add the parent directory to the Python path
|
||||
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
||||
sys.path.append(parent_dir)
|
||||
|
||||
from crawl4ai.async_webcrawler import AsyncWebCrawler
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_successful_crawl():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
url = "https://www.nbcnews.com/business"
|
||||
result = await crawler.arun(url=url, bypass_cache=True)
|
||||
assert result.success
|
||||
assert result.url == url
|
||||
assert result.html
|
||||
assert result.markdown
|
||||
assert result.cleaned_html
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_invalid_url():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
url = "https://www.invalidurl12345.com"
|
||||
result = await crawler.arun(url=url, bypass_cache=True)
|
||||
assert not result.success
|
||||
assert result.error_message
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_multiple_urls():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
urls = [
|
||||
"https://www.nbcnews.com/business",
|
||||
"https://www.example.com",
|
||||
"https://www.python.org"
|
||||
]
|
||||
results = await crawler.arun_many(urls=urls, bypass_cache=True)
|
||||
assert len(results) == len(urls)
|
||||
assert all(result.success for result in results)
|
||||
assert all(result.html for result in results)
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_javascript_execution():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
js_code = "document.body.innerHTML = '<h1>Modified by JS</h1>';"
|
||||
url = "https://www.example.com"
|
||||
result = await crawler.arun(url=url, bypass_cache=True, js_code=js_code)
|
||||
assert result.success
|
||||
assert "<h1>Modified by JS</h1>" in result.html
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_concurrent_crawling_performance():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
urls = [
|
||||
"https://www.nbcnews.com/business",
|
||||
"https://www.example.com",
|
||||
"https://www.python.org",
|
||||
"https://www.github.com",
|
||||
"https://www.stackoverflow.com"
|
||||
]
|
||||
|
||||
start_time = time.time()
|
||||
results = await crawler.arun_many(urls=urls, bypass_cache=True)
|
||||
end_time = time.time()
|
||||
|
||||
total_time = end_time - start_time
|
||||
print(f"Total time for concurrent crawling: {total_time:.2f} seconds")
|
||||
|
||||
assert all(result.success for result in results)
|
||||
assert len(results) == len(urls)
|
||||
|
||||
# Assert that concurrent crawling is faster than sequential
|
||||
# This multiplier may need adjustment based on the number of URLs and their complexity
|
||||
assert total_time < len(urls) * 5, f"Concurrent crawling not significantly faster: {total_time:.2f} seconds"
|
||||
|
||||
# Entry point for debugging
|
||||
if __name__ == "__main__":
|
||||
pytest.main([__file__, "-v"])
|
||||
82
tests/async/test_caching.py
Normal file
82
tests/async/test_caching.py
Normal file
@@ -0,0 +1,82 @@
|
||||
import os
|
||||
import sys
|
||||
import pytest
|
||||
import asyncio
|
||||
|
||||
# Add the parent directory to the Python path
|
||||
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
||||
sys.path.append(parent_dir)
|
||||
|
||||
from crawl4ai.async_webcrawler import AsyncWebCrawler
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_caching():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
url = "https://www.nbcnews.com/business"
|
||||
|
||||
# First crawl (should not use cache)
|
||||
start_time = asyncio.get_event_loop().time()
|
||||
result1 = await crawler.arun(url=url, bypass_cache=True)
|
||||
end_time = asyncio.get_event_loop().time()
|
||||
time_taken1 = end_time - start_time
|
||||
|
||||
assert result1.success
|
||||
|
||||
# Second crawl (should use cache)
|
||||
start_time = asyncio.get_event_loop().time()
|
||||
result2 = await crawler.arun(url=url, bypass_cache=False)
|
||||
end_time = asyncio.get_event_loop().time()
|
||||
time_taken2 = end_time - start_time
|
||||
|
||||
assert result2.success
|
||||
assert time_taken2 < time_taken1 # Cached result should be faster
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_bypass_cache():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
url = "https://www.nbcnews.com/business"
|
||||
|
||||
# First crawl
|
||||
result1 = await crawler.arun(url=url, bypass_cache=False)
|
||||
assert result1.success
|
||||
|
||||
# Second crawl with bypass_cache=True
|
||||
result2 = await crawler.arun(url=url, bypass_cache=True)
|
||||
assert result2.success
|
||||
|
||||
# Content should be different (or at least, not guaranteed to be the same)
|
||||
assert result1.html != result2.html or result1.markdown != result2.markdown
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_clear_cache():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
url = "https://www.nbcnews.com/business"
|
||||
|
||||
# Crawl and cache
|
||||
await crawler.arun(url=url, bypass_cache=False)
|
||||
|
||||
# Clear cache
|
||||
await crawler.aclear_cache()
|
||||
|
||||
# Check cache size
|
||||
cache_size = await crawler.aget_cache_size()
|
||||
assert cache_size == 0
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_flush_cache():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
url = "https://www.nbcnews.com/business"
|
||||
|
||||
# Crawl and cache
|
||||
await crawler.arun(url=url, bypass_cache=False)
|
||||
|
||||
# Flush cache
|
||||
await crawler.aflush_cache()
|
||||
|
||||
# Check cache size
|
||||
cache_size = await crawler.aget_cache_size()
|
||||
assert cache_size == 0
|
||||
|
||||
# Entry point for debugging
|
||||
if __name__ == "__main__":
|
||||
pytest.main([__file__, "-v"])
|
||||
87
tests/async/test_chunking_and_extraction_strategies.py
Normal file
87
tests/async/test_chunking_and_extraction_strategies.py
Normal file
@@ -0,0 +1,87 @@
|
||||
import os
|
||||
import sys
|
||||
import pytest
|
||||
import asyncio
|
||||
import json
|
||||
|
||||
# Add the parent directory to the Python path
|
||||
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
||||
sys.path.append(parent_dir)
|
||||
|
||||
from crawl4ai.async_webcrawler import AsyncWebCrawler
|
||||
from crawl4ai.chunking_strategy import RegexChunking, NlpSentenceChunking
|
||||
from crawl4ai.extraction_strategy import CosineStrategy, LLMExtractionStrategy
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_regex_chunking():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
url = "https://www.nbcnews.com/business"
|
||||
chunking_strategy = RegexChunking(patterns=["\n\n"])
|
||||
result = await crawler.arun(
|
||||
url=url,
|
||||
chunking_strategy=chunking_strategy,
|
||||
bypass_cache=True
|
||||
)
|
||||
assert result.success
|
||||
assert result.extracted_content
|
||||
chunks = json.loads(result.extracted_content)
|
||||
assert len(chunks) > 1 # Ensure multiple chunks were created
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_cosine_strategy():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
url = "https://www.nbcnews.com/business"
|
||||
extraction_strategy = CosineStrategy(word_count_threshold=10, max_dist=0.2, linkage_method="ward", top_k=3, sim_threshold=0.3)
|
||||
result = await crawler.arun(
|
||||
url=url,
|
||||
extraction_strategy=extraction_strategy,
|
||||
bypass_cache=True
|
||||
)
|
||||
assert result.success
|
||||
assert result.extracted_content
|
||||
extracted_data = json.loads(result.extracted_content)
|
||||
assert len(extracted_data) > 0
|
||||
assert all('tags' in item for item in extracted_data)
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_llm_extraction_strategy():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
url = "https://www.nbcnews.com/business"
|
||||
extraction_strategy = LLMExtractionStrategy(
|
||||
provider="openai/gpt-4o-mini",
|
||||
api_token=os.getenv('OPENAI_API_KEY'),
|
||||
instruction="Extract only content related to technology"
|
||||
)
|
||||
result = await crawler.arun(
|
||||
url=url,
|
||||
extraction_strategy=extraction_strategy,
|
||||
bypass_cache=True
|
||||
)
|
||||
assert result.success
|
||||
assert result.extracted_content
|
||||
extracted_data = json.loads(result.extracted_content)
|
||||
assert len(extracted_data) > 0
|
||||
assert all('content' in item for item in extracted_data)
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_combined_chunking_and_extraction():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
url = "https://www.nbcnews.com/business"
|
||||
chunking_strategy = RegexChunking(patterns=["\n\n"])
|
||||
extraction_strategy = CosineStrategy(word_count_threshold=10, max_dist=0.2, linkage_method="ward", top_k=3, sim_threshold=0.3)
|
||||
result = await crawler.arun(
|
||||
url=url,
|
||||
chunking_strategy=chunking_strategy,
|
||||
extraction_strategy=extraction_strategy,
|
||||
bypass_cache=True
|
||||
)
|
||||
assert result.success
|
||||
assert result.extracted_content
|
||||
extracted_data = json.loads(result.extracted_content)
|
||||
assert len(extracted_data) > 0
|
||||
assert all('tags' in item for item in extracted_data)
|
||||
assert all('content' in item for item in extracted_data)
|
||||
|
||||
# Entry point for debugging
|
||||
if __name__ == "__main__":
|
||||
pytest.main([__file__, "-v"])
|
||||
90
tests/async/test_content_extraction.py
Normal file
90
tests/async/test_content_extraction.py
Normal file
@@ -0,0 +1,90 @@
|
||||
import os
|
||||
import sys
|
||||
import pytest
|
||||
import asyncio
|
||||
import json
|
||||
|
||||
# Add the parent directory to the Python path
|
||||
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
||||
sys.path.append(parent_dir)
|
||||
|
||||
from crawl4ai.async_webcrawler import AsyncWebCrawler
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_extract_markdown():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
url = "https://www.nbcnews.com/business"
|
||||
result = await crawler.arun(url=url, bypass_cache=True)
|
||||
assert result.success
|
||||
assert result.markdown
|
||||
assert isinstance(result.markdown, str)
|
||||
assert len(result.markdown) > 0
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_extract_cleaned_html():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
url = "https://www.nbcnews.com/business"
|
||||
result = await crawler.arun(url=url, bypass_cache=True)
|
||||
assert result.success
|
||||
assert result.cleaned_html
|
||||
assert isinstance(result.cleaned_html, str)
|
||||
assert len(result.cleaned_html) > 0
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_extract_media():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
url = "https://www.nbcnews.com/business"
|
||||
result = await crawler.arun(url=url, bypass_cache=True)
|
||||
assert result.success
|
||||
assert result.media
|
||||
media = result.media
|
||||
assert isinstance(media, dict)
|
||||
assert "images" in media
|
||||
assert isinstance(media["images"], list)
|
||||
for image in media["images"]:
|
||||
assert "src" in image
|
||||
assert "alt" in image
|
||||
assert "type" in image
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_extract_links():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
url = "https://www.nbcnews.com/business"
|
||||
result = await crawler.arun(url=url, bypass_cache=True)
|
||||
assert result.success
|
||||
assert result.links
|
||||
links = result.links
|
||||
assert isinstance(links, dict)
|
||||
assert "internal" in links
|
||||
assert "external" in links
|
||||
assert isinstance(links["internal"], list)
|
||||
assert isinstance(links["external"], list)
|
||||
for link in links["internal"] + links["external"]:
|
||||
assert "href" in link
|
||||
assert "text" in link
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_extract_metadata():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
url = "https://www.nbcnews.com/business"
|
||||
result = await crawler.arun(url=url, bypass_cache=True)
|
||||
assert result.success
|
||||
assert result.metadata
|
||||
metadata = result.metadata
|
||||
assert isinstance(metadata, dict)
|
||||
assert "title" in metadata
|
||||
assert isinstance(metadata["title"], str)
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_css_selector_extraction():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
url = "https://www.nbcnews.com/business"
|
||||
css_selector = "h1, h2, h3"
|
||||
result = await crawler.arun(url=url, bypass_cache=True, css_selector=css_selector)
|
||||
assert result.success
|
||||
assert result.markdown
|
||||
assert all(heading in result.markdown for heading in ["#", "##", "###"])
|
||||
|
||||
# Entry point for debugging
|
||||
if __name__ == "__main__":
|
||||
pytest.main([__file__, "-v"])
|
||||
68
tests/async/test_crawler_strategy.py
Normal file
68
tests/async/test_crawler_strategy.py
Normal file
@@ -0,0 +1,68 @@
|
||||
import os
|
||||
import sys
|
||||
import pytest
|
||||
import asyncio
|
||||
|
||||
# Add the parent directory to the Python path
|
||||
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
||||
sys.path.append(parent_dir)
|
||||
|
||||
from crawl4ai.async_webcrawler import AsyncWebCrawler
|
||||
from crawl4ai.async_crawler_strategy import AsyncPlaywrightCrawlerStrategy
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_custom_user_agent():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
custom_user_agent = "MyCustomUserAgent/1.0"
|
||||
crawler.crawler_strategy.update_user_agent(custom_user_agent)
|
||||
url = "https://httpbin.org/user-agent"
|
||||
result = await crawler.arun(url=url, bypass_cache=True)
|
||||
assert result.success
|
||||
assert custom_user_agent in result.html
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_custom_headers():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
custom_headers = {"X-Test-Header": "TestValue"}
|
||||
crawler.crawler_strategy.set_custom_headers(custom_headers)
|
||||
url = "https://httpbin.org/headers"
|
||||
result = await crawler.arun(url=url, bypass_cache=True)
|
||||
assert result.success
|
||||
assert "X-Test-Header" in result.html
|
||||
assert "TestValue" in result.html
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_javascript_execution():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
js_code = "document.body.innerHTML = '<h1>Modified by JS</h1>';"
|
||||
url = "https://www.example.com"
|
||||
result = await crawler.arun(url=url, bypass_cache=True, js_code=js_code)
|
||||
assert result.success
|
||||
assert "<h1>Modified by JS</h1>" in result.html
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_hook_execution():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
async def test_hook(page):
|
||||
await page.evaluate("document.body.style.backgroundColor = 'red';")
|
||||
return page
|
||||
|
||||
crawler.crawler_strategy.set_hook('after_goto', test_hook)
|
||||
url = "https://www.example.com"
|
||||
result = await crawler.arun(url=url, bypass_cache=True)
|
||||
assert result.success
|
||||
assert "background-color: red" in result.html
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_screenshot():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
url = "https://www.example.com"
|
||||
result = await crawler.arun(url=url, bypass_cache=True, screenshot=True)
|
||||
assert result.success
|
||||
assert result.screenshot
|
||||
assert isinstance(result.screenshot, str)
|
||||
assert len(result.screenshot) > 0
|
||||
|
||||
# Entry point for debugging
|
||||
if __name__ == "__main__":
|
||||
pytest.main([__file__, "-v"])
|
||||
82
tests/async/test_database_operations.py
Normal file
82
tests/async/test_database_operations.py
Normal file
@@ -0,0 +1,82 @@
|
||||
import os
|
||||
import sys
|
||||
import pytest
|
||||
import asyncio
|
||||
import json
|
||||
|
||||
# Add the parent directory to the Python path
|
||||
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
||||
sys.path.append(parent_dir)
|
||||
|
||||
from crawl4ai.async_webcrawler import AsyncWebCrawler
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_cache_url():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
url = "https://www.example.com"
|
||||
# First run to cache the URL
|
||||
result1 = await crawler.arun(url=url, bypass_cache=True)
|
||||
assert result1.success
|
||||
|
||||
# Second run to retrieve from cache
|
||||
result2 = await crawler.arun(url=url, bypass_cache=False)
|
||||
assert result2.success
|
||||
assert result2.html == result1.html
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_bypass_cache():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
url = "https://www.python.org"
|
||||
# First run to cache the URL
|
||||
result1 = await crawler.arun(url=url, bypass_cache=True)
|
||||
assert result1.success
|
||||
|
||||
# Second run bypassing cache
|
||||
result2 = await crawler.arun(url=url, bypass_cache=True)
|
||||
assert result2.success
|
||||
assert result2.html != result1.html # Content might be different due to dynamic nature of websites
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_cache_size():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
initial_size = await crawler.aget_cache_size()
|
||||
|
||||
url = "https://www.nbcnews.com/business"
|
||||
await crawler.arun(url=url, bypass_cache=True)
|
||||
|
||||
new_size = await crawler.aget_cache_size()
|
||||
assert new_size == initial_size + 1
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_clear_cache():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
url = "https://www.example.org"
|
||||
await crawler.arun(url=url, bypass_cache=True)
|
||||
|
||||
initial_size = await crawler.aget_cache_size()
|
||||
assert initial_size > 0
|
||||
|
||||
await crawler.aclear_cache()
|
||||
new_size = await crawler.aget_cache_size()
|
||||
assert new_size == 0
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_flush_cache():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
url = "https://www.example.net"
|
||||
await crawler.arun(url=url, bypass_cache=True)
|
||||
|
||||
initial_size = await crawler.aget_cache_size()
|
||||
assert initial_size > 0
|
||||
|
||||
await crawler.aflush_cache()
|
||||
new_size = await crawler.aget_cache_size()
|
||||
assert new_size == 0
|
||||
|
||||
# Try to retrieve the previously cached URL
|
||||
result = await crawler.arun(url=url, bypass_cache=False)
|
||||
assert result.success # The crawler should still succeed, but it will fetch the content anew
|
||||
|
||||
# Entry point for debugging
|
||||
if __name__ == "__main__":
|
||||
pytest.main([__file__, "-v"])
|
||||
127
tests/async/test_edge_cases.py
Normal file
127
tests/async/test_edge_cases.py
Normal file
@@ -0,0 +1,127 @@
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import pytest
|
||||
import json
|
||||
from bs4 import BeautifulSoup
|
||||
import asyncio
|
||||
# Add the parent directory to the Python path
|
||||
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
||||
sys.path.append(parent_dir)
|
||||
|
||||
from crawl4ai.async_webcrawler import AsyncWebCrawler
|
||||
|
||||
# @pytest.mark.asyncio
|
||||
# async def test_large_content_page():
|
||||
# async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
# url = "https://en.wikipedia.org/wiki/List_of_largest_known_stars" # A page with a large table
|
||||
# result = await crawler.arun(url=url, bypass_cache=True)
|
||||
# assert result.success
|
||||
# assert len(result.html) > 1000000 # Expecting more than 1MB of content
|
||||
|
||||
# @pytest.mark.asyncio
|
||||
# async def test_minimal_content_page():
|
||||
# async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
# url = "https://example.com" # A very simple page
|
||||
# result = await crawler.arun(url=url, bypass_cache=True)
|
||||
# assert result.success
|
||||
# assert len(result.html) < 10000 # Expecting less than 10KB of content
|
||||
|
||||
# @pytest.mark.asyncio
|
||||
# async def test_single_page_application():
|
||||
# async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
# url = "https://reactjs.org/" # React's website is a SPA
|
||||
# result = await crawler.arun(url=url, bypass_cache=True)
|
||||
# assert result.success
|
||||
# assert "react" in result.html.lower()
|
||||
|
||||
# @pytest.mark.asyncio
|
||||
# async def test_page_with_infinite_scroll():
|
||||
# async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
# url = "https://news.ycombinator.com/" # Hacker News has infinite scroll
|
||||
# result = await crawler.arun(url=url, bypass_cache=True)
|
||||
# assert result.success
|
||||
# assert "hacker news" in result.html.lower()
|
||||
|
||||
# @pytest.mark.asyncio
|
||||
# async def test_page_with_heavy_javascript():
|
||||
# async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
# url = "https://www.airbnb.com/" # Airbnb uses a lot of JavaScript
|
||||
# result = await crawler.arun(url=url, bypass_cache=True)
|
||||
# assert result.success
|
||||
# assert "airbnb" in result.html.lower()
|
||||
|
||||
# @pytest.mark.asyncio
|
||||
# async def test_page_with_mixed_content():
|
||||
# async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
# url = "https://github.com/" # GitHub has a mix of static and dynamic content
|
||||
# result = await crawler.arun(url=url, bypass_cache=True)
|
||||
# assert result.success
|
||||
# assert "github" in result.html.lower()
|
||||
|
||||
# Add this test to your existing test file
|
||||
@pytest.mark.asyncio
|
||||
async def test_typescript_commits_multi_page():
|
||||
first_commit = ""
|
||||
async def on_execution_started(page):
|
||||
nonlocal first_commit
|
||||
try:
|
||||
# Check if the page firct commit h4 text is different from the first commit (use document.querySelector('li.Box-sc-g0xbh4-0 h4'))
|
||||
while True:
|
||||
await page.wait_for_selector('li.Box-sc-g0xbh4-0 h4')
|
||||
commit = await page.query_selector('li.Box-sc-g0xbh4-0 h4')
|
||||
commit = await commit.evaluate('(element) => element.textContent')
|
||||
commit = re.sub(r'\s+', '', commit)
|
||||
if commit and commit != first_commit:
|
||||
first_commit = commit
|
||||
break
|
||||
await asyncio.sleep(0.5)
|
||||
except Exception as e:
|
||||
print(f"Warning: New content didn't appear after JavaScript execution: {e}")
|
||||
|
||||
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
crawler.crawler_strategy.set_hook('on_execution_started', on_execution_started)
|
||||
|
||||
url = "https://github.com/microsoft/TypeScript/commits/main"
|
||||
session_id = "typescript_commits_session"
|
||||
all_commits = []
|
||||
|
||||
js_next_page = """
|
||||
const button = document.querySelector('a[data-testid="pagination-next-button"]');
|
||||
if (button) button.click();
|
||||
"""
|
||||
|
||||
for page in range(3): # Crawl 3 pages
|
||||
result = await crawler.arun(
|
||||
url=url, # Only use URL for the first page
|
||||
session_id=session_id,
|
||||
css_selector="li.Box-sc-g0xbh4-0",
|
||||
js=js_next_page if page > 0 else None, # Don't click 'next' on the first page
|
||||
bypass_cache=True,
|
||||
js_only=page > 0 # Use js_only for subsequent pages
|
||||
)
|
||||
|
||||
assert result.success, f"Failed to crawl page {page + 1}"
|
||||
|
||||
# Parse the HTML and extract commits
|
||||
soup = BeautifulSoup(result.cleaned_html, 'html.parser')
|
||||
commits = soup.select("li")
|
||||
# Take first commit find h4 extract text
|
||||
first_commit = commits[0].find("h4").text
|
||||
first_commit = re.sub(r'\s+', '', first_commit)
|
||||
all_commits.extend(commits)
|
||||
|
||||
print(f"Page {page + 1}: Found {len(commits)} commits")
|
||||
|
||||
# Clean up the session
|
||||
await crawler.crawler_strategy.kill_session(session_id)
|
||||
|
||||
# Assertions
|
||||
assert len(all_commits) >= 90, f"Expected at least 90 commits, but got {len(all_commits)}"
|
||||
|
||||
print(f"Successfully crawled {len(all_commits)} commits across 3 pages")
|
||||
|
||||
# Entry point for debugging
|
||||
if __name__ == "__main__":
|
||||
pytest.main([__file__, "-v"])
|
||||
78
tests/async/test_error_handling.py
Normal file
78
tests/async/test_error_handling.py
Normal file
@@ -0,0 +1,78 @@
|
||||
# import os
|
||||
# import sys
|
||||
# import pytest
|
||||
# import asyncio
|
||||
|
||||
# # Add the parent directory to the Python path
|
||||
# parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
||||
# sys.path.append(parent_dir)
|
||||
|
||||
# from crawl4ai.async_webcrawler import AsyncWebCrawler
|
||||
# from crawl4ai.utils import InvalidCSSSelectorError
|
||||
|
||||
# class AsyncCrawlerWrapper:
|
||||
# def __init__(self):
|
||||
# self.crawler = None
|
||||
|
||||
# async def setup(self):
|
||||
# self.crawler = AsyncWebCrawler(verbose=True)
|
||||
# await self.crawler.awarmup()
|
||||
|
||||
# async def cleanup(self):
|
||||
# if self.crawler:
|
||||
# await self.crawler.aclear_cache()
|
||||
|
||||
# @pytest.fixture(scope="module")
|
||||
# def crawler_wrapper():
|
||||
# wrapper = AsyncCrawlerWrapper()
|
||||
# asyncio.get_event_loop().run_until_complete(wrapper.setup())
|
||||
# yield wrapper
|
||||
# asyncio.get_event_loop().run_until_complete(wrapper.cleanup())
|
||||
|
||||
# @pytest.mark.asyncio
|
||||
# async def test_network_error(crawler_wrapper):
|
||||
# url = "https://www.nonexistentwebsite123456789.com"
|
||||
# result = await crawler_wrapper.crawler.arun(url=url, bypass_cache=True)
|
||||
# assert not result.success
|
||||
# assert "Failed to crawl" in result.error_message
|
||||
|
||||
# # @pytest.mark.asyncio
|
||||
# # async def test_timeout_error(crawler_wrapper):
|
||||
# # # Simulating a timeout by using a very short timeout value
|
||||
# # url = "https://www.nbcnews.com/business"
|
||||
# # result = await crawler_wrapper.crawler.arun(url=url, bypass_cache=True, timeout=0.001)
|
||||
# # assert not result.success
|
||||
# # assert "timeout" in result.error_message.lower()
|
||||
|
||||
# # @pytest.mark.asyncio
|
||||
# # async def test_invalid_css_selector(crawler_wrapper):
|
||||
# # url = "https://www.nbcnews.com/business"
|
||||
# # with pytest.raises(InvalidCSSSelectorError):
|
||||
# # await crawler_wrapper.crawler.arun(url=url, bypass_cache=True, css_selector="invalid>>selector")
|
||||
|
||||
# # @pytest.mark.asyncio
|
||||
# # async def test_js_execution_error(crawler_wrapper):
|
||||
# # url = "https://www.nbcnews.com/business"
|
||||
# # invalid_js = "This is not valid JavaScript code;"
|
||||
# # result = await crawler_wrapper.crawler.arun(url=url, bypass_cache=True, js=invalid_js)
|
||||
# # assert not result.success
|
||||
# # assert "JavaScript" in result.error_message
|
||||
|
||||
# # @pytest.mark.asyncio
|
||||
# # async def test_empty_page(crawler_wrapper):
|
||||
# # # Use a URL that typically returns an empty page
|
||||
# # url = "http://example.com/empty"
|
||||
# # result = await crawler_wrapper.crawler.arun(url=url, bypass_cache=True)
|
||||
# # assert result.success # The crawl itself should succeed
|
||||
# # assert not result.markdown.strip() # The markdown content should be empty or just whitespace
|
||||
|
||||
# # @pytest.mark.asyncio
|
||||
# # async def test_rate_limiting(crawler_wrapper):
|
||||
# # # Simulate rate limiting by making multiple rapid requests
|
||||
# # url = "https://www.nbcnews.com/business"
|
||||
# # results = await asyncio.gather(*[crawler_wrapper.crawler.arun(url=url, bypass_cache=True) for _ in range(10)])
|
||||
# # assert any(not result.success and "rate limit" in result.error_message.lower() for result in results)
|
||||
|
||||
# # Entry point for debugging
|
||||
# if __name__ == "__main__":
|
||||
# pytest.main([__file__, "-v"])
|
||||
94
tests/async/test_parameters_and_options.py
Normal file
94
tests/async/test_parameters_and_options.py
Normal file
@@ -0,0 +1,94 @@
|
||||
import os
|
||||
import sys
|
||||
import pytest
|
||||
import asyncio
|
||||
import json
|
||||
|
||||
# Add the parent directory to the Python path
|
||||
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
||||
sys.path.append(parent_dir)
|
||||
|
||||
from crawl4ai.async_webcrawler import AsyncWebCrawler
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_word_count_threshold():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
url = "https://www.nbcnews.com/business"
|
||||
result_no_threshold = await crawler.arun(url=url, word_count_threshold=0, bypass_cache=True)
|
||||
result_with_threshold = await crawler.arun(url=url, word_count_threshold=50, bypass_cache=True)
|
||||
|
||||
assert len(result_no_threshold.markdown) > len(result_with_threshold.markdown)
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_css_selector():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
url = "https://www.nbcnews.com/business"
|
||||
css_selector = "h1, h2, h3"
|
||||
result = await crawler.arun(url=url, css_selector=css_selector, bypass_cache=True)
|
||||
|
||||
assert result.success
|
||||
assert "<h1" in result.cleaned_html or "<h2" in result.cleaned_html or "<h3" in result.cleaned_html
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_javascript_execution():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
url = "https://www.nbcnews.com/business"
|
||||
|
||||
# Crawl without JS
|
||||
result_without_more = await crawler.arun(url=url, bypass_cache=True)
|
||||
|
||||
js_code = ["const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More')); loadMoreButton && loadMoreButton.click();"]
|
||||
result_with_more = await crawler.arun(url=url, js=js_code, bypass_cache=True)
|
||||
|
||||
assert result_with_more.success
|
||||
assert len(result_with_more.markdown) > len(result_without_more.markdown)
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_screenshot():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
url = "https://www.nbcnews.com/business"
|
||||
result = await crawler.arun(url=url, screenshot=True, bypass_cache=True)
|
||||
|
||||
assert result.success
|
||||
assert result.screenshot
|
||||
assert isinstance(result.screenshot, str) # Should be a base64 encoded string
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_custom_user_agent():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
url = "https://www.nbcnews.com/business"
|
||||
custom_user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36 Crawl4AI/1.0"
|
||||
result = await crawler.arun(url=url, user_agent=custom_user_agent, bypass_cache=True)
|
||||
|
||||
assert result.success
|
||||
# Note: We can't directly verify the user agent in the result, but we can check if the crawl was successful
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_extract_media_and_links():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
url = "https://www.nbcnews.com/business"
|
||||
result = await crawler.arun(url=url, bypass_cache=True)
|
||||
|
||||
assert result.success
|
||||
assert result.media
|
||||
assert isinstance(result.media, dict)
|
||||
assert 'images' in result.media
|
||||
assert result.links
|
||||
assert isinstance(result.links, dict)
|
||||
assert 'internal' in result.links and 'external' in result.links
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_metadata_extraction():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
url = "https://www.nbcnews.com/business"
|
||||
result = await crawler.arun(url=url, bypass_cache=True)
|
||||
|
||||
assert result.success
|
||||
assert result.metadata
|
||||
assert isinstance(result.metadata, dict)
|
||||
# Check for common metadata fields
|
||||
assert any(key in result.metadata for key in ['title', 'description', 'keywords'])
|
||||
|
||||
# Entry point for debugging
|
||||
if __name__ == "__main__":
|
||||
pytest.main([__file__, "-v"])
|
||||
72
tests/async/test_performance.py
Normal file
72
tests/async/test_performance.py
Normal file
@@ -0,0 +1,72 @@
|
||||
import os
|
||||
import sys
|
||||
import pytest
|
||||
import asyncio
|
||||
import time
|
||||
|
||||
# Add the parent directory to the Python path
|
||||
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
||||
sys.path.append(parent_dir)
|
||||
|
||||
from crawl4ai.async_webcrawler import AsyncWebCrawler
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_crawl_speed():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
url = "https://www.nbcnews.com/business"
|
||||
start_time = time.time()
|
||||
result = await crawler.arun(url=url, bypass_cache=True)
|
||||
end_time = time.time()
|
||||
|
||||
assert result.success
|
||||
crawl_time = end_time - start_time
|
||||
print(f"Crawl time: {crawl_time:.2f} seconds")
|
||||
|
||||
assert crawl_time < 10, f"Crawl took too long: {crawl_time:.2f} seconds"
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_concurrent_crawling_performance():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
urls = [
|
||||
"https://www.nbcnews.com/business",
|
||||
"https://www.example.com",
|
||||
"https://www.python.org",
|
||||
"https://www.github.com",
|
||||
"https://www.stackoverflow.com"
|
||||
]
|
||||
|
||||
start_time = time.time()
|
||||
results = await crawler.arun_many(urls=urls, bypass_cache=True)
|
||||
end_time = time.time()
|
||||
|
||||
total_time = end_time - start_time
|
||||
print(f"Total time for concurrent crawling: {total_time:.2f} seconds")
|
||||
|
||||
assert all(result.success for result in results)
|
||||
assert len(results) == len(urls)
|
||||
|
||||
assert total_time < len(urls) * 5, f"Concurrent crawling not significantly faster: {total_time:.2f} seconds"
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_crawl_speed_with_caching():
|
||||
async with AsyncWebCrawler(verbose=True) as crawler:
|
||||
url = "https://www.nbcnews.com/business"
|
||||
|
||||
start_time = time.time()
|
||||
result1 = await crawler.arun(url=url, bypass_cache=True)
|
||||
end_time = time.time()
|
||||
first_crawl_time = end_time - start_time
|
||||
|
||||
start_time = time.time()
|
||||
result2 = await crawler.arun(url=url, bypass_cache=False)
|
||||
end_time = time.time()
|
||||
second_crawl_time = end_time - start_time
|
||||
|
||||
assert result1.success and result2.success
|
||||
print(f"First crawl time: {first_crawl_time:.2f} seconds")
|
||||
print(f"Second crawl time (cached): {second_crawl_time:.2f} seconds")
|
||||
|
||||
assert second_crawl_time < first_crawl_time / 2, "Cached crawl not significantly faster"
|
||||
|
||||
if __name__ == "__main__":
|
||||
pytest.main([__file__, "-v"])
|
||||
Reference in New Issue
Block a user