Add smart cache validation (ETag/Last-Modified conditional requests and head fingerprinting) plus caching debug output

This commit is contained in:
unclecode
2025-12-21 04:45:52 +00:00
parent f6b29a8f9f
commit 48426f73f0
11 changed files with 1464 additions and 4 deletions

View File

@@ -1061,6 +1061,15 @@ class CrawlerRunConfig():
shared_data (dict or None): Shared data to be passed between hooks.
Default: None.
# Cache Validation Parameters (Smart Cache)
check_cache_freshness (bool): If True, validates cached content freshness using HTTP
conditional requests (ETag/Last-Modified) and head fingerprinting
before returning cached results. Avoids full browser crawls when
content hasn't changed. Only applies when cache_mode allows reads.
Default: False.
cache_validation_timeout (float): Timeout in seconds for cache validation HTTP requests.
Default: 10.0.
# Page Navigation and Timing Parameters
wait_until (str): The condition to wait for when navigating, e.g. "domcontentloaded".
Default: "domcontentloaded".
@@ -1226,6 +1235,9 @@ class CrawlerRunConfig():
no_cache_read: bool = False,
no_cache_write: bool = False,
shared_data: dict = None,
# Cache Validation Parameters (Smart Cache)
check_cache_freshness: bool = False,
cache_validation_timeout: float = 10.0,
# Page Navigation and Timing Parameters
wait_until: str = "domcontentloaded",
page_timeout: int = PAGE_TIMEOUT,
@@ -1339,6 +1351,9 @@ class CrawlerRunConfig():
self.no_cache_read = no_cache_read
self.no_cache_write = no_cache_write
self.shared_data = shared_data
# Cache Validation (Smart Cache)
self.check_cache_freshness = check_cache_freshness
self.cache_validation_timeout = cache_validation_timeout
# Page Navigation and Timing Parameters
self.wait_until = wait_until

View File

@@ -1,10 +1,11 @@
import os
import time
from pathlib import Path
import aiosqlite
import asyncio
from typing import Optional, Dict
from contextlib import asynccontextmanager
import json
import json
from .models import CrawlResult, MarkdownGenerationResult, StringCompatibleMarkdown
import aiofiles
from .async_logger import AsyncLogger
@@ -262,6 +263,11 @@ class AsyncDatabaseManager:
"screenshot",
"response_headers",
"downloaded_files",
# Smart cache validation columns (added in 0.8.x)
"etag",
"last_modified",
"head_fingerprint",
"cached_at",
]
for column in new_columns:
@@ -275,6 +281,11 @@ class AsyncDatabaseManager:
await db.execute(
f'ALTER TABLE crawled_data ADD COLUMN {new_column} TEXT DEFAULT "{{}}"'
)
elif new_column == "cached_at":
# Timestamp column for cache validation
await db.execute(
f"ALTER TABLE crawled_data ADD COLUMN {new_column} REAL DEFAULT 0"
)
else:
await db.execute(
f'ALTER TABLE crawled_data ADD COLUMN {new_column} TEXT DEFAULT ""'
@@ -378,6 +389,92 @@ class AsyncDatabaseManager:
)
return None
async def aget_cache_metadata(self, url: str) -> Optional[Dict]:
    """
    Fetch only the cache-validation metadata row for *url* (no content blobs).

    Returns:
        A dict with keys: url, etag, last_modified, head_fingerprint,
        cached_at, response_headers (parsed from JSON) — or None when the
        URL is not cached or the query fails.
    """
    async def _read_row(db):
        async with db.execute(
            """SELECT url, etag, last_modified, head_fingerprint, cached_at, response_headers
            FROM crawled_data WHERE url = ?""",
            (url,)
        ) as cursor:
            row = await cursor.fetchone()
            if not row:
                return None
            names = [col[0] for col in cursor.description]
            record = dict(zip(names, row))
            # response_headers is stored as a JSON blob; fall back to {}
            # on empty or corrupt data rather than propagating an error.
            raw_headers = record["response_headers"]
            try:
                record["response_headers"] = json.loads(raw_headers) if raw_headers else {}
            except json.JSONDecodeError:
                record["response_headers"] = {}
            return record

    try:
        return await self.execute_with_retry(_read_row)
    except Exception as e:
        # Metadata lookup is best-effort: log and let the caller treat the
        # URL as having no validation data.
        self.logger.error(
            message="Error retrieving cache metadata: {error}",
            tag="ERROR",
            force_verbose=True,
            params={"error": str(e)},
        )
        return None
async def aupdate_cache_metadata(
    self,
    url: str,
    etag: Optional[str] = None,
    last_modified: Optional[str] = None,
    head_fingerprint: Optional[str] = None,
):
    """
    Overwrite selected cache-validation fields for *url*.

    Only arguments that are not None are written; calling with no values
    is a no-op. Used to refresh etag/last-modified after a successful
    validation without rewriting the cached content itself.
    """
    async def _write(db):
        # Build the SET clause from whichever fields were supplied.
        candidates = {
            "etag": etag,
            "last_modified": last_modified,
            "head_fingerprint": head_fingerprint,
        }
        set_clauses = [f"{col} = ?" for col, val in candidates.items() if val is not None]
        params = [val for val in candidates.values() if val is not None]
        if not set_clauses:
            return  # nothing to update
        params.append(url)
        await db.execute(
            f"UPDATE crawled_data SET {', '.join(set_clauses)} WHERE url = ?",
            tuple(params)
        )

    try:
        await self.execute_with_retry(_write)
    except Exception as e:
        # Best-effort: a failed metadata refresh must not break the crawl.
        self.logger.error(
            message="Error updating cache metadata: {error}",
            tag="ERROR",
            force_verbose=True,
            params={"error": str(e)},
        )
async def acache_url(self, result: CrawlResult):
"""Cache CrawlResult data"""
# Store content files and get hashes
@@ -425,15 +522,24 @@ class AsyncDatabaseManager:
for field, (content, content_type) in content_map.items():
content_hashes[field] = await self._store_content(content, content_type)
# Extract cache validation headers from response
response_headers = result.response_headers or {}
etag = response_headers.get("etag") or response_headers.get("ETag") or ""
last_modified = response_headers.get("last-modified") or response_headers.get("Last-Modified") or ""
# head_fingerprint is set by caller via result attribute (if available)
head_fingerprint = getattr(result, "head_fingerprint", None) or ""
cached_at = time.time()
async def _cache(db):
await db.execute(
"""
INSERT INTO crawled_data (
url, html, cleaned_html, markdown,
extracted_content, success, media, links, metadata,
screenshot, response_headers, downloaded_files
screenshot, response_headers, downloaded_files,
etag, last_modified, head_fingerprint, cached_at
)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
ON CONFLICT(url) DO UPDATE SET
html = excluded.html,
cleaned_html = excluded.cleaned_html,
@@ -445,7 +551,11 @@ class AsyncDatabaseManager:
metadata = excluded.metadata,
screenshot = excluded.screenshot,
response_headers = excluded.response_headers,
downloaded_files = excluded.downloaded_files
downloaded_files = excluded.downloaded_files,
etag = excluded.etag,
last_modified = excluded.last_modified,
head_fingerprint = excluded.head_fingerprint,
cached_at = excluded.cached_at
""",
(
result.url,
@@ -460,6 +570,10 @@ class AsyncDatabaseManager:
content_hashes["screenshot"],
json.dumps(result.response_headers or {}),
json.dumps(result.downloaded_files or []),
etag,
last_modified,
head_fingerprint,
cached_at,
),
)

View File

@@ -47,7 +47,9 @@ from .utils import (
get_error_context,
RobotsParser,
preprocess_html_for_schema,
compute_head_fingerprint,
)
from .cache_validator import CacheValidator, CacheValidationResult
class AsyncWebCrawler:
@@ -267,6 +269,51 @@ class AsyncWebCrawler:
if cache_context.should_read():
cached_result = await async_db_manager.aget_cached_url(url)
# Smart Cache: Validate cache freshness if enabled
if cached_result and config.check_cache_freshness:
cache_metadata = await async_db_manager.aget_cache_metadata(url)
if cache_metadata:
async with CacheValidator(timeout=config.cache_validation_timeout) as validator:
validation = await validator.validate(
url=url,
stored_etag=cache_metadata.get("etag"),
stored_last_modified=cache_metadata.get("last_modified"),
stored_head_fingerprint=cache_metadata.get("head_fingerprint"),
)
if validation.status == CacheValidationResult.FRESH:
cached_result.cache_status = "hit_validated"
self.logger.info(
message="Cache validated: {reason}",
tag="CACHE",
params={"reason": validation.reason}
)
# Update metadata if we got new values
if validation.new_etag or validation.new_last_modified:
await async_db_manager.aupdate_cache_metadata(
url=url,
etag=validation.new_etag,
last_modified=validation.new_last_modified,
head_fingerprint=validation.new_head_fingerprint,
)
elif validation.status == CacheValidationResult.ERROR:
cached_result.cache_status = "hit_fallback"
self.logger.warning(
message="Cache validation failed, using cached: {reason}",
tag="CACHE",
params={"reason": validation.reason}
)
else:
# STALE or UNKNOWN - force recrawl
self.logger.info(
message="Cache stale: {reason}",
tag="CACHE",
params={"reason": validation.reason}
)
cached_result = None
elif cached_result:
cached_result.cache_status = "hit"
if cached_result:
html = sanitize_input_encode(cached_result.html)
extracted_content = sanitize_input_encode(
@@ -383,6 +430,14 @@ class AsyncWebCrawler:
crawl_result.success = bool(html)
crawl_result.session_id = getattr(
config, "session_id", None)
crawl_result.cache_status = "miss"
# Compute head fingerprint for cache validation
if html:
head_end = html.lower().find('</head>')
if head_end != -1:
head_html = html[:head_end + 7]
crawl_result.head_fingerprint = compute_head_fingerprint(head_html)
self.logger.url_status(
url=cache_context.display_url,

270
crawl4ai/cache_validator.py Normal file
View File

@@ -0,0 +1,270 @@
"""
Cache validation using HTTP conditional requests and head fingerprinting.
Uses httpx for fast, lightweight HTTP requests (no browser needed).
This module enables smart cache validation to avoid unnecessary full browser crawls
when content hasn't changed.
Validation Strategy:
1. Send HEAD request with If-None-Match / If-Modified-Since headers
2. If server returns 304 Not Modified → cache is FRESH
3. If server returns 200 → fetch <head> and compare fingerprint
4. If fingerprint matches → cache is FRESH (minor changes only)
5. Otherwise → cache is STALE, need full recrawl
"""
import httpx
from dataclasses import dataclass
from typing import Optional, Tuple
from enum import Enum
from .utils import compute_head_fingerprint
class CacheValidationResult(Enum):
    """Possible outcomes of a cache freshness check."""
    FRESH = "fresh"      # cached copy verified unchanged — serve it
    STALE = "stale"      # remote content differs — a full recrawl is required
    UNKNOWN = "unknown"  # no validators available to check — recrawl to be safe
    ERROR = "error"      # the validation request itself failed — fall back to cache
@dataclass
class ValidationResult:
    """
    Outcome of one cache-validation attempt, plus any refreshed validators.

    The new_* fields carry updated ETag / Last-Modified / fingerprint values
    (when the server supplied them) so the caller can refresh its stored
    metadata without issuing another request.
    """
    status: CacheValidationResult
    new_etag: Optional[str] = None
    new_last_modified: Optional[str] = None
    new_head_fingerprint: Optional[str] = None
    reason: str = ""  # human-readable explanation, intended for logging
class CacheValidator:
    """
    Validates cache freshness using lightweight HTTP requests.

    This validator uses httpx to make fast HTTP requests without needing
    a full browser. It supports two validation methods:

    1. HTTP Conditional Requests (Layer 3):
       - Uses If-None-Match with stored ETag
       - Uses If-Modified-Since with stored Last-Modified
       - Server returns 304 if content unchanged

    2. Head Fingerprinting (Layer 4):
       - Fetches only the <head> section (~5KB)
       - Compares fingerprint of key meta tags
       - Catches changes even without server support for conditional requests
    """

    def __init__(self, timeout: float = 10.0, user_agent: Optional[str] = None):
        """
        Initialize the cache validator.

        Args:
            timeout: Request timeout in seconds
            user_agent: Custom User-Agent string (optional)
        """
        self.timeout = timeout
        self.user_agent = user_agent or "Mozilla/5.0 (compatible; Crawl4AI/1.0)"
        # Created lazily so the validator can be constructed outside an event loop.
        self._client: Optional[httpx.AsyncClient] = None

    async def _get_client(self) -> httpx.AsyncClient:
        """Get or lazily create the shared httpx client."""
        if self._client is None:
            self._client = httpx.AsyncClient(
                http2=True,
                timeout=self.timeout,
                follow_redirects=True,
                headers={"User-Agent": self.user_agent}
            )
        return self._client

    def _judge_fingerprint(
        self,
        head_html: Optional[str],
        stored_head_fingerprint: str,
        new_etag: Optional[str],
        new_last_modified: Optional[str],
    ) -> Optional[ValidationResult]:
        """
        Compare a freshly fetched <head> against the stored fingerprint.

        Returns a FRESH/STALE ValidationResult when a verdict is possible,
        or None when no fingerprint could be computed (caller falls through
        to its default handling).
        """
        if not head_html:
            return None
        new_fingerprint = compute_head_fingerprint(head_html)
        if not new_fingerprint:
            return None
        if new_fingerprint == stored_head_fingerprint:
            return ValidationResult(
                status=CacheValidationResult.FRESH,
                new_etag=new_etag,
                new_last_modified=new_last_modified,
                new_head_fingerprint=new_fingerprint,
                reason="Head fingerprint matches"
            )
        return ValidationResult(
            status=CacheValidationResult.STALE,
            new_etag=new_etag,
            new_last_modified=new_last_modified,
            new_head_fingerprint=new_fingerprint,
            reason="Head fingerprint changed"
        )

    async def validate(
        self,
        url: str,
        stored_etag: Optional[str] = None,
        stored_last_modified: Optional[str] = None,
        stored_head_fingerprint: Optional[str] = None,
    ) -> ValidationResult:
        """
        Validate if cached content is still fresh.

        Args:
            url: The URL to validate
            stored_etag: Previously stored ETag header value
            stored_last_modified: Previously stored Last-Modified header value
            stored_head_fingerprint: Previously computed head fingerprint

        Returns:
            ValidationResult with status and any updated metadata
        """
        client = await self._get_client()

        # Build conditional request headers
        headers = {}
        if stored_etag:
            headers["If-None-Match"] = stored_etag
        if stored_last_modified:
            headers["If-Modified-Since"] = stored_last_modified

        try:
            # Step 1: Try HEAD request with conditional headers
            if headers:
                response = await client.head(url, headers=headers)
                if response.status_code == 304:
                    return ValidationResult(
                        status=CacheValidationResult.FRESH,
                        reason="Server returned 304 Not Modified"
                    )
                # Got 200, extract new headers for potential update
                new_etag = response.headers.get("etag")
                new_last_modified = response.headers.get("last-modified")

                # FIX: some servers ignore conditional headers (especially on
                # HEAD) and always answer 200. Per RFC 7232, an unchanged
                # validator still means the representation is unchanged, so
                # compare the returned validators against the stored ones
                # before assuming the cache is stale.
                if (stored_etag and new_etag == stored_etag) or (
                    stored_last_modified and new_last_modified == stored_last_modified
                ):
                    return ValidationResult(
                        status=CacheValidationResult.FRESH,
                        new_etag=new_etag,
                        new_last_modified=new_last_modified,
                        reason="Validators unchanged (server ignored conditional request)"
                    )

                # If we have a fingerprint, use it as a tie-breaker
                if stored_head_fingerprint:
                    head_html, _, _ = await self._fetch_head(url)
                    verdict = self._judge_fingerprint(
                        head_html, stored_head_fingerprint, new_etag, new_last_modified
                    )
                    if verdict is not None:
                        return verdict

                # Headers changed and no fingerprint match
                return ValidationResult(
                    status=CacheValidationResult.STALE,
                    new_etag=new_etag,
                    new_last_modified=new_last_modified,
                    reason="Server returned 200, content may have changed"
                )

            # Step 2: No conditional headers available, try fingerprint only
            if stored_head_fingerprint:
                head_html, new_etag, new_last_modified = await self._fetch_head(url)
                verdict = self._judge_fingerprint(
                    head_html, stored_head_fingerprint, new_etag, new_last_modified
                )
                if verdict is not None:
                    return verdict

            # Step 3: No validation data available
            return ValidationResult(
                status=CacheValidationResult.UNKNOWN,
                reason="No validation data available (no etag, last-modified, or fingerprint)"
            )
        except httpx.TimeoutException:
            return ValidationResult(
                status=CacheValidationResult.ERROR,
                reason="Validation request timed out"
            )
        except httpx.RequestError as e:
            return ValidationResult(
                status=CacheValidationResult.ERROR,
                reason=f"Validation request failed: {type(e).__name__}"
            )
        except Exception as e:
            # On unexpected error, prefer using cache over failing
            return ValidationResult(
                status=CacheValidationResult.ERROR,
                reason=f"Validation error: {str(e)}"
            )

    async def _fetch_head(self, url: str) -> Tuple[Optional[str], Optional[str], Optional[str]]:
        """
        Fetch only the <head> section of a page.

        Uses streaming to stop reading after </head> is found,
        minimizing bandwidth usage.

        Args:
            url: The URL to fetch

        Returns:
            Tuple of (head_html, etag, last_modified)
        """
        client = await self._get_client()
        try:
            async with client.stream(
                "GET",
                url,
                headers={"Accept-Encoding": "identity"}  # Disable compression for easier parsing
            ) as response:
                etag = response.headers.get("etag")
                last_modified = response.headers.get("last-modified")
                if response.status_code != 200:
                    return None, etag, last_modified

                # Read until </head> or max 64KB.
                # FIX: the original re-joined and re-lowercased the entire
                # accumulated buffer on every chunk (O(n^2)); instead scan a
                # sliding window of (previous tail + new chunk). The explicit
                # b'</HEAD>' check was redundant since .lower() already makes
                # the search case-insensitive.
                chunks = []
                total_bytes = 0
                max_bytes = 65536
                tail = b''
                async for chunk in response.aiter_bytes(4096):
                    chunks.append(chunk)
                    total_bytes += len(chunk)
                    window = tail + chunk
                    if b'</head>' in window.lower():
                        break
                    if total_bytes >= max_bytes:
                        break
                    # Keep enough overlap that a tag split across chunk
                    # boundaries is still found (tag is 7 bytes).
                    tail = window[-8:]

                html = b''.join(chunks).decode('utf-8', errors='replace')
                # Extract just the head section
                head_end = html.lower().find('</head>')
                if head_end != -1:
                    html = html[:head_end + 7]
                return html, etag, last_modified
        except Exception:
            # Best-effort helper: any failure is reported as "no head available"
            return None, None, None

    async def close(self):
        """Close the HTTP client and release resources."""
        if self._client:
            await self._client.aclose()
            self._client = None

    async def __aenter__(self):
        """Async context manager entry."""
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Async context manager exit."""
        await self.close()

View File

@@ -152,6 +152,10 @@ class CrawlResult(BaseModel):
network_requests: Optional[List[Dict[str, Any]]] = None
console_messages: Optional[List[Dict[str, Any]]] = None
tables: List[Dict] = Field(default_factory=list) # NEW [{headers,rows,caption,summary}]
# Cache validation metadata (Smart Cache)
head_fingerprint: Optional[str] = None
cached_at: Optional[float] = None
cache_status: Optional[str] = None # "hit", "hit_validated", "hit_fallback", "miss"
model_config = ConfigDict(arbitrary_types_allowed=True)

View File

@@ -2828,6 +2828,67 @@ def generate_content_hash(content: str) -> str:
# return hashlib.sha256(content.encode()).hexdigest()
def compute_head_fingerprint(head_html: str) -> str:
    """
    Compute a fingerprint of <head> content for cache validation.

    Focuses on content that typically changes when a page updates:
    - <title>
    - <meta name="description">
    - <meta property="og:title|og:description|og:image|og:updated_time">
    - <meta property="article:modified_time">
    - <meta name="last-modified">

    Uses xxhash for speed, folding all extracted signals into one hash.

    Args:
        head_html: The HTML content of the <head> section

    Returns:
        A hex string fingerprint, or empty string if no signals found
    """
    if not head_html:
        return ""

    lowered = head_html.lower()
    signals = []

    # Title text is the strongest change signal.
    title = re.search(r'<title[^>]*>(.*?)</title>', lowered, re.DOTALL)
    if title:
        signals.append(title.group(1).strip())

    # (attribute kind, attribute value) pairs for the meta tags we track.
    tracked_meta = (
        ("name", "description"),
        ("name", "last-modified"),
        ("property", "og:title"),
        ("property", "og:description"),
        ("property", "og:image"),
        ("property", "og:updated_time"),
        ("property", "article:modified_time"),
    )
    for attr_kind, attr_name in tracked_meta:
        escaped = re.escape(attr_name)
        # Try both attribute orders: attr before content, and content before attr.
        for pattern in (
            rf'<meta[^>]*{attr_kind}=["\']{escaped}["\'][^>]*content=["\']([^"\']*)["\']',
            rf'<meta[^>]*content=["\']([^"\']*)["\'][^>]*{attr_kind}=["\']{escaped}["\']',
        ):
            hit = re.search(pattern, lowered)
            if hit:
                signals.append(hit.group(1).strip())
                break  # first matching order wins; move to the next tag

    if not signals:
        return ""

    # Fold all signals into a single fast hash.
    return xxhash.xxh64('|'.join(signals).encode()).hexdigest()
def ensure_content_dirs(base_path: str) -> Dict[str, str]:
"""Create content directories if they don't exist"""
dirs = {

View File

@@ -0,0 +1 @@
# Cache validation test suite

View File

@@ -0,0 +1,40 @@
"""Pytest fixtures for cache validation tests."""
import pytest
def pytest_configure(config):
    """Pytest hook: register this suite's custom markers."""
    # Declaring the marker up front avoids PytestUnknownMarkWarning.
    config.addinivalue_line(
        "markers",
        "integration: marks tests as integration tests (may require network)",
    )
@pytest.fixture
def sample_head_html():
    """A representative <head> block containing every signal the fingerprint tracks."""
    # Mixes tracked tags (title, description, og:*, article:modified_time)
    # with non-signal tags (charset, link, script) that must be ignored.
    head = '''
    <head>
        <meta charset="utf-8">
        <title>Test Page Title</title>
        <meta name="description" content="This is a test page description">
        <meta property="og:title" content="OG Test Title">
        <meta property="og:description" content="OG Description">
        <meta property="og:image" content="https://example.com/image.jpg">
        <meta property="article:modified_time" content="2024-12-01T00:00:00Z">
        <link rel="stylesheet" href="style.css">
        <script src="app.js"></script>
    </head>
    '''
    return head
@pytest.fixture
def minimal_head_html():
    """A head section carrying only a <title> — the smallest fingerprintable input."""
    return '<head><title>Minimal</title></head>'
@pytest.fixture
def empty_head_html():
    """A head section containing no fingerprint signals at all."""
    return '<head></head>'

View File

@@ -0,0 +1,449 @@
"""
End-to-end tests for Smart Cache validation.
Tests the full flow:
1. Fresh crawl (browser launch) - SLOW
2. Cached crawl without validation (check_cache_freshness=False) - FAST
3. Cached crawl with validation (check_cache_freshness=True) - FAST (304/fingerprint)
Verifies all layers:
- Database storage of etag, last_modified, head_fingerprint, cached_at
- Cache validation logic
- HTTP conditional requests (304 Not Modified)
- Performance improvements
"""
import pytest
import time
import asyncio
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
from crawl4ai.async_database import async_db_manager
class TestEndToEndCacheValidation:
    """End-to-end tests for the complete cache validation flow."""
    # NOTE(review): every test in this class hits the live network and a real
    # browser; they belong under the `integration` marker — confirm with CI setup.

    @pytest.mark.asyncio
    async def test_full_cache_flow_docs_python(self):
        """
        Test complete cache flow with docs.python.org:
        1. Fresh crawl (slow - browser) - using BYPASS to force fresh
        2. Cache hit without validation (fast)
        3. Cache hit with validation (fast - 304)
        """
        url = "https://docs.python.org/3/"
        browser_config = BrowserConfig(headless=True, verbose=False)
        # ========== CRAWL 1: Fresh crawl (force with WRITE_ONLY to skip cache read) ==========
        config1 = CrawlerRunConfig(
            cache_mode=CacheMode.WRITE_ONLY,  # Skip reading, write new data
            check_cache_freshness=False,
        )
        async with AsyncWebCrawler(config=browser_config) as crawler:
            start1 = time.perf_counter()
            result1 = await crawler.arun(url, config=config1)
            time1 = time.perf_counter() - start1
        assert result1.success, f"First crawl failed: {result1.error_message}"
        # WRITE_ONLY means we did a fresh crawl and wrote to cache
        assert result1.cache_status == "miss", f"Expected 'miss', got '{result1.cache_status}'"
        print(f"\n[CRAWL 1] Fresh crawl: {time1:.2f}s (cache_status: {result1.cache_status})")
        # Verify data is stored in database
        metadata = await async_db_manager.aget_cache_metadata(url)
        assert metadata is not None, "Metadata should be stored in database"
        assert metadata.get("etag") or metadata.get("last_modified"), "Should have ETag or Last-Modified"
        print(f" - Stored ETag: {metadata.get('etag', 'N/A')[:30]}...")
        print(f" - Stored Last-Modified: {metadata.get('last_modified', 'N/A')}")
        print(f" - Stored head_fingerprint: {metadata.get('head_fingerprint', 'N/A')}")
        print(f" - Stored cached_at: {metadata.get('cached_at', 'N/A')}")
        # ========== CRAWL 2: Cache hit WITHOUT validation ==========
        config2 = CrawlerRunConfig(
            cache_mode=CacheMode.ENABLED,
            check_cache_freshness=False,  # Skip validation - pure cache hit
        )
        async with AsyncWebCrawler(config=browser_config) as crawler:
            start2 = time.perf_counter()
            result2 = await crawler.arun(url, config=config2)
            time2 = time.perf_counter() - start2
        assert result2.success, f"Second crawl failed: {result2.error_message}"
        assert result2.cache_status == "hit", f"Expected 'hit', got '{result2.cache_status}'"
        print(f"\n[CRAWL 2] Cache hit (no validation): {time2:.2f}s (cache_status: {result2.cache_status})")
        print(f" - Speedup: {time1/time2:.1f}x faster than fresh crawl")
        # Should be MUCH faster - no browser, no HTTP request
        # NOTE(review): timing-ratio asserts can flake on loaded CI machines.
        assert time2 < time1 / 2, f"Cache hit should be at least 2x faster (was {time1/time2:.1f}x)"
        # ========== CRAWL 3: Cache hit WITH validation (304) ==========
        config3 = CrawlerRunConfig(
            cache_mode=CacheMode.ENABLED,
            check_cache_freshness=True,  # Validate cache freshness
        )
        async with AsyncWebCrawler(config=browser_config) as crawler:
            start3 = time.perf_counter()
            result3 = await crawler.arun(url, config=config3)
            time3 = time.perf_counter() - start3
        assert result3.success, f"Third crawl failed: {result3.error_message}"
        # Should be "hit_validated" (304) or "hit_fallback" (error during validation)
        assert result3.cache_status in ["hit_validated", "hit_fallback"], \
            f"Expected validated cache hit, got '{result3.cache_status}'"
        print(f"\n[CRAWL 3] Cache hit (with validation): {time3:.2f}s (cache_status: {result3.cache_status})")
        print(f" - Speedup: {time1/time3:.1f}x faster than fresh crawl")
        # Should still be fast - just a HEAD request, no browser
        assert time3 < time1 / 2, f"Validated cache hit should be faster than fresh crawl"
        # ========== SUMMARY ==========
        print(f"\n{'='*60}")
        print(f"PERFORMANCE SUMMARY for {url}")
        print(f"{'='*60}")
        print(f" Fresh crawl (browser): {time1:.2f}s")
        print(f" Cache hit (no validation): {time2:.2f}s ({time1/time2:.1f}x faster)")
        print(f" Cache hit (with validation): {time3:.2f}s ({time1/time3:.1f}x faster)")
        print(f"{'='*60}")

    @pytest.mark.asyncio
    async def test_full_cache_flow_crawl4ai_docs(self):
        """Test with docs.crawl4ai.com."""
        url = "https://docs.crawl4ai.com/"
        browser_config = BrowserConfig(headless=True, verbose=False)
        # Fresh crawl - use WRITE_ONLY to ensure we get fresh data
        config1 = CrawlerRunConfig(cache_mode=CacheMode.WRITE_ONLY, check_cache_freshness=False)
        async with AsyncWebCrawler(config=browser_config) as crawler:
            start1 = time.perf_counter()
            result1 = await crawler.arun(url, config=config1)
            time1 = time.perf_counter() - start1
        assert result1.success
        assert result1.cache_status == "miss"
        print(f"\n[docs.crawl4ai.com] Fresh: {time1:.2f}s")
        # Cache hit with validation
        config2 = CrawlerRunConfig(cache_mode=CacheMode.ENABLED, check_cache_freshness=True)
        async with AsyncWebCrawler(config=browser_config) as crawler:
            start2 = time.perf_counter()
            result2 = await crawler.arun(url, config=config2)
            time2 = time.perf_counter() - start2
        assert result2.success
        assert result2.cache_status in ["hit_validated", "hit_fallback"]
        print(f"[docs.crawl4ai.com] Validated: {time2:.2f}s ({time1/time2:.1f}x faster)")

    @pytest.mark.asyncio
    async def test_verify_database_storage(self):
        """Verify all validation metadata is properly stored in database."""
        url = "https://docs.python.org/3/library/asyncio.html"
        browser_config = BrowserConfig(headless=True, verbose=False)
        config = CrawlerRunConfig(cache_mode=CacheMode.ENABLED, check_cache_freshness=False)
        async with AsyncWebCrawler(config=browser_config) as crawler:
            result = await crawler.arun(url, config=config)
        assert result.success
        # Verify all fields in database
        metadata = await async_db_manager.aget_cache_metadata(url)
        assert metadata is not None, "Metadata must be stored"
        assert "url" in metadata
        assert "etag" in metadata
        assert "last_modified" in metadata
        assert "head_fingerprint" in metadata
        assert "cached_at" in metadata
        assert "response_headers" in metadata
        print(f"\nDatabase storage verification for {url}:")
        print(f" - etag: {metadata['etag'][:40] if metadata['etag'] else 'None'}...")
        print(f" - last_modified: {metadata['last_modified']}")
        print(f" - head_fingerprint: {metadata['head_fingerprint']}")
        print(f" - cached_at: {metadata['cached_at']}")
        print(f" - response_headers keys: {list(metadata['response_headers'].keys())[:5]}...")
        # At least one validation field should be populated
        has_validation_data = (
            metadata["etag"] or
            metadata["last_modified"] or
            metadata["head_fingerprint"]
        )
        assert has_validation_data, "Should have at least one validation field"

    @pytest.mark.asyncio
    async def test_head_fingerprint_stored_and_used(self):
        """Verify head fingerprint is computed, stored, and used for validation."""
        url = "https://example.com/"
        browser_config = BrowserConfig(headless=True, verbose=False)
        # Fresh crawl
        config1 = CrawlerRunConfig(cache_mode=CacheMode.ENABLED, check_cache_freshness=False)
        async with AsyncWebCrawler(config=browser_config) as crawler:
            result1 = await crawler.arun(url, config=config1)
        assert result1.success
        assert result1.head_fingerprint, "head_fingerprint should be set on CrawlResult"
        # Verify in database
        metadata = await async_db_manager.aget_cache_metadata(url)
        assert metadata["head_fingerprint"], "head_fingerprint should be stored in database"
        assert metadata["head_fingerprint"] == result1.head_fingerprint
        print(f"\nHead fingerprint for {url}:")
        print(f" - CrawlResult.head_fingerprint: {result1.head_fingerprint}")
        print(f" - Database head_fingerprint: {metadata['head_fingerprint']}")
        # Validate using fingerprint
        config2 = CrawlerRunConfig(cache_mode=CacheMode.ENABLED, check_cache_freshness=True)
        async with AsyncWebCrawler(config=browser_config) as crawler:
            result2 = await crawler.arun(url, config=config2)
        assert result2.success
        assert result2.cache_status in ["hit_validated", "hit_fallback"]
        print(f" - Validation result: {result2.cache_status}")
class TestCacheValidationPerformance:
    """Performance benchmarks for cache validation."""
    # NOTE(review): live-network benchmarks — timings vary with connectivity
    # and machine load; the ratio asserts below are intentionally loose.

    @pytest.mark.asyncio
    async def test_multiple_urls_performance(self):
        """Test cache performance across multiple URLs."""
        urls = [
            "https://docs.python.org/3/",
            "https://docs.python.org/3/library/asyncio.html",
            "https://en.wikipedia.org/wiki/Python_(programming_language)",
        ]
        browser_config = BrowserConfig(headless=True, verbose=False)
        fresh_times = []
        cached_times = []
        print(f"\n{'='*70}")
        print("MULTI-URL PERFORMANCE TEST")
        print(f"{'='*70}")
        # Fresh crawls - use WRITE_ONLY to force fresh crawl
        for url in urls:
            config = CrawlerRunConfig(cache_mode=CacheMode.WRITE_ONLY, check_cache_freshness=False)
            async with AsyncWebCrawler(config=browser_config) as crawler:
                start = time.perf_counter()
                result = await crawler.arun(url, config=config)
                elapsed = time.perf_counter() - start
            fresh_times.append(elapsed)
            print(f"Fresh: {url[:50]:50} {elapsed:.2f}s ({result.cache_status})")
        # Cached crawls with validation
        for url in urls:
            config = CrawlerRunConfig(cache_mode=CacheMode.ENABLED, check_cache_freshness=True)
            async with AsyncWebCrawler(config=browser_config) as crawler:
                start = time.perf_counter()
                result = await crawler.arun(url, config=config)
                elapsed = time.perf_counter() - start
            cached_times.append(elapsed)
            print(f"Cached: {url[:50]:50} {elapsed:.2f}s ({result.cache_status})")
        avg_fresh = sum(fresh_times) / len(fresh_times)
        avg_cached = sum(cached_times) / len(cached_times)
        total_fresh = sum(fresh_times)
        total_cached = sum(cached_times)
        print(f"\n{'='*70}")
        print(f"RESULTS:")
        print(f" Total fresh crawl time: {total_fresh:.2f}s")
        print(f" Total cached time: {total_cached:.2f}s")
        print(f" Average speedup: {avg_fresh/avg_cached:.1f}x")
        print(f" Time saved: {total_fresh - total_cached:.2f}s")
        print(f"{'='*70}")
        # Cached should be significantly faster
        assert avg_cached < avg_fresh / 2, "Cached crawls should be at least 2x faster"

    @pytest.mark.asyncio
    async def test_repeated_access_same_url(self):
        """Test repeated access to the same URL shows consistent cache hits."""
        url = "https://docs.python.org/3/"
        num_accesses = 5
        browser_config = BrowserConfig(headless=True, verbose=False)
        print(f"\n{'='*60}")
        print(f"REPEATED ACCESS TEST: {url}")
        print(f"{'='*60}")
        # First access - fresh crawl
        config = CrawlerRunConfig(cache_mode=CacheMode.ENABLED, check_cache_freshness=False)
        async with AsyncWebCrawler(config=browser_config) as crawler:
            start = time.perf_counter()
            result = await crawler.arun(url, config=config)
            fresh_time = time.perf_counter() - start
        print(f"Access 1 (fresh): {fresh_time:.2f}s - {result.cache_status}")
        # Repeated accesses - should all be cache hits
        cached_times = []
        for i in range(2, num_accesses + 1):
            config = CrawlerRunConfig(cache_mode=CacheMode.ENABLED, check_cache_freshness=True)
            async with AsyncWebCrawler(config=browser_config) as crawler:
                start = time.perf_counter()
                result = await crawler.arun(url, config=config)
                elapsed = time.perf_counter() - start
            cached_times.append(elapsed)
            print(f"Access {i} (cached): {elapsed:.2f}s - {result.cache_status}")
            assert result.cache_status in ["hit", "hit_validated", "hit_fallback"]
        avg_cached = sum(cached_times) / len(cached_times)
        print(f"\nAverage cached time: {avg_cached:.2f}s")
        print(f"Speedup over fresh: {fresh_time/avg_cached:.1f}x")
class TestCacheValidationModes:
    """Exercise how CacheMode settings interact with the freshness-check flag."""

    @pytest.mark.asyncio
    async def test_cache_bypass_always_fresh(self):
        """CacheMode.BYPASS must never read from the cache."""
        # A dedicated path keeps this run independent of cache rows written by other tests.
        url = "https://example.com/test-bypass"
        browser_config = BrowserConfig(headless=True, verbose=False)

        # Seed the cache: WRITE_ONLY always crawls fresh but stores the result.
        seed_cfg = CrawlerRunConfig(cache_mode=CacheMode.WRITE_ONLY, check_cache_freshness=False)
        async with AsyncWebCrawler(config=browser_config) as crawler:
            result1 = await crawler.arun(url, config=seed_cfg)
            assert result1.cache_status == "miss"

        # BYPASS must ignore the freshly seeded entry entirely.
        bypass_cfg = CrawlerRunConfig(cache_mode=CacheMode.BYPASS, check_cache_freshness=False)
        async with AsyncWebCrawler(config=browser_config) as crawler:
            result2 = await crawler.arun(url, config=bypass_cfg)
            assert result2.cache_status is None or result2.cache_status == "miss"
            print(f"\nCacheMode.BYPASS result: {result2.cache_status}")

    @pytest.mark.asyncio
    async def test_validation_disabled_uses_cache_directly(self):
        """check_cache_freshness=False should serve the cache with no HTTP round trip."""
        url = "https://docs.python.org/3/tutorial/"
        browser_config = BrowserConfig(headless=True, verbose=False)

        # Force a fresh crawl so a cache entry definitely exists.
        seed_cfg = CrawlerRunConfig(cache_mode=CacheMode.WRITE_ONLY, check_cache_freshness=False)
        async with AsyncWebCrawler(config=browser_config) as crawler:
            result1 = await crawler.arun(url, config=seed_cfg)
            assert result1.cache_status == "miss"

        # Read back with validation off: expect a plain "hit" and a near-instant return.
        read_cfg = CrawlerRunConfig(cache_mode=CacheMode.ENABLED, check_cache_freshness=False)
        async with AsyncWebCrawler(config=browser_config) as crawler:
            start = time.perf_counter()
            result2 = await crawler.arun(url, config=read_cfg)
            elapsed = time.perf_counter() - start
            assert result2.cache_status == "hit", f"Expected 'hit', got '{result2.cache_status}'"
            print(f"\nValidation disabled: {elapsed:.3f}s (cache_status: {result2.cache_status})")
            # No network request happens on this path, so it should finish well under a second.
            assert elapsed < 1.0, "Cache hit without validation should be < 1 second"

    @pytest.mark.asyncio
    async def test_validation_enabled_checks_freshness(self):
        """check_cache_freshness=True should validate the entry before serving it."""
        url = "https://docs.python.org/3/reference/"
        browser_config = BrowserConfig(headless=True, verbose=False)

        # Populate the cache first.
        seed_cfg = CrawlerRunConfig(cache_mode=CacheMode.ENABLED, check_cache_freshness=False)
        async with AsyncWebCrawler(config=browser_config) as crawler:
            result1 = await crawler.arun(url, config=seed_cfg)

        # With validation on, the hit should be reported as validated (or fallback).
        read_cfg = CrawlerRunConfig(cache_mode=CacheMode.ENABLED, check_cache_freshness=True)
        async with AsyncWebCrawler(config=browser_config) as crawler:
            start = time.perf_counter()
            result2 = await crawler.arun(url, config=read_cfg)
            elapsed = time.perf_counter() - start
            assert result2.cache_status in ("hit_validated", "hit_fallback")
            print(f"\nValidation enabled: {elapsed:.3f}s (cache_status: {result2.cache_status})")
class TestCacheValidationResponseHeaders:
    """Verify validator headers are captured on crawl and reused for revalidation."""

    @pytest.mark.asyncio
    async def test_response_headers_stored(self):
        """ETag / Last-Modified / Cache-Control should survive into the crawl result."""
        url = "https://docs.python.org/3/"
        browser_config = BrowserConfig(headless=True, verbose=False)
        config = CrawlerRunConfig(cache_mode=CacheMode.ENABLED, check_cache_freshness=False)
        async with AsyncWebCrawler(config=browser_config) as crawler:
            result = await crawler.arun(url, config=config)
            assert result.success
            assert result.response_headers is not None

            headers = result.response_headers
            print(f"\nResponse headers for {url}:")
            # Header names may come back in either case, so probe both spellings.
            etag = headers.get("etag") or headers.get("ETag")
            print(f" - ETag: {etag}")
            last_modified = headers.get("last-modified") or headers.get("Last-Modified")
            print(f" - Last-Modified: {last_modified}")
            cache_control = headers.get("cache-control") or headers.get("Cache-Control")
            print(f" - Cache-Control: {cache_control}")
            # docs.python.org is expected to send at least one validator header.
            assert etag or last_modified, "Should have ETag or Last-Modified header"

    @pytest.mark.asyncio
    async def test_headers_used_for_validation(self):
        """Stored ETag/Last-Modified should drive the conditional revalidation request."""
        url = "https://docs.crawl4ai.com/"
        browser_config = BrowserConfig(headless=True, verbose=False)

        # Crawl once so the validator headers get persisted to the cache DB.
        seed_cfg = CrawlerRunConfig(cache_mode=CacheMode.ENABLED, check_cache_freshness=False)
        async with AsyncWebCrawler(config=browser_config) as crawler:
            result1 = await crawler.arun(url, config=seed_cfg)

        # Inspect what the cache actually recorded for this URL.
        metadata = await async_db_manager.aget_cache_metadata(url)
        stored_etag = metadata.get("etag")
        stored_last_modified = metadata.get("last_modified")
        print(f"\nStored validation data for {url}:")
        print(f" - etag: {stored_etag}")
        print(f" - last_modified: {stored_last_modified}")

        # Revalidate: the stored headers should produce a 304-style validated hit.
        read_cfg = CrawlerRunConfig(cache_mode=CacheMode.ENABLED, check_cache_freshness=True)
        async with AsyncWebCrawler(config=browser_config) as crawler:
            result2 = await crawler.arun(url, config=read_cfg)
            assert result2.cache_status in ("hit_validated", "hit_fallback")
            print(f" - Validation result: {result2.cache_status}")

View File

@@ -0,0 +1,97 @@
"""Unit tests for head fingerprinting."""
import pytest
from crawl4ai.utils import compute_head_fingerprint
class TestHeadFingerprint:
    """Unit-level checks for compute_head_fingerprint."""

    def test_same_content_same_fingerprint(self):
        """The fingerprint is deterministic for identical <head> markup."""
        head = "<head><title>Test Page</title></head>"
        first = compute_head_fingerprint(head)
        second = compute_head_fingerprint(head)
        assert first == second
        assert first != ""

    def test_different_title_different_fingerprint(self):
        """Changing the title changes the fingerprint."""
        fp_a = compute_head_fingerprint("<head><title>Title A</title></head>")
        fp_b = compute_head_fingerprint("<head><title>Title B</title></head>")
        assert fp_a != fp_b

    def test_empty_head_returns_empty_string(self):
        """Empty and None inputs both map to the empty fingerprint."""
        assert compute_head_fingerprint("") == ""
        assert compute_head_fingerprint(None) == ""

    def test_head_without_signals_returns_empty(self):
        """A head carrying no title or key meta tags yields no fingerprint."""
        markup = "<head><link rel='stylesheet' href='style.css'></head>"
        assert compute_head_fingerprint(markup) == ""

    def test_extracts_title(self):
        """Only signal content matters: an extra <link> does not alter the fingerprint."""
        plain = "<head><title>My Title</title></head>"
        with_link = "<head><title>My Title</title><link href='x'></head>"
        assert compute_head_fingerprint(plain) == compute_head_fingerprint(with_link)

    def test_extracts_meta_description(self):
        """Distinct meta descriptions produce distinct fingerprints."""
        desc_a = '<head><meta name="description" content="Test description"></head>'
        desc_b = '<head><meta name="description" content="Different description"></head>'
        assert compute_head_fingerprint(desc_a) != compute_head_fingerprint(desc_b)

    def test_extracts_og_tags(self):
        """og:title participates in the fingerprint."""
        og_a = '<head><meta property="og:title" content="OG Title"></head>'
        og_b = '<head><meta property="og:title" content="Different OG Title"></head>'
        assert compute_head_fingerprint(og_a) != compute_head_fingerprint(og_b)

    def test_extracts_og_image(self):
        """og:image participates in the fingerprint."""
        img_a = '<head><meta property="og:image" content="https://example.com/img1.jpg"></head>'
        img_b = '<head><meta property="og:image" content="https://example.com/img2.jpg"></head>'
        assert compute_head_fingerprint(img_a) != compute_head_fingerprint(img_b)

    def test_extracts_article_modified_time(self):
        """article:modified_time participates in the fingerprint."""
        ts_a = '<head><meta property="article:modified_time" content="2024-01-01T00:00:00Z"></head>'
        ts_b = '<head><meta property="article:modified_time" content="2024-12-01T00:00:00Z"></head>'
        assert compute_head_fingerprint(ts_a) != compute_head_fingerprint(ts_b)

    def test_case_insensitive(self):
        """Upper-case tags are still recognised as fingerprint signals."""
        upper = "<head><TITLE>Test</TITLE></head>"
        lower = "<head><title>test</title></head>"
        # Both variants must yield a non-empty fingerprint (tag matching is case-insensitive).
        assert compute_head_fingerprint(upper) != ""
        assert compute_head_fingerprint(lower) != ""

    def test_handles_attribute_order(self):
        """Attribute order inside a meta tag is irrelevant."""
        name_first = '<head><meta name="description" content="Test"></head>'
        content_first = '<head><meta content="Test" name="description"></head>'
        assert compute_head_fingerprint(name_first) == compute_head_fingerprint(content_first)

    def test_real_world_head(self):
        """A realistic, multi-line head yields a stable, non-empty fingerprint."""
        head = '''
        <head>
            <meta charset="utf-8">
            <title>Python Documentation</title>
            <meta name="description" content="Official Python documentation">
            <meta property="og:title" content="Python Docs">
            <meta property="og:description" content="Learn Python">
            <meta property="og:image" content="https://python.org/logo.png">
            <link rel="stylesheet" href="styles.css">
        </head>
        '''
        fp = compute_head_fingerprint(head)
        assert fp != ""
        # Determinism: recomputing on identical input must not change the value.
        assert fp == compute_head_fingerprint(head)

View File

@@ -0,0 +1,354 @@
"""
Real-world tests for cache validation using actual HTTP requests.
No mocks - all tests hit real servers.
"""
import pytest
from crawl4ai.cache_validator import CacheValidator, CacheValidationResult
from crawl4ai.utils import compute_head_fingerprint
class TestRealDomainsConditionalSupport:
    """Domains known to honour HTTP conditional requests (ETag / Last-Modified)."""

    @pytest.mark.asyncio
    async def test_docs_python_org_etag(self):
        """docs.python.org serves an ETag and answers a conditional GET with 304."""
        url = "https://docs.python.org/3/"
        async with CacheValidator(timeout=15.0) as validator:
            # Grab the current ETag straight from the server.
            head_html, etag, _ = await validator._fetch_head(url)
            assert head_html is not None, "Should fetch head content"
            assert etag is not None, "docs.python.org should return ETag"
            # Revalidating with that same ETag must report the cache as fresh.
            result = await validator.validate(url=url, stored_etag=etag)
            assert result.status == CacheValidationResult.FRESH, f"Expected FRESH, got {result.status}: {result.reason}"
            assert "304" in result.reason

    @pytest.mark.asyncio
    async def test_docs_crawl4ai_etag(self):
        """docs.crawl4ai.com serves an ETag and answers a conditional GET with 304."""
        url = "https://docs.crawl4ai.com/"
        async with CacheValidator(timeout=15.0) as validator:
            _, etag, _ = await validator._fetch_head(url)
            assert etag is not None, "docs.crawl4ai.com should return ETag"
            result = await validator.validate(url=url, stored_etag=etag)
            assert result.status == CacheValidationResult.FRESH, f"Expected FRESH, got {result.status}: {result.reason}"

    @pytest.mark.asyncio
    async def test_wikipedia_last_modified(self):
        """Wikipedia honours If-Modified-Since via its Last-Modified header."""
        url = "https://en.wikipedia.org/wiki/Web_crawler"
        async with CacheValidator(timeout=15.0) as validator:
            _, _, last_modified = await validator._fetch_head(url)
            assert last_modified is not None, "Wikipedia should return Last-Modified"
            result = await validator.validate(url=url, stored_last_modified=last_modified)
            assert result.status == CacheValidationResult.FRESH, f"Expected FRESH, got {result.status}: {result.reason}"

    @pytest.mark.asyncio
    async def test_github_pages(self):
        """GitHub Pages exposes at least one validator header and revalidates cleanly."""
        url = "https://pages.github.com/"
        async with CacheValidator(timeout=15.0) as validator:
            _, etag, last_modified = await validator._fetch_head(url)
            # GitHub Pages typically sends one of the two validator headers.
            has_conditional = etag is not None or last_modified is not None
            assert has_conditional, "GitHub Pages should support conditional requests"
            result = await validator.validate(
                url=url,
                stored_etag=etag,
                stored_last_modified=last_modified,
            )
            assert result.status == CacheValidationResult.FRESH

    @pytest.mark.asyncio
    async def test_httpbin_etag(self):
        """httpbin's /etag endpoint 304s when the supplied ETag matches."""
        url = "https://httpbin.org/etag/test-etag-value"
        async with CacheValidator(timeout=15.0) as validator:
            result = await validator.validate(url=url, stored_etag='"test-etag-value"')
            assert result.status == CacheValidationResult.FRESH, f"Expected FRESH, got {result.status}: {result.reason}"
class TestRealDomainsNoConditionalSupport:
    """Fallback behaviour for domains where conditional requests are unreliable."""

    @pytest.mark.asyncio
    async def test_dynamic_site_fingerprint_fallback(self):
        """Head fingerprinting can validate freshness without any validator headers."""
        # example.com's head is stable, making it a good fingerprint target.
        url = "https://example.com/"
        async with CacheValidator(timeout=15.0) as validator:
            head_html, _, _ = await validator._fetch_head(url)
            assert head_html is not None
            fingerprint = compute_head_fingerprint(head_html)
            # Validate purely by fingerprint — no ETag / Last-Modified supplied.
            result = await validator.validate(
                url=url,
                stored_head_fingerprint=fingerprint,
            )
            assert result.status == CacheValidationResult.FRESH, f"Expected FRESH, got {result.status}: {result.reason}"
            assert "fingerprint" in result.reason.lower()

    @pytest.mark.asyncio
    async def test_news_site_changes_frequently(self):
        """A mismatched ETag on a frequently-changing site is detected as STALE."""
        url = "https://www.bbc.com/news"
        async with CacheValidator(timeout=15.0) as validator:
            head_html, _, _ = await validator._fetch_head(url)
            # BBC News serves an ETag, but it tracks the content.
            assert head_html is not None
            # A bogus ETag forces a full 200 response, which must classify as stale.
            result = await validator.validate(
                url=url,
                stored_etag='"fake-old-etag-12345"',
            )
            assert result.status == CacheValidationResult.STALE, f"Expected STALE, got {result.status}: {result.reason}"
class TestRealDomainsEdgeCases:
    """Error handling and edge cases against live endpoints."""

    @pytest.mark.asyncio
    async def test_nonexistent_domain(self):
        """DNS failure surfaces as ERROR rather than an exception."""
        url = "https://this-domain-definitely-does-not-exist-xyz123.com/"
        async with CacheValidator(timeout=5.0) as validator:
            result = await validator.validate(url=url, stored_etag='"test"')
            assert result.status == CacheValidationResult.ERROR

    @pytest.mark.asyncio
    async def test_timeout_slow_server(self):
        """A response slower than the configured timeout yields ERROR with a timeout reason."""
        # httpbin's /delay/10 answers after 10s; our validator gives up at 2s.
        url = "https://httpbin.org/delay/10"
        async with CacheValidator(timeout=2.0) as validator:
            result = await validator.validate(url=url, stored_etag='"test"')
            assert result.status == CacheValidationResult.ERROR
            reason = result.reason.lower()
            assert "timeout" in reason or "timed out" in reason

    @pytest.mark.asyncio
    async def test_redirect_handling(self):
        """Following a redirect must not raise (smoke test, no assertions)."""
        url = "https://httpbin.org/redirect/1"
        async with CacheValidator(timeout=15.0) as validator:
            # The target page's head may be uninteresting; we only care that the
            # redirect chain is followed without an error.
            await validator._fetch_head(url)

    @pytest.mark.asyncio
    async def test_https_only(self):
        """A plain HTTPS fetch returns head markup containing a <title>."""
        url = "https://www.google.com/"
        async with CacheValidator(timeout=15.0) as validator:
            head_html, _, _ = await validator._fetch_head(url)
            assert head_html is not None
            assert "<title" in head_html.lower()
class TestRealDomainsHeadFingerprint:
    """Fingerprint extraction from live <head> sections."""

    @pytest.mark.asyncio
    async def test_python_docs_fingerprint(self):
        """Python docs expose title/meta signals; fingerprint is non-empty and stable."""
        url = "https://docs.python.org/3/"
        async with CacheValidator(timeout=15.0) as validator:
            head_html, _, _ = await validator._fetch_head(url)
            assert head_html is not None
            fingerprint = compute_head_fingerprint(head_html)
            assert fingerprint != "", "Should extract fingerprint from Python docs"
            # Recomputing over the same markup must return the same value.
            assert fingerprint == compute_head_fingerprint(head_html)

    @pytest.mark.asyncio
    async def test_github_fingerprint(self):
        """GitHub's head carries og: tags (or at least a title) we can fingerprint."""
        url = "https://github.com/"
        async with CacheValidator(timeout=15.0) as validator:
            head_html, _, _ = await validator._fetch_head(url)
            assert head_html is not None
            lowered = head_html.lower()
            assert "og:" in lowered or "title" in lowered
            assert compute_head_fingerprint(head_html) != ""

    @pytest.mark.asyncio
    async def test_crawl4ai_docs_fingerprint(self):
        """Crawl4AI docs yield a usable fingerprint."""
        url = "https://docs.crawl4ai.com/"
        async with CacheValidator(timeout=15.0) as validator:
            head_html, _, _ = await validator._fetch_head(url)
            assert head_html is not None
            fingerprint = compute_head_fingerprint(head_html)
            assert fingerprint != "", "Should extract fingerprint from Crawl4AI docs"
class TestRealDomainsFetchHead:
    """Behaviour of CacheValidator._fetch_head against live servers."""

    @pytest.mark.asyncio
    async def test_fetch_stops_at_head_close(self):
        """Reading should stop at </head>; body markup must not precede it."""
        url = "https://docs.python.org/3/"
        async with CacheValidator(timeout=15.0) as validator:
            head_html, _, _ = await validator._fetch_head(url)
            assert head_html is not None
            lowered = head_html.lower()
            assert "</head>" in lowered
            # Either no <body> at all, or it only appears after the closing head tag.
            assert "<body" not in lowered or lowered.index("</head>") < lowered.find("<body")

    @pytest.mark.asyncio
    async def test_extracts_both_headers(self):
        """Both validator headers are extracted when the server sends them."""
        url = "https://docs.python.org/3/"
        async with CacheValidator(timeout=15.0) as validator:
            _, etag, last_modified = await validator._fetch_head(url)
            # Python docs are expected to send both validators.
            assert etag is not None, "Should have ETag"
            assert last_modified is not None, "Should have Last-Modified"

    @pytest.mark.asyncio
    async def test_handles_missing_head_tag(self):
        """Non-HTML responses (JSON) must not crash the head fetch (smoke test)."""
        url = "https://httpbin.org/json"
        async with CacheValidator(timeout=15.0) as validator:
            # May return partial content or None — surviving without an
            # exception is the behaviour under test.
            await validator._fetch_head(url)
class TestRealDomainsValidationCombinations:
    """validate() exercised with each combination of stored validation data."""

    @pytest.mark.asyncio
    async def test_etag_only(self):
        """A matching ETag alone is enough for a FRESH verdict."""
        url = "https://docs.python.org/3/"
        async with CacheValidator(timeout=15.0) as validator:
            _, etag, _ = await validator._fetch_head(url)
            result = await validator.validate(url=url, stored_etag=etag)
            assert result.status == CacheValidationResult.FRESH

    @pytest.mark.asyncio
    async def test_last_modified_only(self):
        """A matching Last-Modified alone is enough for a FRESH verdict."""
        url = "https://en.wikipedia.org/wiki/Python_(programming_language)"
        async with CacheValidator(timeout=15.0) as validator:
            _, _, last_modified = await validator._fetch_head(url)
            # Only assert when the server actually sent the header.
            if last_modified:
                result = await validator.validate(url=url, stored_last_modified=last_modified)
                assert result.status == CacheValidationResult.FRESH

    @pytest.mark.asyncio
    async def test_fingerprint_only(self):
        """A matching head fingerprint alone is enough for a FRESH verdict."""
        url = "https://example.com/"
        async with CacheValidator(timeout=15.0) as validator:
            head_html, _, _ = await validator._fetch_head(url)
            fingerprint = compute_head_fingerprint(head_html)
            # Only assert when a fingerprint could actually be extracted.
            if fingerprint:
                result = await validator.validate(url=url, stored_head_fingerprint=fingerprint)
                assert result.status == CacheValidationResult.FRESH

    @pytest.mark.asyncio
    async def test_all_validation_data(self):
        """Supplying ETag, Last-Modified and fingerprint together validates FRESH."""
        url = "https://docs.python.org/3/"
        async with CacheValidator(timeout=15.0) as validator:
            head_html, etag, last_modified = await validator._fetch_head(url)
            fingerprint = compute_head_fingerprint(head_html)
            result = await validator.validate(
                url=url,
                stored_etag=etag,
                stored_last_modified=last_modified,
                stored_head_fingerprint=fingerprint,
            )
            assert result.status == CacheValidationResult.FRESH

    @pytest.mark.asyncio
    async def test_stale_etag_fresh_fingerprint(self):
        """A stale ETag is rescued by a matching fingerprint."""
        url = "https://docs.python.org/3/"
        async with CacheValidator(timeout=15.0) as validator:
            head_html, _, _ = await validator._fetch_head(url)
            fingerprint = compute_head_fingerprint(head_html)
            # Deliberately stale ETag, but the real fingerprint should win.
            result = await validator.validate(
                url=url,
                stored_etag='"fake-stale-etag"',
                stored_head_fingerprint=fingerprint,
            )
            assert result.status == CacheValidationResult.FRESH
            assert "fingerprint" in result.reason.lower()