Compare commits: fix/sitema...docs-proxy

2 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | fe353c4e27 |  |
|  | 263ac890fd |  |

```diff
@@ -845,15 +845,6 @@ class AsyncUrlSeeder:
                     return
 
                 data = gzip.decompress(r.content) if url.endswith(".gz") else r.content
-                base_url = str(r.url)
-
-                def _normalize_loc(raw: Optional[str]) -> Optional[str]:
-                    if not raw:
-                        return None
-                    normalized = urljoin(base_url, raw.strip())
-                    if not normalized:
-                        return None
-                    return normalized
 
                 # Detect if this is a sitemap index by checking for <sitemapindex> or presence of <sitemap> elements
                 is_sitemap_index = False
```

```diff
@@ -866,42 +857,25 @@ class AsyncUrlSeeder:
                 # Use XML parser for sitemaps, not HTML parser
                 parser = etree.XMLParser(recover=True)
                 root = etree.fromstring(data, parser=parser)
-                # Namespace-agnostic lookups using local-name() so we honor custom or missing namespaces
-                sitemap_loc_nodes = root.xpath("//*[local-name()='sitemap']/*[local-name()='loc']")
-                url_loc_nodes = root.xpath("//*[local-name()='url']/*[local-name()='loc']")
 
-                self._log(
-                    "debug",
-                    "Parsed sitemap {url}: {sitemap_count} sitemap entries, {url_count} url entries discovered",
-                    params={
-                        "url": url,
-                        "sitemap_count": len(sitemap_loc_nodes),
-                        "url_count": len(url_loc_nodes),
-                    },
-                    tag="URL_SEED",
-                )
+                # Define namespace for sitemap
+                ns = {'s': 'http://www.sitemaps.org/schemas/sitemap/0.9'}
 
                 # Check for sitemap index entries
-                if sitemap_loc_nodes:
+                sitemap_locs = root.xpath('//s:sitemap/s:loc', namespaces=ns)
+                if sitemap_locs:
                     is_sitemap_index = True
-                    for sitemap_elem in sitemap_loc_nodes:
-                        loc = _normalize_loc(sitemap_elem.text)
+                    for sitemap_elem in sitemap_locs:
+                        loc = sitemap_elem.text.strip() if sitemap_elem.text else ""
                         if loc:
                             sub_sitemaps.append(loc)
 
                 # If not a sitemap index, get regular URLs
                 if not is_sitemap_index:
-                    for loc_elem in url_loc_nodes:
-                        loc = _normalize_loc(loc_elem.text)
+                    for loc_elem in root.xpath('//s:url/s:loc', namespaces=ns):
+                        loc = loc_elem.text.strip() if loc_elem.text else ""
                         if loc:
                             regular_urls.append(loc)
-                    if not regular_urls:
-                        self._log(
-                            "warning",
-                            "No <loc> entries found inside <url> tags for sitemap {url}. The sitemap might be empty or use an unexpected structure.",
-                            params={"url": url},
-                            tag="URL_SEED",
-                        )
             except Exception as e:
                 self._log("error", "LXML parsing error for sitemap {url}: {error}",
                           params={"url": url, "error": str(e)}, tag="URL_SEED")
```

```diff
@@ -918,39 +892,19 @@ class AsyncUrlSeeder:
 
                 # Check for sitemap index entries
                 sitemaps = root.findall('.//sitemap')
-                url_entries = root.findall('.//url')
-                self._log(
-                    "debug",
-                    "ElementTree parsed sitemap {url}: {sitemap_count} sitemap entries, {url_count} url entries discovered",
-                    params={
-                        "url": url,
-                        "sitemap_count": len(sitemaps),
-                        "url_count": len(url_entries),
-                    },
-                    tag="URL_SEED",
-                )
                 if sitemaps:
                     is_sitemap_index = True
                     for sitemap in sitemaps:
                         loc_elem = sitemap.find('loc')
-                        loc = _normalize_loc(loc_elem.text if loc_elem is not None else None)
-                        if loc:
-                            sub_sitemaps.append(loc)
+                        if loc_elem is not None and loc_elem.text:
+                            sub_sitemaps.append(loc_elem.text.strip())
 
                 # If not a sitemap index, get regular URLs
                 if not is_sitemap_index:
-                    for url_elem in url_entries:
+                    for url_elem in root.findall('.//url'):
                         loc_elem = url_elem.find('loc')
-                        loc = _normalize_loc(loc_elem.text if loc_elem is not None else None)
-                        if loc:
-                            regular_urls.append(loc)
-                    if not regular_urls:
-                        self._log(
-                            "warning",
-                            "No <loc> entries found inside <url> tags for sitemap {url}. The sitemap might be empty or use an unexpected structure.",
-                            params={"url": url},
-                            tag="URL_SEED",
-                        )
+                        if loc_elem is not None and loc_elem.text:
+                            regular_urls.append(loc_elem.text.strip())
             except Exception as e:
                 self._log("error", "ElementTree parsing error for sitemap {url}: {error}",
                           params={"url": url, "error": str(e)}, tag="URL_SEED")
```
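
The practical difference between the two lookups in these hunks: the `-` side's `local-name()` XPath tolerates sitemaps whose `<urlset>` omits the standard namespace or declares a custom one, while the `+` side's lookup only matches documents bound to `http://www.sitemaps.org/schemas/sitemap/0.9`. The snippet below is a standalone illustration of that difference, not code from this repository; the custom namespace URI mirrors the one used in the deleted tests further down.

```python
# Standalone sketch: why a namespace-bound sitemap lookup can come up empty
# while the local-name() form still finds <loc> entries.
from lxml import etree

sitemap = b"""<?xml version="1.0"?>
<urlset xmlns="https://custom.namespace/schema">
  <url><loc>https://example.com/page</loc></url>
</urlset>"""

root = etree.fromstring(sitemap, parser=etree.XMLParser(recover=True))

# Bound to the standard sitemap namespace: matches nothing here, because the
# document declares a different default namespace.
ns = {"s": "http://www.sitemaps.org/schemas/sitemap/0.9"}
print(len(root.xpath("//s:url/s:loc", namespaces=ns)))                    # -> 0

# Namespace-agnostic lookup via local-name(): still matches the <loc> node.
print(len(root.xpath("//*[local-name()='url']/*[local-name()='loc']")))   # -> 1
```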

````diff
@@ -1,98 +1,304 @@
-# Proxy
+# Proxy & Security
 
+This guide covers proxy configuration and security features in Crawl4AI, including SSL certificate analysis and proxy rotation strategies.
+
+## Understanding Proxy Configuration
+
+Crawl4AI recommends configuring proxies per request through `CrawlerRunConfig.proxy_config`. This gives you precise control, enables rotation strategies, and keeps examples simple enough to copy, paste, and run.
+
 ## Basic Proxy Setup
 
-Simple proxy configuration with `BrowserConfig`:
+Configure proxies that apply to each crawl operation:
 
 ```python
-from crawl4ai.async_configs import BrowserConfig
-
-# Using HTTP proxy
-browser_config = BrowserConfig(proxy_config={"server": "http://proxy.example.com:8080"})
-async with AsyncWebCrawler(config=browser_config) as crawler:
-    result = await crawler.arun(url="https://example.com")
-
-# Using SOCKS proxy
-browser_config = BrowserConfig(proxy_config={"server": "socks5://proxy.example.com:1080"})
-async with AsyncWebCrawler(config=browser_config) as crawler:
-    result = await crawler.arun(url="https://example.com")
+import asyncio
+from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, ProxyConfig
+
+run_config = CrawlerRunConfig(proxy_config=ProxyConfig(server="http://proxy.example.com:8080"))
+# run_config = CrawlerRunConfig(proxy_config={"server": "http://proxy.example.com:8080"})
+# run_config = CrawlerRunConfig(proxy_config="http://proxy.example.com:8080")
+
+
+async def main():
+    browser_config = BrowserConfig()
+    async with AsyncWebCrawler(config=browser_config) as crawler:
+        result = await crawler.arun(url="https://example.com", config=run_config)
+        print(f"Success: {result.success} -> {result.url}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
 ```
 
-## Authenticated Proxy
-
-Use an authenticated proxy with `BrowserConfig`:
+!!! note "Why request-level?"
+    `CrawlerRunConfig.proxy_config` keeps each request self-contained, so swapping proxies or rotation strategies is just a matter of building a new run configuration.
+
+## Supported Proxy Formats
+
+The `ProxyConfig.from_string()` method supports multiple formats:
 
 ```python
-from crawl4ai.async_configs import BrowserConfig
-
-browser_config = BrowserConfig(proxy_config={
-    "server": "http://[host]:[port]",
-    "username": "[username]",
-    "password": "[password]",
-})
-async with AsyncWebCrawler(config=browser_config) as crawler:
-    result = await crawler.arun(url="https://example.com")
+from crawl4ai import ProxyConfig
+
+# HTTP proxy with authentication
+proxy1 = ProxyConfig.from_string("http://user:pass@192.168.1.1:8080")
+
+# HTTPS proxy
+proxy2 = ProxyConfig.from_string("https://proxy.example.com:8080")
+
+# SOCKS5 proxy
+proxy3 = ProxyConfig.from_string("socks5://proxy.example.com:1080")
+
+# Simple IP:port format
+proxy4 = ProxyConfig.from_string("192.168.1.1:8080")
+
+# IP:port:user:pass format
+proxy5 = ProxyConfig.from_string("192.168.1.1:8080:user:pass")
 ```
 
+## Authenticated Proxies
+
+For proxies requiring authentication:
+
+```python
+import asyncio
+from crawl4ai import AsyncWebCrawler,BrowserConfig, CrawlerRunConfig, ProxyConfig
+
+run_config = CrawlerRunConfig(
+    proxy_config=ProxyConfig(
+        server="http://proxy.example.com:8080",
+        username="your_username",
+        password="your_password",
+    )
+)
+# Or dictionary style:
+# run_config = CrawlerRunConfig(proxy_config={
+#     "server": "http://proxy.example.com:8080",
+#     "username": "your_username",
+#     "password": "your_password",
+# })
+
+
+async def main():
+    browser_config = BrowserConfig()
+    async with AsyncWebCrawler(config=browser_config) as crawler:
+        result = await crawler.arun(url="https://example.com", config=run_config)
+        print(f"Success: {result.success} -> {result.url}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
+```
+
+## Environment Variable Configuration
+
+Load proxies from environment variables for easy configuration:
+
+```python
+import os
+from crawl4ai import ProxyConfig, CrawlerRunConfig
+
+# Set environment variable
+os.environ["PROXIES"] = "ip1:port1:user1:pass1,ip2:port2:user2:pass2,ip3:port3"
+
+# Load all proxies
+proxies = ProxyConfig.from_env()
+print(f"Loaded {len(proxies)} proxies")
+
+# Use first proxy
+if proxies:
+    run_config = CrawlerRunConfig(proxy_config=proxies[0])
+```
+
 ## Rotating Proxies
 
-Example using a proxy rotation service dynamically:
+Crawl4AI supports automatic proxy rotation to distribute requests across multiple proxy servers. Rotation is applied per request using a rotation strategy on `CrawlerRunConfig`.
 
+### Proxy Rotation (recommended)
 ```python
-import re
-from crawl4ai import (
-    AsyncWebCrawler,
-    BrowserConfig,
-    CrawlerRunConfig,
-    CacheMode,
-    RoundRobinProxyStrategy,
-)
 import asyncio
-from crawl4ai import ProxyConfig
+import re
+from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode, ProxyConfig
+from crawl4ai.proxy_strategy import RoundRobinProxyStrategy
 
 async def main():
-    # Load proxies and create rotation strategy
+    # Load proxies from environment
     proxies = ProxyConfig.from_env()
-    #eg: export PROXIES="ip1:port1:username1:password1,ip2:port2:username2:password2"
     if not proxies:
-        print("No proxies found in environment. Set PROXIES env variable!")
+        print("No proxies found! Set PROXIES environment variable.")
         return
 
+    # Create rotation strategy
     proxy_strategy = RoundRobinProxyStrategy(proxies)
 
-    # Create configs
+    # Configure per-request with proxy rotation
     browser_config = BrowserConfig(headless=True, verbose=False)
     run_config = CrawlerRunConfig(
         cache_mode=CacheMode.BYPASS,
-        proxy_rotation_strategy=proxy_strategy
+        proxy_rotation_strategy=proxy_strategy,
     )
 
     async with AsyncWebCrawler(config=browser_config) as crawler:
         urls = ["https://httpbin.org/ip"] * (len(proxies) * 2)  # Test each proxy twice
 
-        print("\n📈 Initializing crawler with proxy rotation...")
-        async with AsyncWebCrawler(config=browser_config) as crawler:
-            print("\n🚀 Starting batch crawl with proxy rotation...")
-            results = await crawler.arun_many(
-                urls=urls,
-                config=run_config
-            )
-            for result in results:
-                if result.success:
-                    ip_match = re.search(r'(?:[0-9]{1,3}\.){3}[0-9]{1,3}', result.html)
-                    current_proxy = run_config.proxy_config if run_config.proxy_config else None
-
-                    if current_proxy and ip_match:
-                        print(f"URL {result.url}")
-                        print(f"Proxy {current_proxy.server} -> Response IP: {ip_match.group(0)}")
-                        verified = ip_match.group(0) == current_proxy.ip
-                        if verified:
-                            print(f"✅ Proxy working! IP matches: {current_proxy.ip}")
-                        else:
-                            print("❌ Proxy failed or IP mismatch!")
-                        print("---")
-
-asyncio.run(main())
+        print(f"🚀 Testing {len(proxies)} proxies with rotation...")
+        results = await crawler.arun_many(urls=urls, config=run_config)
+
+        for i, result in enumerate(results):
+            if result.success:
+                # Extract IP from response
+                ip_match = re.search(r'(?:[0-9]{1,3}\.){3}[0-9]{1,3}', result.html)
+                if ip_match:
+                    detected_ip = ip_match.group(0)
+                    proxy_index = i % len(proxies)
+                    expected_ip = proxies[proxy_index].ip
+
+                    print(f"✅ Request {i+1}: Proxy {proxy_index+1} -> IP {detected_ip}")
+                    if detected_ip == expected_ip:
+                        print(" 🎯 IP matches proxy configuration")
+                    else:
+                        print(f" ⚠️ IP mismatch (expected {expected_ip})")
+                else:
+                    print(f"❌ Request {i+1}: Could not extract IP from response")
+            else:
+                print(f"❌ Request {i+1}: Failed - {result.error_message}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
 ```
+
+## SSL Certificate Analysis
+
+Combine proxy usage with SSL certificate inspection for enhanced security analysis. SSL certificate fetching is configured per request via `CrawlerRunConfig`.
+
+### Per-Request SSL Certificate Analysis
+```python
+import asyncio
+from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig
+
+run_config = CrawlerRunConfig(
+    proxy_config={
+        "server": "http://proxy.example.com:8080",
+        "username": "user",
+        "password": "pass",
+    },
+    fetch_ssl_certificate=True,  # Enable SSL certificate analysis for this request
+)
+
+
+async def main():
+    browser_config = BrowserConfig()
+    async with AsyncWebCrawler(config=browser_config) as crawler:
+        result = await crawler.arun(url="https://example.com", config=run_config)
+
+        if result.success:
+            print(f"✅ Crawled via proxy: {result.url}")
+
+            # Analyze SSL certificate
+            if result.ssl_certificate:
+                cert = result.ssl_certificate
+                print("🔒 SSL Certificate Info:")
+                print(f"  Issuer: {cert.issuer}")
+                print(f"  Subject: {cert.subject}")
+                print(f"  Valid until: {cert.valid_until}")
+                print(f"  Fingerprint: {cert.fingerprint}")
+
+                # Export certificate
+                cert.to_json("certificate.json")
+                print("💾 Certificate exported to certificate.json")
+            else:
+                print("⚠️ No SSL certificate information available")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
+```
+
+## Security Best Practices
+
+### 1. Proxy Rotation for Anonymity
+```python
+from crawl4ai import CrawlerRunConfig, ProxyConfig
+from crawl4ai.proxy_strategy import RoundRobinProxyStrategy
+
+# Use multiple proxies to avoid IP blocking
+proxies = ProxyConfig.from_env("PROXIES")
+strategy = RoundRobinProxyStrategy(proxies)
+
+# Configure rotation per request (recommended)
+run_config = CrawlerRunConfig(proxy_rotation_strategy=strategy)
+
+# For a fixed proxy across all requests, just reuse the same run_config instance
+static_run_config = run_config
+```
+
+### 2. SSL Certificate Verification
+```python
+from crawl4ai import CrawlerRunConfig
+
+# Always verify SSL certificates when possible
+# Per-request (affects specific requests)
+run_config = CrawlerRunConfig(fetch_ssl_certificate=True)
+```
+
+### 3. Environment Variable Security
+```bash
+# Use environment variables for sensitive proxy credentials
+# Avoid hardcoding usernames/passwords in code
+export PROXIES="ip1:port1:user1:pass1,ip2:port2:user2:pass2"
+```
+
+### 4. SOCKS5 for Enhanced Security
+```python
+from crawl4ai import CrawlerRunConfig
+
+# Prefer SOCKS5 proxies for better protocol support
+run_config = CrawlerRunConfig(proxy_config="socks5://proxy.example.com:1080")
+```
+
+## Migration from Deprecated `proxy` Parameter
+
+!!! warning "Deprecation Notice"
+    The legacy `proxy` argument on `BrowserConfig` is deprecated. Configure proxies through `CrawlerRunConfig.proxy_config` so each request fully describes its network settings.
+
+```python
+# Old (deprecated) approach
+# from crawl4ai import BrowserConfig
+# browser_config = BrowserConfig(proxy="http://proxy.example.com:8080")
+
+# New (preferred) approach
+from crawl4ai import CrawlerRunConfig
+run_config = CrawlerRunConfig(proxy_config="http://proxy.example.com:8080")
+```
+
+### Safe Logging of Proxies
+```python
+from crawl4ai import ProxyConfig
+
+def safe_proxy_repr(proxy: ProxyConfig):
+    if getattr(proxy, "username", None):
+        return f"{proxy.server} (auth: ****)"
+    return proxy.server
+```
+
+## Troubleshooting
+
+### Common Issues
+
+???+ question "Proxy connection failed"
+    - Verify the proxy server is reachable from your network.
+    - Double-check authentication credentials.
+    - Ensure the protocol matches (`http`, `https`, or `socks5`).
+
+???+ question "SSL certificate errors"
+    - Some proxies break SSL inspection; switch proxies if you see repeated failures.
+    - Consider temporarily disabling certificate fetching to isolate the issue.
+
+???+ question "Environment variables not loading"
+    - Confirm `PROXIES` (or your custom env var) is set before running the script.
+    - Check formatting: `ip:port:user:pass,ip:port:user:pass`.
+
+???+ question "Proxy rotation not working"
+    - Ensure `ProxyConfig.from_env()` actually loaded entries (`len(proxies) > 0`).
+    - Attach `proxy_rotation_strategy` to `CrawlerRunConfig`.
+    - Validate the proxy definitions you pass into the strategy.
````
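
One detail of the rewritten page worth spelling out is the `PROXIES` string format its environment-variable examples rely on: a comma-separated list of `ip:port` or `ip:port:user:pass` entries. The sketch below only illustrates how such a string decomposes; it is not Crawl4AI's actual parser, and it assumes plain `http` proxies.

```python
# Illustration only (not Crawl4AI's parser): decompose a PROXIES-style string
# of comma-separated "ip:port[:user:pass]" entries into per-proxy settings.
import os

os.environ["PROXIES"] = "10.0.0.1:8080:alice:secret,10.0.0.2:3128"

proxies = []
for entry in os.environ["PROXIES"].split(","):
    parts = entry.split(":")
    proxy = {"server": f"http://{parts[0]}:{parts[1]}"}  # assume an http scheme
    if len(parts) >= 4:  # credentials are optional
        proxy["username"], proxy["password"] = parts[2], parts[3]
    proxies.append(proxy)

print(proxies)
# [{'server': 'http://10.0.0.1:8080', 'username': 'alice', 'password': 'secret'},
#  {'server': 'http://10.0.0.2:3128'}]
```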

```diff
@@ -1,134 +0,0 @@
-import sys
-from types import SimpleNamespace
-
-import pytest
-
-# Provide a lightweight stub for rank_bm25 before importing the seeder to avoid
-# optional dependency issues (e.g., incompatible wheels in CI).
-class _FakeBM25:
-    def __init__(self, corpus):
-        self._scores = [1.0] * len(corpus)
-
-    def get_scores(self, tokens):
-        return self._scores
-
-
-sys.modules.setdefault("rank_bm25", SimpleNamespace(BM25Okapi=_FakeBM25))
-
-from crawl4ai.async_url_seeder import AsyncUrlSeeder
-
-
-class DummyResponse:
-    def __init__(self, request_url: str, text: str):
-        self.status_code = 200
-        self._content = text.encode("utf-8")
-        self.url = request_url
-
-    def raise_for_status(self):
-        return None
-
-    @property
-    def content(self):
-        return self._content
-
-    @property
-    def text(self):
-        return self._content.decode("utf-8")
-
-
-class DummyAsyncClient:
-    def __init__(self, response_map):
-        self._responses = response_map
-
-    async def get(self, url, **kwargs):
-        payload = self._responses[url]
-        if callable(payload):
-            payload = payload()
-        return DummyResponse(url, payload)
-
-
-@pytest.mark.asyncio
-async def test_iter_sitemap_handles_namespace_less_sitemaps():
-    xml = """<?xml version="1.0"?>
-    <urlset>
-        <url><loc>https://example.com/a</loc></url>
-        <url><loc>https://example.com/b</loc></url>
-    </urlset>
-    """
-    seeder = AsyncUrlSeeder(client=DummyAsyncClient({"https://example.com/sitemap.xml": xml}))
-
-    urls = []
-    async for u in seeder._iter_sitemap("https://example.com/sitemap.xml"):
-        urls.append(u)
-
-    assert urls == ["https://example.com/a", "https://example.com/b"]
-
-
-@pytest.mark.asyncio
-async def test_iter_sitemap_handles_custom_namespace():
-    xml = """<?xml version="1.0"?>
-    <urlset xmlns="https://custom.namespace/schema">
-        <url><loc>https://example.com/ns</loc></url>
-    </urlset>
-    """
-    seeder = AsyncUrlSeeder(client=DummyAsyncClient({"https://example.com/ns-sitemap.xml": xml}))
-
-    urls = []
-    async for u in seeder._iter_sitemap("https://example.com/ns-sitemap.xml"):
-        urls.append(u)
-
-    assert urls == ["https://example.com/ns"]
-
-
-@pytest.mark.asyncio
-async def test_iter_sitemap_handles_namespace_index_and_children():
-    index_xml = """<?xml version="1.0"?>
-    <sitemapindex xmlns="http://another.example/ns">
-        <sitemap>
-            <loc>https://example.com/child-1.xml</loc>
-        </sitemap>
-        <sitemap>
-            <loc>https://example.com/child-2.xml</loc>
-        </sitemap>
-    </sitemapindex>
-    """
-    child_xml = """<?xml version="1.0"?>
-    <urlset xmlns="http://irrelevant">
-        <url><loc>https://example.com/page-{n}</loc></url>
-    </urlset>
-    """
-    responses = {
-        "https://example.com/index.xml": index_xml,
-        "https://example.com/child-1.xml": child_xml.format(n=1),
-        "https://example.com/child-2.xml": child_xml.format(n=2),
-    }
-    seeder = AsyncUrlSeeder(client=DummyAsyncClient(responses))
-
-    urls = []
-    async for u in seeder._iter_sitemap("https://example.com/index.xml"):
-        urls.append(u)
-
-    assert sorted(urls) == [
-        "https://example.com/page-1",
-        "https://example.com/page-2",
-    ]
-
-
-@pytest.mark.asyncio
-async def test_iter_sitemap_normalizes_relative_locations():
-    xml = """<?xml version="1.0"?>
-    <urlset>
-        <url><loc>/relative-path</loc></url>
-        <url><loc>https://example.com/absolute</loc></url>
-    </urlset>
-    """
-    seeder = AsyncUrlSeeder(client=DummyAsyncClient({"https://example.com/sitemap.xml": xml}))
-
-    urls = []
-    async for u in seeder._iter_sitemap("https://example.com/sitemap.xml"):
-        urls.append(u)
-
-    assert urls == [
-        "https://example.com/relative-path",
-        "https://example.com/absolute",
-    ]
```
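
The last removed test asserts that a relative `<loc>` such as `/relative-path` is resolved against the sitemap URL, which is exactly what the `urljoin`-based `_normalize_loc` helper in the first hunk does. A quick standalone check of that behaviour:

```python
# Standalone check of the urljoin behaviour the relative-location test relies on.
from urllib.parse import urljoin

base_url = "https://example.com/sitemap.xml"
print(urljoin(base_url, "/relative-path"))                # https://example.com/relative-path
print(urljoin(base_url, "https://example.com/absolute"))  # absolute URLs pass through unchanged
```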