Compare commits
11 Commits
fix/cdp
...
fix/sitema
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
80745bceb9 | ||
|
|
d56b0eb9a9 | ||
|
|
66175e132b | ||
|
|
a30548a98f | ||
|
|
2c918155aa | ||
|
|
854694ef33 | ||
|
|
6534ece026 | ||
|
|
46e1a67f61 | ||
|
|
7dfe528d43 | ||
|
|
2dc6588573 | ||
|
|
e3467c08f6 |
Submodule .yoyo/snapshot deleted from 5e783b71e7
@@ -1383,9 +1383,10 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
try:
|
||||
await self.adapter.evaluate(page,
|
||||
f"""
|
||||
(() => {{
|
||||
(async () => {{
|
||||
try {{
|
||||
{remove_overlays_js}
|
||||
const removeOverlays = {remove_overlays_js};
|
||||
await removeOverlays();
|
||||
return {{ success: true }};
|
||||
}} catch (error) {{
|
||||
return {{
|
||||
|
||||
@@ -845,6 +845,15 @@ class AsyncUrlSeeder:
|
||||
return
|
||||
|
||||
data = gzip.decompress(r.content) if url.endswith(".gz") else r.content
|
||||
base_url = str(r.url)
|
||||
|
||||
def _normalize_loc(raw: Optional[str]) -> Optional[str]:
|
||||
if not raw:
|
||||
return None
|
||||
normalized = urljoin(base_url, raw.strip())
|
||||
if not normalized:
|
||||
return None
|
||||
return normalized
|
||||
|
||||
# Detect if this is a sitemap index by checking for <sitemapindex> or presence of <sitemap> elements
|
||||
is_sitemap_index = False
|
||||
@@ -857,25 +866,42 @@ class AsyncUrlSeeder:
|
||||
# Use XML parser for sitemaps, not HTML parser
|
||||
parser = etree.XMLParser(recover=True)
|
||||
root = etree.fromstring(data, parser=parser)
|
||||
# Namespace-agnostic lookups using local-name() so we honor custom or missing namespaces
|
||||
sitemap_loc_nodes = root.xpath("//*[local-name()='sitemap']/*[local-name()='loc']")
|
||||
url_loc_nodes = root.xpath("//*[local-name()='url']/*[local-name()='loc']")
|
||||
|
||||
# Define namespace for sitemap
|
||||
ns = {'s': 'http://www.sitemaps.org/schemas/sitemap/0.9'}
|
||||
self._log(
|
||||
"debug",
|
||||
"Parsed sitemap {url}: {sitemap_count} sitemap entries, {url_count} url entries discovered",
|
||||
params={
|
||||
"url": url,
|
||||
"sitemap_count": len(sitemap_loc_nodes),
|
||||
"url_count": len(url_loc_nodes),
|
||||
},
|
||||
tag="URL_SEED",
|
||||
)
|
||||
|
||||
# Check for sitemap index entries
|
||||
sitemap_locs = root.xpath('//s:sitemap/s:loc', namespaces=ns)
|
||||
if sitemap_locs:
|
||||
if sitemap_loc_nodes:
|
||||
is_sitemap_index = True
|
||||
for sitemap_elem in sitemap_locs:
|
||||
loc = sitemap_elem.text.strip() if sitemap_elem.text else ""
|
||||
for sitemap_elem in sitemap_loc_nodes:
|
||||
loc = _normalize_loc(sitemap_elem.text)
|
||||
if loc:
|
||||
sub_sitemaps.append(loc)
|
||||
|
||||
# If not a sitemap index, get regular URLs
|
||||
if not is_sitemap_index:
|
||||
for loc_elem in root.xpath('//s:url/s:loc', namespaces=ns):
|
||||
loc = loc_elem.text.strip() if loc_elem.text else ""
|
||||
for loc_elem in url_loc_nodes:
|
||||
loc = _normalize_loc(loc_elem.text)
|
||||
if loc:
|
||||
regular_urls.append(loc)
|
||||
if not regular_urls:
|
||||
self._log(
|
||||
"warning",
|
||||
"No <loc> entries found inside <url> tags for sitemap {url}. The sitemap might be empty or use an unexpected structure.",
|
||||
params={"url": url},
|
||||
tag="URL_SEED",
|
||||
)
|
||||
except Exception as e:
|
||||
self._log("error", "LXML parsing error for sitemap {url}: {error}",
|
||||
params={"url": url, "error": str(e)}, tag="URL_SEED")
|
||||
@@ -892,19 +918,39 @@ class AsyncUrlSeeder:
|
||||
|
||||
# Check for sitemap index entries
|
||||
sitemaps = root.findall('.//sitemap')
|
||||
url_entries = root.findall('.//url')
|
||||
self._log(
|
||||
"debug",
|
||||
"ElementTree parsed sitemap {url}: {sitemap_count} sitemap entries, {url_count} url entries discovered",
|
||||
params={
|
||||
"url": url,
|
||||
"sitemap_count": len(sitemaps),
|
||||
"url_count": len(url_entries),
|
||||
},
|
||||
tag="URL_SEED",
|
||||
)
|
||||
if sitemaps:
|
||||
is_sitemap_index = True
|
||||
for sitemap in sitemaps:
|
||||
loc_elem = sitemap.find('loc')
|
||||
if loc_elem is not None and loc_elem.text:
|
||||
sub_sitemaps.append(loc_elem.text.strip())
|
||||
loc = _normalize_loc(loc_elem.text if loc_elem is not None else None)
|
||||
if loc:
|
||||
sub_sitemaps.append(loc)
|
||||
|
||||
# If not a sitemap index, get regular URLs
|
||||
if not is_sitemap_index:
|
||||
for url_elem in root.findall('.//url'):
|
||||
for url_elem in url_entries:
|
||||
loc_elem = url_elem.find('loc')
|
||||
if loc_elem is not None and loc_elem.text:
|
||||
regular_urls.append(loc_elem.text.strip())
|
||||
loc = _normalize_loc(loc_elem.text if loc_elem is not None else None)
|
||||
if loc:
|
||||
regular_urls.append(loc)
|
||||
if not regular_urls:
|
||||
self._log(
|
||||
"warning",
|
||||
"No <loc> entries found inside <url> tags for sitemap {url}. The sitemap might be empty or use an unexpected structure.",
|
||||
params={"url": url},
|
||||
tag="URL_SEED",
|
||||
)
|
||||
except Exception as e:
|
||||
self._log("error", "ElementTree parsing error for sitemap {url}: {error}",
|
||||
params={"url": url, "error": str(e)}, tag="URL_SEED")
|
||||
|
||||
@@ -617,7 +617,17 @@ class AsyncWebCrawler:
|
||||
else config.chunking_strategy
|
||||
)
|
||||
sections = chunking.chunk(content)
|
||||
extracted_content = config.extraction_strategy.run(url, sections)
|
||||
# extracted_content = config.extraction_strategy.run(url, sections)
|
||||
|
||||
# Use async version if available for better parallelism
|
||||
if hasattr(config.extraction_strategy, 'arun'):
|
||||
extracted_content = await config.extraction_strategy.arun(url, sections)
|
||||
else:
|
||||
# Fallback to sync version run in thread pool to avoid blocking
|
||||
extracted_content = await asyncio.to_thread(
|
||||
config.extraction_strategy.run, url, sections
|
||||
)
|
||||
|
||||
extracted_content = json.dumps(
|
||||
extracted_content, indent=4, default=str, ensure_ascii=False
|
||||
)
|
||||
|
||||
@@ -1,23 +1,22 @@
|
||||
import asyncio
|
||||
import hashlib
|
||||
import time
|
||||
from typing import List, Optional
|
||||
import os
|
||||
import shlex
|
||||
import sys
|
||||
import shutil
|
||||
import tempfile
|
||||
import psutil
|
||||
import signal
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
import time
|
||||
import warnings
|
||||
from typing import List, Optional
|
||||
|
||||
import psutil
|
||||
import shlex
|
||||
from playwright.async_api import BrowserContext
|
||||
|
||||
from .async_configs import BrowserConfig, CrawlerRunConfig
|
||||
from .config import DOWNLOAD_PAGE_TIMEOUT
|
||||
import hashlib
|
||||
from .js_snippet import load_js_script
|
||||
from .config import DOWNLOAD_PAGE_TIMEOUT
|
||||
from .async_configs import BrowserConfig, CrawlerRunConfig
|
||||
from .utils import get_chromium_path
|
||||
import warnings
|
||||
|
||||
|
||||
BROWSER_DISABLE_OPTIONS = [
|
||||
"--disable-background-networking",
|
||||
@@ -66,7 +65,7 @@ class ManagedBrowser:
|
||||
_cleanup(): Terminates the browser process and removes the temporary directory.
|
||||
create_profile(): Static method to create a user profile by launching a browser for user interaction.
|
||||
"""
|
||||
|
||||
|
||||
@staticmethod
|
||||
def build_browser_flags(config: BrowserConfig) -> List[str]:
|
||||
"""Common CLI flags for launching Chromium"""
|
||||
@@ -93,25 +92,21 @@ class ManagedBrowser:
|
||||
if config.light_mode:
|
||||
flags.extend(BROWSER_DISABLE_OPTIONS)
|
||||
if config.text_mode:
|
||||
flags.extend(
|
||||
[
|
||||
"--blink-settings=imagesEnabled=false",
|
||||
"--disable-remote-fonts",
|
||||
"--disable-images",
|
||||
"--disable-javascript",
|
||||
"--disable-software-rasterizer",
|
||||
"--disable-dev-shm-usage",
|
||||
]
|
||||
)
|
||||
flags.extend([
|
||||
"--blink-settings=imagesEnabled=false",
|
||||
"--disable-remote-fonts",
|
||||
"--disable-images",
|
||||
"--disable-javascript",
|
||||
"--disable-software-rasterizer",
|
||||
"--disable-dev-shm-usage",
|
||||
])
|
||||
# proxy support
|
||||
if config.proxy:
|
||||
flags.append(f"--proxy-server={config.proxy}")
|
||||
elif config.proxy_config:
|
||||
creds = ""
|
||||
if config.proxy_config.username and config.proxy_config.password:
|
||||
creds = (
|
||||
f"{config.proxy_config.username}:{config.proxy_config.password}@"
|
||||
)
|
||||
creds = f"{config.proxy_config.username}:{config.proxy_config.password}@"
|
||||
flags.append(f"--proxy-server={creds}{config.proxy_config.server}")
|
||||
# dedupe
|
||||
return list(dict.fromkeys(flags))
|
||||
@@ -132,7 +127,7 @@ class ManagedBrowser:
|
||||
logger=None,
|
||||
host: str = "localhost",
|
||||
debugging_port: int = 9222,
|
||||
cdp_url: Optional[str] = None,
|
||||
cdp_url: Optional[str] = None,
|
||||
browser_config: Optional[BrowserConfig] = None,
|
||||
):
|
||||
"""
|
||||
@@ -168,7 +163,7 @@ class ManagedBrowser:
|
||||
Starts the browser process or returns CDP endpoint URL.
|
||||
If cdp_url is provided, returns it directly.
|
||||
If user_data_dir is not provided for local browser, creates a temporary directory.
|
||||
|
||||
|
||||
Returns:
|
||||
str: CDP endpoint URL
|
||||
"""
|
||||
@@ -184,9 +179,10 @@ class ManagedBrowser:
|
||||
# Get browser path and args based on OS and browser type
|
||||
# browser_path = self._get_browser_path()
|
||||
args = await self._get_browser_args()
|
||||
|
||||
|
||||
if self.browser_config.extra_args:
|
||||
args.extend(self.browser_config.extra_args)
|
||||
|
||||
|
||||
# ── make sure no old Chromium instance is owning the same port/profile ──
|
||||
try:
|
||||
@@ -204,9 +200,7 @@ class ManagedBrowser:
|
||||
else: # macOS / Linux
|
||||
# kill any process listening on the same debugging port
|
||||
pids = (
|
||||
subprocess.check_output(
|
||||
shlex.split(f"lsof -t -i:{self.debugging_port}")
|
||||
)
|
||||
subprocess.check_output(shlex.split(f"lsof -t -i:{self.debugging_port}"))
|
||||
.decode()
|
||||
.strip()
|
||||
.splitlines()
|
||||
@@ -225,7 +219,8 @@ class ManagedBrowser:
|
||||
os.remove(fp)
|
||||
except Exception as _e:
|
||||
# non-fatal — we'll try to start anyway, but log what happened
|
||||
self.logger.warning(f"pre-launch cleanup failed: {_e}", tag="BROWSER")
|
||||
self.logger.warning(f"pre-launch cleanup failed: {_e}", tag="BROWSER")
|
||||
|
||||
|
||||
# Start browser process
|
||||
try:
|
||||
@@ -233,26 +228,26 @@ class ManagedBrowser:
|
||||
# On Unix, we'll use preexec_fn=os.setpgrp to start the process in a new process group
|
||||
if sys.platform == "win32":
|
||||
self.browser_process = subprocess.Popen(
|
||||
args,
|
||||
stdout=subprocess.PIPE,
|
||||
args,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
creationflags=subprocess.DETACHED_PROCESS
|
||||
| subprocess.CREATE_NEW_PROCESS_GROUP,
|
||||
creationflags=subprocess.DETACHED_PROCESS | subprocess.CREATE_NEW_PROCESS_GROUP
|
||||
)
|
||||
else:
|
||||
self.browser_process = subprocess.Popen(
|
||||
args,
|
||||
stdout=subprocess.PIPE,
|
||||
args,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
preexec_fn=os.setpgrp, # Start in a new process group
|
||||
preexec_fn=os.setpgrp # Start in a new process group
|
||||
)
|
||||
|
||||
|
||||
# If verbose is True print args used to run the process
|
||||
if self.logger and self.browser_config.verbose:
|
||||
self.logger.debug(
|
||||
f"Starting browser with args: {' '.join(args)}", tag="BROWSER"
|
||||
)
|
||||
|
||||
f"Starting browser with args: {' '.join(args)}",
|
||||
tag="BROWSER"
|
||||
)
|
||||
|
||||
# We'll monitor for a short time to make sure it starts properly, but won't keep monitoring
|
||||
await asyncio.sleep(0.5) # Give browser time to start
|
||||
await self._initial_startup_check()
|
||||
@@ -269,7 +264,7 @@ class ManagedBrowser:
|
||||
"""
|
||||
if not self.browser_process:
|
||||
return
|
||||
|
||||
|
||||
# Check that process started without immediate termination
|
||||
await asyncio.sleep(0.5)
|
||||
if self.browser_process.poll() is not None:
|
||||
@@ -279,7 +274,7 @@ class ManagedBrowser:
|
||||
stdout, stderr = self.browser_process.communicate(timeout=0.5)
|
||||
except subprocess.TimeoutExpired:
|
||||
pass
|
||||
|
||||
|
||||
self.logger.error(
|
||||
message="Browser process terminated during startup | Code: {code} | STDOUT: {stdout} | STDERR: {stderr}",
|
||||
tag="ERROR",
|
||||
@@ -289,7 +284,7 @@ class ManagedBrowser:
|
||||
"stderr": stderr.decode() if stderr else "",
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
async def _monitor_browser_process(self):
|
||||
"""
|
||||
Monitor the browser process for unexpected termination.
|
||||
@@ -374,6 +369,9 @@ class ManagedBrowser:
|
||||
]
|
||||
if self.headless:
|
||||
flags.append("--headless=new")
|
||||
# Add viewport flag if specified in config
|
||||
if self.browser_config.viewport_height and self.browser_config.viewport_width:
|
||||
flags.append(f"--window-size={self.browser_config.viewport_width},{self.browser_config.viewport_height}")
|
||||
# merge common launch flags
|
||||
flags.extend(self.build_browser_flags(self.browser_config))
|
||||
elif self.browser_type == "firefox":
|
||||
@@ -412,14 +410,7 @@ class ManagedBrowser:
|
||||
if sys.platform == "win32":
|
||||
# On Windows we might need taskkill for detached processes
|
||||
try:
|
||||
subprocess.run(
|
||||
[
|
||||
"taskkill",
|
||||
"/F",
|
||||
"/PID",
|
||||
str(self.browser_process.pid),
|
||||
]
|
||||
)
|
||||
subprocess.run(["taskkill", "/F", "/PID", str(self.browser_process.pid)])
|
||||
except Exception:
|
||||
self.browser_process.kill()
|
||||
else:
|
||||
@@ -429,7 +420,7 @@ class ManagedBrowser:
|
||||
except Exception as e:
|
||||
self.logger.error(
|
||||
message="Error terminating browser: {error}",
|
||||
tag="ERROR",
|
||||
tag="ERROR",
|
||||
params={"error": str(e)},
|
||||
)
|
||||
|
||||
@@ -442,77 +433,75 @@ class ManagedBrowser:
|
||||
tag="ERROR",
|
||||
params={"error": str(e)},
|
||||
)
|
||||
|
||||
|
||||
# These methods have been moved to BrowserProfiler class
|
||||
@staticmethod
|
||||
async def create_profile(browser_config=None, profile_name=None, logger=None):
|
||||
"""
|
||||
This method has been moved to the BrowserProfiler class.
|
||||
|
||||
|
||||
Creates a browser profile by launching a browser for interactive user setup
|
||||
and waits until the user closes it. The profile is stored in a directory that
|
||||
can be used later with BrowserConfig.user_data_dir.
|
||||
|
||||
|
||||
Please use BrowserProfiler.create_profile() instead.
|
||||
|
||||
|
||||
Example:
|
||||
```python
|
||||
from crawl4ai.browser_profiler import BrowserProfiler
|
||||
|
||||
|
||||
profiler = BrowserProfiler()
|
||||
profile_path = await profiler.create_profile(profile_name="my-login-profile")
|
||||
```
|
||||
"""
|
||||
from .browser_profiler import BrowserProfiler
|
||||
|
||||
|
||||
# Create a BrowserProfiler instance and delegate to it
|
||||
profiler = BrowserProfiler(logger=logger)
|
||||
return await profiler.create_profile(
|
||||
profile_name=profile_name, browser_config=browser_config
|
||||
)
|
||||
|
||||
return await profiler.create_profile(profile_name=profile_name, browser_config=browser_config)
|
||||
|
||||
@staticmethod
|
||||
def list_profiles():
|
||||
"""
|
||||
This method has been moved to the BrowserProfiler class.
|
||||
|
||||
|
||||
Lists all available browser profiles in the Crawl4AI profiles directory.
|
||||
|
||||
|
||||
Please use BrowserProfiler.list_profiles() instead.
|
||||
|
||||
|
||||
Example:
|
||||
```python
|
||||
from crawl4ai.browser_profiler import BrowserProfiler
|
||||
|
||||
|
||||
profiler = BrowserProfiler()
|
||||
profiles = profiler.list_profiles()
|
||||
```
|
||||
"""
|
||||
from .browser_profiler import BrowserProfiler
|
||||
|
||||
|
||||
# Create a BrowserProfiler instance and delegate to it
|
||||
profiler = BrowserProfiler()
|
||||
return profiler.list_profiles()
|
||||
|
||||
|
||||
@staticmethod
|
||||
def delete_profile(profile_name_or_path):
|
||||
"""
|
||||
This method has been moved to the BrowserProfiler class.
|
||||
|
||||
|
||||
Delete a browser profile by name or path.
|
||||
|
||||
|
||||
Please use BrowserProfiler.delete_profile() instead.
|
||||
|
||||
|
||||
Example:
|
||||
```python
|
||||
from crawl4ai.browser_profiler import BrowserProfiler
|
||||
|
||||
|
||||
profiler = BrowserProfiler()
|
||||
success = profiler.delete_profile("my-profile")
|
||||
```
|
||||
"""
|
||||
from .browser_profiler import BrowserProfiler
|
||||
|
||||
|
||||
# Create a BrowserProfiler instance and delegate to it
|
||||
profiler = BrowserProfiler()
|
||||
return profiler.delete_profile(profile_name_or_path)
|
||||
@@ -565,10 +554,11 @@ async def clone_runtime_state(
|
||||
"accuracy": crawlerRunConfig.geolocation.accuracy,
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
return dst
|
||||
|
||||
|
||||
|
||||
class BrowserManager:
|
||||
"""
|
||||
Manages the browser instance and context.
|
||||
@@ -585,7 +575,7 @@ class BrowserManager:
|
||||
"""
|
||||
|
||||
_playwright_instance = None
|
||||
|
||||
|
||||
@classmethod
|
||||
async def get_playwright(cls, use_undetected: bool = False):
|
||||
if use_undetected:
|
||||
@@ -593,11 +583,9 @@ class BrowserManager:
|
||||
else:
|
||||
from playwright.async_api import async_playwright
|
||||
cls._playwright_instance = await async_playwright().start()
|
||||
return cls._playwright_instance
|
||||
return cls._playwright_instance
|
||||
|
||||
def __init__(
|
||||
self, browser_config: BrowserConfig, logger=None, use_undetected: bool = False
|
||||
):
|
||||
def __init__(self, browser_config: BrowserConfig, logger=None, use_undetected: bool = False):
|
||||
"""
|
||||
Initialize the BrowserManager with a browser configuration.
|
||||
|
||||
@@ -623,17 +611,16 @@ class BrowserManager:
|
||||
# Keep track of contexts by a "config signature," so each unique config reuses a single context
|
||||
self.contexts_by_config = {}
|
||||
self._contexts_lock = asyncio.Lock()
|
||||
|
||||
|
||||
# Serialize context.new_page() across concurrent tasks to avoid races
|
||||
# when using a shared persistent context (context.pages may be empty
|
||||
# for all racers). Prevents 'Target page/context closed' errors.
|
||||
self._page_lock = asyncio.Lock()
|
||||
|
||||
|
||||
# Stealth adapter for stealth mode
|
||||
self._stealth_adapter = None
|
||||
if self.config.enable_stealth and not self.use_undetected:
|
||||
from .browser_adapter import StealthAdapter
|
||||
|
||||
self._stealth_adapter = StealthAdapter()
|
||||
|
||||
# Initialize ManagedBrowser if needed
|
||||
@@ -662,7 +649,7 @@ class BrowserManager:
|
||||
"""
|
||||
if self.playwright is not None:
|
||||
await self.close()
|
||||
|
||||
|
||||
if self.use_undetected:
|
||||
from patchright.async_api import async_playwright
|
||||
else:
|
||||
@@ -673,11 +660,7 @@ class BrowserManager:
|
||||
|
||||
if self.config.cdp_url or self.config.use_managed_browser:
|
||||
self.config.use_managed_browser = True
|
||||
cdp_url = (
|
||||
await self.managed_browser.start()
|
||||
if not self.config.cdp_url
|
||||
else self.config.cdp_url
|
||||
)
|
||||
cdp_url = await self.managed_browser.start() if not self.config.cdp_url else self.config.cdp_url
|
||||
self.browser = await self.playwright.chromium.connect_over_cdp(cdp_url)
|
||||
contexts = self.browser.contexts
|
||||
if contexts:
|
||||
@@ -698,6 +681,7 @@ class BrowserManager:
|
||||
|
||||
self.default_context = self.browser
|
||||
|
||||
|
||||
def _build_browser_args(self) -> dict:
|
||||
"""Build browser launch arguments from config."""
|
||||
args = [
|
||||
@@ -743,7 +727,7 @@ class BrowserManager:
|
||||
|
||||
# Deduplicate args
|
||||
args = list(dict.fromkeys(args))
|
||||
|
||||
|
||||
browser_args = {"headless": self.config.headless, "args": args}
|
||||
|
||||
if self.config.chrome_channel:
|
||||
@@ -820,9 +804,9 @@ class BrowserManager:
|
||||
context.set_default_navigation_timeout(DOWNLOAD_PAGE_TIMEOUT)
|
||||
if self.config.downloads_path:
|
||||
context._impl_obj._options["accept_downloads"] = True
|
||||
context._impl_obj._options["downloads_path"] = (
|
||||
self.config.downloads_path
|
||||
)
|
||||
context._impl_obj._options[
|
||||
"downloads_path"
|
||||
] = self.config.downloads_path
|
||||
|
||||
# Handle user agent and browser hints
|
||||
if self.config.user_agent:
|
||||
@@ -853,7 +837,7 @@ class BrowserManager:
|
||||
or crawlerRunConfig.simulate_user
|
||||
or crawlerRunConfig.magic
|
||||
):
|
||||
await context.add_init_script(load_js_script("navigator_overrider"))
|
||||
await context.add_init_script(load_js_script("navigator_overrider"))
|
||||
|
||||
async def create_browser_context(self, crawlerRunConfig: CrawlerRunConfig = None):
|
||||
"""
|
||||
@@ -864,7 +848,7 @@ class BrowserManager:
|
||||
Context: Browser context object with the specified configurations
|
||||
"""
|
||||
# Base settings
|
||||
user_agent = self.config.headers.get("User-Agent", self.config.user_agent)
|
||||
user_agent = self.config.headers.get("User-Agent", self.config.user_agent)
|
||||
viewport_settings = {
|
||||
"width": self.config.viewport_width,
|
||||
"height": self.config.viewport_height,
|
||||
@@ -937,7 +921,7 @@ class BrowserManager:
|
||||
"device_scale_factor": 1.0,
|
||||
"java_script_enabled": self.config.java_script_enabled,
|
||||
}
|
||||
|
||||
|
||||
if crawlerRunConfig:
|
||||
# Check if there is value for crawlerRunConfig.proxy_config set add that to context
|
||||
if crawlerRunConfig.proxy_config:
|
||||
@@ -945,12 +929,10 @@ class BrowserManager:
|
||||
"server": crawlerRunConfig.proxy_config.server,
|
||||
}
|
||||
if crawlerRunConfig.proxy_config.username:
|
||||
proxy_settings.update(
|
||||
{
|
||||
"username": crawlerRunConfig.proxy_config.username,
|
||||
"password": crawlerRunConfig.proxy_config.password,
|
||||
}
|
||||
)
|
||||
proxy_settings.update({
|
||||
"username": crawlerRunConfig.proxy_config.username,
|
||||
"password": crawlerRunConfig.proxy_config.password,
|
||||
})
|
||||
context_settings["proxy"] = proxy_settings
|
||||
|
||||
if self.config.text_mode:
|
||||
@@ -1008,12 +990,12 @@ class BrowserManager:
|
||||
"cache_mode",
|
||||
"content_filter",
|
||||
"semaphore_count",
|
||||
"url",
|
||||
"url"
|
||||
]
|
||||
|
||||
|
||||
# Do NOT exclude locale, timezone_id, or geolocation as these DO affect browser context
|
||||
# and should cause a new context to be created if they change
|
||||
|
||||
|
||||
for key in ephemeral_keys:
|
||||
if key in config_dict:
|
||||
del config_dict[key]
|
||||
@@ -1034,7 +1016,7 @@ class BrowserManager:
|
||||
self.logger.warning(
|
||||
message="Failed to apply stealth to page: {error}",
|
||||
tag="STEALTH",
|
||||
params={"error": str(e)},
|
||||
params={"error": str(e)}
|
||||
)
|
||||
|
||||
async def get_page(self, crawlerRunConfig: CrawlerRunConfig):
|
||||
@@ -1060,10 +1042,8 @@ class BrowserManager:
|
||||
if self.config.use_managed_browser:
|
||||
if self.config.storage_state:
|
||||
context = await self.create_browser_context(crawlerRunConfig)
|
||||
ctx = self.default_context # default context, one window only
|
||||
ctx = await clone_runtime_state(
|
||||
context, ctx, crawlerRunConfig, self.config
|
||||
)
|
||||
ctx = self.default_context # default context, one window only
|
||||
ctx = await clone_runtime_state(context, ctx, crawlerRunConfig, self.config)
|
||||
# Avoid concurrent new_page on shared persistent context
|
||||
# See GH-1198: context.pages can be empty under races
|
||||
async with self._page_lock:
|
||||
@@ -1075,21 +1055,14 @@ class BrowserManager:
|
||||
page = next((p for p in pages if p.url == crawlerRunConfig.url), None)
|
||||
if not page:
|
||||
if pages:
|
||||
# FIX: Always create a new page for managed browsers to support concurrent crawling
|
||||
# Previously: page = pages[0]
|
||||
async with self._page_lock:
|
||||
page = await context.new_page()
|
||||
await self._apply_stealth_to_page(page)
|
||||
page = pages[0]
|
||||
else:
|
||||
# Double-check under lock to avoid TOCTOU and ensure only
|
||||
# one task calls new_page when pages=[] concurrently
|
||||
async with self._page_lock:
|
||||
pages = context.pages
|
||||
if pages:
|
||||
# FIX: Always create a new page for managed browsers to support concurrent crawling
|
||||
# Previously: page = pages[0]
|
||||
page = await context.new_page()
|
||||
await self._apply_stealth_to_page(page)
|
||||
page = pages[0]
|
||||
else:
|
||||
page = await context.new_page()
|
||||
await self._apply_stealth_to_page(page)
|
||||
@@ -1145,7 +1118,7 @@ class BrowserManager:
|
||||
"""Close all browser resources and clean up."""
|
||||
if self.config.cdp_url:
|
||||
return
|
||||
|
||||
|
||||
if self.config.sleep_on_close:
|
||||
await asyncio.sleep(0.5)
|
||||
|
||||
@@ -1161,7 +1134,7 @@ class BrowserManager:
|
||||
self.logger.error(
|
||||
message="Error closing context: {error}",
|
||||
tag="ERROR",
|
||||
params={"error": str(e)},
|
||||
params={"error": str(e)}
|
||||
)
|
||||
self.contexts_by_config.clear()
|
||||
|
||||
|
||||
@@ -94,6 +94,20 @@ class ExtractionStrategy(ABC):
|
||||
extracted_content.extend(future.result())
|
||||
return extracted_content
|
||||
|
||||
async def arun(self, url: str, sections: List[str], *q, **kwargs) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Async version: Process sections of text in parallel using asyncio.
|
||||
|
||||
Default implementation runs the sync version in a thread pool.
|
||||
Subclasses can override this for true async processing.
|
||||
|
||||
:param url: The URL of the webpage.
|
||||
:param sections: List of sections (strings) to process.
|
||||
:return: A list of processed JSON blocks.
|
||||
"""
|
||||
import asyncio
|
||||
return await asyncio.to_thread(self.run, url, sections, *q, **kwargs)
|
||||
|
||||
|
||||
class NoExtractionStrategy(ExtractionStrategy):
|
||||
"""
|
||||
@@ -780,6 +794,177 @@ class LLMExtractionStrategy(ExtractionStrategy):
|
||||
|
||||
return extracted_content
|
||||
|
||||
async def aextract(self, url: str, ix: int, html: str) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Async version: Extract meaningful blocks or chunks from the given HTML using an LLM.
|
||||
|
||||
How it works:
|
||||
1. Construct a prompt with variables.
|
||||
2. Make an async request to the LLM using the prompt.
|
||||
3. Parse the response and extract blocks or chunks.
|
||||
|
||||
Args:
|
||||
url: The URL of the webpage.
|
||||
ix: Index of the block.
|
||||
html: The HTML content of the webpage.
|
||||
|
||||
Returns:
|
||||
A list of extracted blocks or chunks.
|
||||
"""
|
||||
from .utils import aperform_completion_with_backoff
|
||||
|
||||
if self.verbose:
|
||||
print(f"[LOG] Call LLM for {url} - block index: {ix}")
|
||||
|
||||
variable_values = {
|
||||
"URL": url,
|
||||
"HTML": escape_json_string(sanitize_html(html)),
|
||||
}
|
||||
|
||||
prompt_with_variables = PROMPT_EXTRACT_BLOCKS
|
||||
if self.instruction:
|
||||
variable_values["REQUEST"] = self.instruction
|
||||
prompt_with_variables = PROMPT_EXTRACT_BLOCKS_WITH_INSTRUCTION
|
||||
|
||||
if self.extract_type == "schema" and self.schema:
|
||||
variable_values["SCHEMA"] = json.dumps(self.schema, indent=2)
|
||||
prompt_with_variables = PROMPT_EXTRACT_SCHEMA_WITH_INSTRUCTION
|
||||
|
||||
if self.extract_type == "schema" and not self.schema:
|
||||
prompt_with_variables = PROMPT_EXTRACT_INFERRED_SCHEMA
|
||||
|
||||
for variable in variable_values:
|
||||
prompt_with_variables = prompt_with_variables.replace(
|
||||
"{" + variable + "}", variable_values[variable]
|
||||
)
|
||||
|
||||
try:
|
||||
response = await aperform_completion_with_backoff(
|
||||
self.llm_config.provider,
|
||||
prompt_with_variables,
|
||||
self.llm_config.api_token,
|
||||
base_url=self.llm_config.base_url,
|
||||
json_response=self.force_json_response,
|
||||
extra_args=self.extra_args,
|
||||
)
|
||||
# Track usage
|
||||
usage = TokenUsage(
|
||||
completion_tokens=response.usage.completion_tokens,
|
||||
prompt_tokens=response.usage.prompt_tokens,
|
||||
total_tokens=response.usage.total_tokens,
|
||||
completion_tokens_details=response.usage.completion_tokens_details.__dict__
|
||||
if response.usage.completion_tokens_details
|
||||
else {},
|
||||
prompt_tokens_details=response.usage.prompt_tokens_details.__dict__
|
||||
if response.usage.prompt_tokens_details
|
||||
else {},
|
||||
)
|
||||
self.usages.append(usage)
|
||||
|
||||
# Update totals
|
||||
self.total_usage.completion_tokens += usage.completion_tokens
|
||||
self.total_usage.prompt_tokens += usage.prompt_tokens
|
||||
self.total_usage.total_tokens += usage.total_tokens
|
||||
|
||||
try:
|
||||
content = response.choices[0].message.content
|
||||
blocks = None
|
||||
|
||||
if self.force_json_response:
|
||||
blocks = json.loads(content)
|
||||
if isinstance(blocks, dict):
|
||||
if len(blocks) == 1 and isinstance(list(blocks.values())[0], list):
|
||||
blocks = list(blocks.values())[0]
|
||||
else:
|
||||
blocks = [blocks]
|
||||
elif isinstance(blocks, list):
|
||||
blocks = blocks
|
||||
else:
|
||||
blocks = extract_xml_data(["blocks"], content)["blocks"]
|
||||
blocks = json.loads(blocks)
|
||||
|
||||
for block in blocks:
|
||||
block["error"] = False
|
||||
except Exception:
|
||||
parsed, unparsed = split_and_parse_json_objects(
|
||||
response.choices[0].message.content
|
||||
)
|
||||
blocks = parsed
|
||||
if unparsed:
|
||||
blocks.append(
|
||||
{"index": 0, "error": True, "tags": ["error"], "content": unparsed}
|
||||
)
|
||||
|
||||
if self.verbose:
|
||||
print(
|
||||
"[LOG] Extracted",
|
||||
len(blocks),
|
||||
"blocks from URL:",
|
||||
url,
|
||||
"block index:",
|
||||
ix,
|
||||
)
|
||||
return blocks
|
||||
except Exception as e:
|
||||
if self.verbose:
|
||||
print(f"[LOG] Error in LLM extraction: {e}")
|
||||
return [
|
||||
{
|
||||
"index": ix,
|
||||
"error": True,
|
||||
"tags": ["error"],
|
||||
"content": str(e),
|
||||
}
|
||||
]
|
||||
|
||||
async def arun(self, url: str, sections: List[str]) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Async version: Process sections with true parallelism using asyncio.gather.
|
||||
|
||||
Args:
|
||||
url: The URL of the webpage.
|
||||
sections: List of sections (strings) to process.
|
||||
|
||||
Returns:
|
||||
A list of extracted blocks or chunks.
|
||||
"""
|
||||
import asyncio
|
||||
|
||||
merged_sections = self._merge(
|
||||
sections,
|
||||
self.chunk_token_threshold,
|
||||
overlap=int(self.chunk_token_threshold * self.overlap_rate),
|
||||
)
|
||||
|
||||
extracted_content = []
|
||||
|
||||
# Create tasks for all sections to run in parallel
|
||||
tasks = [
|
||||
self.aextract(url, ix, sanitize_input_encode(section))
|
||||
for ix, section in enumerate(merged_sections)
|
||||
]
|
||||
|
||||
# Execute all tasks concurrently
|
||||
results = await asyncio.gather(*tasks, return_exceptions=True)
|
||||
|
||||
# Process results
|
||||
for result in results:
|
||||
if isinstance(result, Exception):
|
||||
if self.verbose:
|
||||
print(f"Error in async extraction: {result}")
|
||||
extracted_content.append(
|
||||
{
|
||||
"index": 0,
|
||||
"error": True,
|
||||
"tags": ["error"],
|
||||
"content": str(result),
|
||||
}
|
||||
)
|
||||
else:
|
||||
extracted_content.extend(result)
|
||||
|
||||
return extracted_content
|
||||
|
||||
def show_usage(self) -> None:
|
||||
"""Print a detailed token usage report showing total and per-request usage."""
|
||||
print("\n=== Token Usage Summary ===")
|
||||
|
||||
@@ -1825,6 +1825,82 @@ def perform_completion_with_backoff(
|
||||
# ]
|
||||
|
||||
|
||||
async def aperform_completion_with_backoff(
|
||||
provider,
|
||||
prompt_with_variables,
|
||||
api_token,
|
||||
json_response=False,
|
||||
base_url=None,
|
||||
**kwargs,
|
||||
):
|
||||
"""
|
||||
Async version: Perform an API completion request with exponential backoff.
|
||||
|
||||
How it works:
|
||||
1. Sends an async completion request to the API.
|
||||
2. Retries on rate-limit errors with exponential delays (async).
|
||||
3. Returns the API response or an error after all retries.
|
||||
|
||||
Args:
|
||||
provider (str): The name of the API provider.
|
||||
prompt_with_variables (str): The input prompt for the completion request.
|
||||
api_token (str): The API token for authentication.
|
||||
json_response (bool): Whether to request a JSON response. Defaults to False.
|
||||
base_url (Optional[str]): The base URL for the API. Defaults to None.
|
||||
**kwargs: Additional arguments for the API request.
|
||||
|
||||
Returns:
|
||||
dict: The API response or an error message after all retries.
|
||||
"""
|
||||
|
||||
from litellm import acompletion
|
||||
from litellm.exceptions import RateLimitError
|
||||
import asyncio
|
||||
|
||||
max_attempts = 3
|
||||
base_delay = 2 # Base delay in seconds, you can adjust this based on your needs
|
||||
|
||||
extra_args = {"temperature": 0.01, "api_key": api_token, "base_url": base_url}
|
||||
if json_response:
|
||||
extra_args["response_format"] = {"type": "json_object"}
|
||||
|
||||
if kwargs.get("extra_args"):
|
||||
extra_args.update(kwargs["extra_args"])
|
||||
|
||||
for attempt in range(max_attempts):
|
||||
try:
|
||||
response = await acompletion(
|
||||
model=provider,
|
||||
messages=[{"role": "user", "content": prompt_with_variables}],
|
||||
**extra_args,
|
||||
)
|
||||
return response # Return the successful response
|
||||
except RateLimitError as e:
|
||||
print("Rate limit error:", str(e))
|
||||
|
||||
if attempt == max_attempts - 1:
|
||||
# Last attempt failed, raise the error.
|
||||
raise
|
||||
|
||||
# Check if we have exhausted our max attempts
|
||||
if attempt < max_attempts - 1:
|
||||
# Calculate the delay and wait
|
||||
delay = base_delay * (2**attempt) # Exponential backoff formula
|
||||
print(f"Waiting for {delay} seconds before retrying...")
|
||||
await asyncio.sleep(delay)
|
||||
else:
|
||||
# Return an error response after exhausting all retries
|
||||
return [
|
||||
{
|
||||
"index": 0,
|
||||
"tags": ["error"],
|
||||
"content": ["Rate limit error. Please try again later."],
|
||||
}
|
||||
]
|
||||
except Exception as e:
|
||||
raise e # Raise any other exceptions immediately
|
||||
|
||||
|
||||
def extract_blocks(url, html, provider=DEFAULT_PROVIDER, api_token=None, base_url=None):
|
||||
"""
|
||||
Extract content blocks from website HTML using an AI provider.
|
||||
|
||||
@@ -6,15 +6,16 @@ x-base-config: &base-config
|
||||
- "11235:11235" # Gunicorn port
|
||||
env_file:
|
||||
- .llm.env # API keys (create from .llm.env.example)
|
||||
environment:
|
||||
- OPENAI_API_KEY=${OPENAI_API_KEY:-}
|
||||
- DEEPSEEK_API_KEY=${DEEPSEEK_API_KEY:-}
|
||||
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
|
||||
- GROQ_API_KEY=${GROQ_API_KEY:-}
|
||||
- TOGETHER_API_KEY=${TOGETHER_API_KEY:-}
|
||||
- MISTRAL_API_KEY=${MISTRAL_API_KEY:-}
|
||||
- GEMINI_API_TOKEN=${GEMINI_API_TOKEN:-}
|
||||
- LLM_PROVIDER=${LLM_PROVIDER:-} # Optional: Override default provider (e.g., "anthropic/claude-3-opus")
|
||||
# Uncomment to set default environment variables (will overwrite .llm.env)
|
||||
# environment:
|
||||
# - OPENAI_API_KEY=${OPENAI_API_KEY:-}
|
||||
# - DEEPSEEK_API_KEY=${DEEPSEEK_API_KEY:-}
|
||||
# - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
|
||||
# - GROQ_API_KEY=${GROQ_API_KEY:-}
|
||||
# - TOGETHER_API_KEY=${TOGETHER_API_KEY:-}
|
||||
# - MISTRAL_API_KEY=${MISTRAL_API_KEY:-}
|
||||
# - GEMINI_API_KEY=${GEMINI_API_KEY:-}
|
||||
# - LLM_PROVIDER=${LLM_PROVIDER:-} # Optional: Override default provider (e.g., "anthropic/claude-3-opus")
|
||||
volumes:
|
||||
- /dev/shm:/dev/shm # Chromium performance
|
||||
deploy:
|
||||
|
||||
@@ -18,7 +18,7 @@ A comprehensive web-based tutorial for learning and experimenting with C4A-Scrip
|
||||
|
||||
2. **Install Dependencies**
|
||||
```bash
|
||||
pip install flask
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
|
||||
3. **Launch the Server**
|
||||
@@ -28,7 +28,7 @@ A comprehensive web-based tutorial for learning and experimenting with C4A-Scrip
|
||||
|
||||
4. **Open in Browser**
|
||||
```
|
||||
http://localhost:8080
|
||||
http://localhost:8000
|
||||
```
|
||||
|
||||
**🌐 Try Online**: [Live Demo](https://docs.crawl4ai.com/c4a-script/demo)
|
||||
@@ -325,7 +325,7 @@ Powers the recording functionality:
|
||||
### Configuration
|
||||
```python
|
||||
# server.py configuration
|
||||
PORT = 8080
|
||||
PORT = 8000
|
||||
DEBUG = True
|
||||
THREADED = True
|
||||
```
|
||||
@@ -343,9 +343,9 @@ THREADED = True
|
||||
**Port Already in Use**
|
||||
```bash
|
||||
# Kill existing process
|
||||
lsof -ti:8080 | xargs kill -9
|
||||
lsof -ti:8000 | xargs kill -9
|
||||
# Or use different port
|
||||
python server.py --port 8081
|
||||
python server.py --port 8001
|
||||
```
|
||||
|
||||
**Blockly Not Loading**
|
||||
|
||||
@@ -216,7 +216,7 @@ def get_examples():
|
||||
'name': 'Handle Cookie Banner',
|
||||
'description': 'Accept cookies and close newsletter popup',
|
||||
'script': '''# Handle cookie banner and newsletter
|
||||
GO http://127.0.0.1:8080/playground/
|
||||
GO http://127.0.0.1:8000/playground/
|
||||
WAIT `body` 2
|
||||
IF (EXISTS `.cookie-banner`) THEN CLICK `.accept`
|
||||
IF (EXISTS `.newsletter-popup`) THEN CLICK `.close`'''
|
||||
|
||||
@@ -82,6 +82,42 @@ If you installed Crawl4AI (which installs Playwright under the hood), you alread
|
||||
|
||||
---
|
||||
|
||||
### Creating a Profile Using the Crawl4AI CLI (Easiest)
|
||||
|
||||
If you prefer a guided, interactive setup, use the built-in CLI to create and manage persistent browser profiles.
|
||||
|
||||
1.⠀Launch the profile manager:
|
||||
```bash
|
||||
crwl profiles
|
||||
```
|
||||
|
||||
2.⠀Choose "Create new profile" and enter a profile name. A Chromium window opens so you can log in to sites and configure settings. When finished, return to the terminal and press `q` to save the profile.
|
||||
|
||||
3.⠀Profiles are saved under `~/.crawl4ai/profiles/<profile_name>` (for example: `/home/<you>/.crawl4ai/profiles/test_profile_1`) along with a `storage_state.json` for cookies and session data.
|
||||
|
||||
4.⠀Optionally, choose "List profiles" in the CLI to view available profiles and their paths.
|
||||
|
||||
5.⠀Use the saved path with `BrowserConfig.user_data_dir`:
|
||||
```python
|
||||
from crawl4ai import AsyncWebCrawler, BrowserConfig
|
||||
|
||||
profile_path = "/home/<you>/.crawl4ai/profiles/test_profile_1"
|
||||
|
||||
browser_config = BrowserConfig(
|
||||
headless=True,
|
||||
use_managed_browser=True,
|
||||
user_data_dir=profile_path,
|
||||
browser_type="chromium",
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
result = await crawler.arun(url="https://example.com/private")
|
||||
```
|
||||
|
||||
The CLI also supports listing and deleting profiles, and even testing a crawl directly from the menu.
|
||||
|
||||
---
|
||||
|
||||
## 3. Using Managed Browsers in Crawl4AI
|
||||
|
||||
Once you have a data directory with your session data, pass it to **`BrowserConfig`**:
|
||||
|
||||
@@ -18,7 +18,7 @@ A comprehensive web-based tutorial for learning and experimenting with C4A-Scrip
|
||||
|
||||
2. **Install Dependencies**
|
||||
```bash
|
||||
pip install flask
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
|
||||
3. **Launch the Server**
|
||||
@@ -28,7 +28,7 @@ A comprehensive web-based tutorial for learning and experimenting with C4A-Scrip
|
||||
|
||||
4. **Open in Browser**
|
||||
```
|
||||
http://localhost:8080
|
||||
http://localhost:8000
|
||||
```
|
||||
|
||||
**🌐 Try Online**: [Live Demo](https://docs.crawl4ai.com/c4a-script/demo)
|
||||
@@ -325,7 +325,7 @@ Powers the recording functionality:
|
||||
### Configuration
|
||||
```python
|
||||
# server.py configuration
|
||||
PORT = 8080
|
||||
PORT = 8000
|
||||
DEBUG = True
|
||||
THREADED = True
|
||||
```
|
||||
@@ -343,9 +343,9 @@ THREADED = True
|
||||
**Port Already in Use**
|
||||
```bash
|
||||
# Kill existing process
|
||||
lsof -ti:8080 | xargs kill -9
|
||||
lsof -ti:8000 | xargs kill -9
|
||||
# Or use different port
|
||||
python server.py --port 8081
|
||||
python server.py --port 8001
|
||||
```
|
||||
|
||||
**Blockly Not Loading**
|
||||
|
||||
@@ -216,7 +216,7 @@ def get_examples():
|
||||
'name': 'Handle Cookie Banner',
|
||||
'description': 'Accept cookies and close newsletter popup',
|
||||
'script': '''# Handle cookie banner and newsletter
|
||||
GO http://127.0.0.1:8080/playground/
|
||||
GO http://127.0.0.1:8000/playground/
|
||||
WAIT `body` 2
|
||||
IF (EXISTS `.cookie-banner`) THEN CLICK `.accept`
|
||||
IF (EXISTS `.newsletter-popup`) THEN CLICK `.close`'''
|
||||
@@ -283,7 +283,7 @@ WAIT `.success-message` 5'''
|
||||
return jsonify(examples)
|
||||
|
||||
if __name__ == '__main__':
|
||||
port = int(os.environ.get('PORT', 8080))
|
||||
port = int(os.environ.get('PORT', 8000))
|
||||
print(f"""
|
||||
╔══════════════════════════════════════════════════════════╗
|
||||
║ C4A-Script Interactive Tutorial Server ║
|
||||
|
||||
@@ -69,12 +69,12 @@ The tutorial includes a Flask-based web interface with:
|
||||
cd docs/examples/c4a_script/tutorial/
|
||||
|
||||
# Install dependencies
|
||||
pip install flask
|
||||
pip install -r requirements.txt
|
||||
|
||||
# Launch the tutorial server
|
||||
python app.py
|
||||
python server.py
|
||||
|
||||
# Open http://localhost:5000 in your browser
|
||||
# Open http://localhost:8000 in your browser
|
||||
```
|
||||
|
||||
## Core Concepts
|
||||
@@ -111,8 +111,8 @@ CLICK `.submit-btn`
|
||||
# By attribute
|
||||
CLICK `button[type="submit"]`
|
||||
|
||||
# By text content
|
||||
CLICK `button:contains("Sign In")`
|
||||
# By accessible attributes
|
||||
CLICK `button[aria-label="Search"][title="Search"]`
|
||||
|
||||
# Complex selectors
|
||||
CLICK `.form-container input[name="email"]`
|
||||
|
||||
@@ -57,7 +57,7 @@
|
||||
|
||||
Crawl4AI is the #1 trending GitHub repository, actively maintained by a vibrant community. It delivers blazing-fast, AI-ready web crawling tailored for large language models, AI agents, and data pipelines. Fully open source, flexible, and built for real-time performance, **Crawl4AI** empowers developers with unmatched speed, precision, and deployment ease.
|
||||
|
||||
> **Note**: If you're looking for the old documentation, you can access it [here](https://old.docs.crawl4ai.com).
|
||||
> Enjoy using Crawl4AI? Consider **[becoming a sponsor](https://github.com/sponsors/unclecode)** to support ongoing development and community growth!
|
||||
|
||||
## 🆕 AI Assistant Skill Now Available!
|
||||
|
||||
|
||||
@@ -364,5 +364,19 @@ async def test_network_error_handling():
|
||||
async with AsyncPlaywrightCrawlerStrategy() as strategy:
|
||||
await strategy.crawl("https://invalid.example.com", config)
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_remove_overlay_elements(crawler_strategy):
|
||||
config = CrawlerRunConfig(
|
||||
remove_overlay_elements=True,
|
||||
delay_before_return_html=5,
|
||||
)
|
||||
|
||||
response = await crawler_strategy.crawl(
|
||||
"https://www2.hm.com/en_us/index.html",
|
||||
config
|
||||
)
|
||||
assert response.status_code == 200
|
||||
assert "Accept all cookies" not in response.html
|
||||
|
||||
if __name__ == "__main__":
|
||||
pytest.main([__file__, "-v"])
|
||||
@@ -1,283 +0,0 @@
|
||||
"""
|
||||
Compact test suite for CDP concurrency fix.
|
||||
|
||||
This file consolidates all tests related to the CDP concurrency fix for
|
||||
AsyncWebCrawler.arun_many() with managed browsers.
|
||||
|
||||
The bug was that all concurrent tasks were fighting over one shared tab,
|
||||
causing failures. This has been fixed by modifying the get_page() method
|
||||
in browser_manager.py to always create new pages instead of reusing pages[0].
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import shutil
|
||||
import sys
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
|
||||
# Add parent directory to path for imports
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent))
|
||||
|
||||
from crawl4ai import AsyncWebCrawler, CacheMode, CrawlerRunConfig
|
||||
from crawl4ai.async_configs import BrowserConfig
|
||||
|
||||
# =============================================================================
|
||||
# TEST 1: Basic arun_many functionality
|
||||
# =============================================================================
|
||||
|
||||
|
||||
async def test_basic_arun_many():
|
||||
"""Test that arun_many works correctly with basic configuration."""
|
||||
print("=== TEST 1: Basic arun_many functionality ===")
|
||||
|
||||
# Configuration to bypass cache for testing
|
||||
config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS)
|
||||
|
||||
# Test URLs - using reliable test URLs
|
||||
test_urls = [
|
||||
"https://httpbin.org/html", # Simple HTML page
|
||||
"https://httpbin.org/json", # Simple JSON response
|
||||
]
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
print(f"Testing concurrent crawling of {len(test_urls)} URLs...")
|
||||
|
||||
# This should work correctly
|
||||
result = await crawler.arun_many(urls=test_urls, config=config)
|
||||
|
||||
# Simple verification - if we get here without exception, the basic functionality works
|
||||
print(f"✓ arun_many completed successfully")
|
||||
return True
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# TEST 2: CDP Browser with Managed Configuration
|
||||
# =============================================================================
|
||||
|
||||
|
||||
async def test_arun_many_with_managed_cdp_browser():
|
||||
"""Test that arun_many works correctly with managed CDP browsers."""
|
||||
print("\n=== TEST 2: arun_many with managed CDP browser ===")
|
||||
|
||||
# Create a temporary user data directory for the CDP browser
|
||||
user_data_dir = tempfile.mkdtemp(prefix="crawl4ai-cdp-test-")
|
||||
|
||||
try:
|
||||
# Configure browser to use managed CDP mode
|
||||
browser_config = BrowserConfig(
|
||||
use_managed_browser=True,
|
||||
browser_type="chromium",
|
||||
headless=True,
|
||||
user_data_dir=user_data_dir,
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
# Configuration to bypass cache for testing
|
||||
crawler_config = CrawlerRunConfig(
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
page_timeout=60000,
|
||||
wait_until="domcontentloaded",
|
||||
)
|
||||
|
||||
# Test URLs - using reliable test URLs
|
||||
test_urls = [
|
||||
"https://httpbin.org/html", # Simple HTML page
|
||||
"https://httpbin.org/json", # Simple JSON response
|
||||
]
|
||||
|
||||
# Create crawler with CDP browser configuration
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
print(f"Testing concurrent crawling of {len(test_urls)} URLs...")
|
||||
|
||||
# This should work correctly with our fix
|
||||
result = await crawler.arun_many(urls=test_urls, config=crawler_config)
|
||||
|
||||
print(f"✓ arun_many completed successfully with managed CDP browser")
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
print(f"❌ Test failed with error: {str(e)}")
|
||||
raise
|
||||
finally:
|
||||
# Clean up temporary directory
|
||||
try:
|
||||
shutil.rmtree(user_data_dir, ignore_errors=True)
|
||||
except:
|
||||
pass
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# TEST 3: Concurrency Verification
|
||||
# =============================================================================
|
||||
|
||||
|
||||
async def test_concurrent_crawling():
|
||||
"""Test concurrent crawling to verify the fix works."""
|
||||
print("\n=== TEST 3: Concurrent crawling verification ===")
|
||||
|
||||
# Configuration to bypass cache for testing
|
||||
config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS)
|
||||
|
||||
# Test URLs - using reliable test URLs
|
||||
test_urls = [
|
||||
"https://httpbin.org/html", # Simple HTML page
|
||||
"https://httpbin.org/json", # Simple JSON response
|
||||
"https://httpbin.org/uuid", # Simple UUID response
|
||||
"https://example.com/", # Standard example page
|
||||
]
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
print(f"Testing concurrent crawling of {len(test_urls)} URLs...")
|
||||
|
||||
# This should work correctly with our fix
|
||||
results = await crawler.arun_many(urls=test_urls, config=config)
|
||||
|
||||
# Simple verification - if we get here without exception, the fix works
|
||||
print("✓ arun_many completed successfully with concurrent crawling")
|
||||
return True
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# TEST 4: Concurrency Fix Demonstration
|
||||
# =============================================================================
|
||||
|
||||
|
||||
async def test_concurrency_fix():
|
||||
"""Demonstrate that the concurrency fix works."""
|
||||
print("\n=== TEST 4: Concurrency fix demonstration ===")
|
||||
|
||||
# Configuration to bypass cache for testing
|
||||
config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS)
|
||||
|
||||
# Test URLs - using reliable test URLs
|
||||
test_urls = [
|
||||
"https://httpbin.org/html", # Simple HTML page
|
||||
"https://httpbin.org/json", # Simple JSON response
|
||||
"https://httpbin.org/uuid", # Simple UUID response
|
||||
]
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
print(f"Testing concurrent crawling of {len(test_urls)} URLs...")
|
||||
|
||||
# This should work correctly with our fix
|
||||
results = await crawler.arun_many(urls=test_urls, config=config)
|
||||
|
||||
# Simple verification - if we get here without exception, the fix works
|
||||
print("✓ arun_many completed successfully with concurrent crawling")
|
||||
return True
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# TEST 5: Before/After Behavior Comparison
|
||||
# =============================================================================
|
||||
|
||||
|
||||
async def test_before_after_behavior():
|
||||
"""Test that demonstrates concurrent crawling works correctly after the fix."""
|
||||
print("\n=== TEST 5: Before/After behavior test ===")
|
||||
|
||||
# Configuration to bypass cache for testing
|
||||
config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS)
|
||||
|
||||
# Test URLs - using reliable test URLs that would stress the concurrency system
|
||||
test_urls = [
|
||||
"https://httpbin.org/delay/1", # Delayed response to increase chance of contention
|
||||
"https://httpbin.org/delay/2", # Delayed response to increase chance of contention
|
||||
"https://httpbin.org/uuid", # Fast response
|
||||
"https://httpbin.org/json", # Fast response
|
||||
]
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
print(
|
||||
f"Testing concurrent crawling of {len(test_urls)} URLs (including delayed responses)..."
|
||||
)
|
||||
print(
|
||||
"This test would have failed before the concurrency fix due to page contention."
|
||||
)
|
||||
|
||||
# This should work correctly with our fix
|
||||
results = await crawler.arun_many(urls=test_urls, config=config)
|
||||
|
||||
# Simple verification - if we get here without exception, the fix works
|
||||
print("✓ arun_many completed successfully with concurrent crawling")
|
||||
print("✓ No page contention issues detected")
|
||||
return True
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# TEST 6: Reference Pattern Test
|
||||
# =============================================================================
|
||||
|
||||
|
||||
async def test_reference_pattern():
|
||||
"""Main test function following reference pattern."""
|
||||
print("\n=== TEST 6: Reference pattern test ===")
|
||||
|
||||
# Configure crawler settings
|
||||
crawler_cfg = CrawlerRunConfig(
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
page_timeout=60000,
|
||||
wait_until="domcontentloaded",
|
||||
)
|
||||
|
||||
# Define URLs to crawl
|
||||
URLS = [
|
||||
"https://httpbin.org/html",
|
||||
"https://httpbin.org/json",
|
||||
"https://httpbin.org/uuid",
|
||||
]
|
||||
|
||||
# Crawl all URLs using arun_many
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
print(f"Testing concurrent crawling of {len(URLS)} URLs...")
|
||||
results = await crawler.arun_many(urls=URLS, config=crawler_cfg)
|
||||
|
||||
# Simple verification - if we get here without exception, the fix works
|
||||
print("✓ arun_many completed successfully with concurrent crawling")
|
||||
print("✅ Reference pattern test completed successfully!")
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# MAIN EXECUTION
|
||||
# =============================================================================
|
||||
|
||||
|
||||
async def main():
|
||||
"""Run all tests."""
|
||||
print("Running compact CDP concurrency test suite...")
|
||||
print("=" * 60)
|
||||
|
||||
tests = [
|
||||
test_basic_arun_many,
|
||||
test_arun_many_with_managed_cdp_browser,
|
||||
test_concurrent_crawling,
|
||||
test_concurrency_fix,
|
||||
test_before_after_behavior,
|
||||
test_reference_pattern,
|
||||
]
|
||||
|
||||
passed = 0
|
||||
failed = 0
|
||||
|
||||
for test_func in tests:
|
||||
try:
|
||||
await test_func()
|
||||
passed += 1
|
||||
except Exception as e:
|
||||
print(f"❌ Test failed: {str(e)}")
|
||||
failed += 1
|
||||
|
||||
print("\n" + "=" * 60)
|
||||
print(f"Test Results: {passed} passed, {failed} failed")
|
||||
|
||||
if failed == 0:
|
||||
print("🎉 All tests passed! The CDP concurrency fix is working correctly.")
|
||||
return True
|
||||
else:
|
||||
print(f"❌ {failed} test(s) failed!")
|
||||
return False
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
success = asyncio.run(main())
|
||||
sys.exit(0 if success else 1)
|
||||
220
tests/test_llm_extraction_parallel_issue_1055.py
Normal file
220
tests/test_llm_extraction_parallel_issue_1055.py
Normal file
@@ -0,0 +1,220 @@
|
||||
"""
|
||||
Final verification test for Issue #1055 fix
|
||||
|
||||
This test demonstrates that LLM extraction now runs in parallel
|
||||
when using arun_many with multiple URLs.
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import asyncio
|
||||
|
||||
grandparent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
||||
sys.path.append(grandparent_dir)
|
||||
|
||||
from crawl4ai import (
|
||||
AsyncWebCrawler,
|
||||
BrowserConfig,
|
||||
CrawlerRunConfig,
|
||||
CacheMode,
|
||||
LLMExtractionStrategy,
|
||||
LLMConfig,
|
||||
)
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
|
||||
class SimpleData(BaseModel):
|
||||
title: str
|
||||
summary: str
|
||||
|
||||
|
||||
def print_section(title):
|
||||
print("\n" + "=" * 80)
|
||||
print(title)
|
||||
print("=" * 80 + "\n")
|
||||
|
||||
|
||||
async def test_without_llm():
|
||||
"""Baseline: Test crawling without LLM extraction"""
|
||||
print_section("TEST 1: Crawling WITHOUT LLM Extraction")
|
||||
|
||||
config = CrawlerRunConfig(
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
)
|
||||
|
||||
browser_config = BrowserConfig(headless=True, verbose=False)
|
||||
|
||||
urls = [
|
||||
"https://www.example.com",
|
||||
"https://www.iana.org",
|
||||
"https://www.wikipedia.org",
|
||||
]
|
||||
|
||||
print(f"Crawling {len(urls)} URLs without LLM extraction...")
|
||||
print("Expected: Fast and parallel\n")
|
||||
|
||||
start_time = time.time()
|
||||
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
results = await crawler.arun_many(urls=urls, config=config)
|
||||
|
||||
duration = time.time() - start_time
|
||||
|
||||
print(f"\n✅ Completed in {duration:.2f}s")
|
||||
print(f" Successful: {sum(1 for r in results if r.success)}/{len(urls)}")
|
||||
print(f" Average: {duration/len(urls):.2f}s per URL")
|
||||
|
||||
return duration
|
||||
|
||||
|
||||
async def test_with_llm_before_fix():
|
||||
"""Demonstrate the problem: Sequential execution with LLM"""
|
||||
print_section("TEST 2: What Issue #1055 Reported (LLM Sequential Behavior)")
|
||||
|
||||
print("The issue reported that with LLM extraction, URLs would crawl")
|
||||
print("one after another instead of in parallel.")
|
||||
print("\nWithout our fix, this would show:")
|
||||
print(" - URL 1 fetches → extracts → completes")
|
||||
print(" - URL 2 fetches → extracts → completes")
|
||||
print(" - URL 3 fetches → extracts → completes")
|
||||
print("\nTotal time would be approximately sum of all individual times.")
|
||||
|
||||
|
||||
async def test_with_llm_after_fix():
|
||||
"""Demonstrate the fix: Parallel execution with LLM"""
|
||||
print_section("TEST 3: After Fix - LLM Extraction in Parallel")
|
||||
|
||||
config = CrawlerRunConfig(
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
extraction_strategy=LLMExtractionStrategy(
|
||||
llm_config=LLMConfig(provider="openai/gpt-4o-mini"),
|
||||
schema=SimpleData.model_json_schema(),
|
||||
extraction_type="schema",
|
||||
instruction="Extract title and summary",
|
||||
)
|
||||
)
|
||||
|
||||
browser_config = BrowserConfig(headless=True, verbose=False)
|
||||
|
||||
urls = [
|
||||
"https://www.example.com",
|
||||
"https://www.iana.org",
|
||||
"https://www.wikipedia.org",
|
||||
]
|
||||
|
||||
print(f"Crawling {len(urls)} URLs WITH LLM extraction...")
|
||||
print("Expected: Parallel execution with our fix\n")
|
||||
|
||||
completion_times = {}
|
||||
start_time = time.time()
|
||||
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
results = await crawler.arun_many(urls=urls, config=config)
|
||||
for result in results:
|
||||
elapsed = time.time() - start_time
|
||||
completion_times[result.url] = elapsed
|
||||
print(f" [{elapsed:5.2f}s] ✓ {result.url[:50]}")
|
||||
|
||||
duration = time.time() - start_time
|
||||
|
||||
print(f"\n✅ Total time: {duration:.2f}s")
|
||||
print(f" Successful: {sum(1 for url in urls if url in completion_times)}/{len(urls)}")
|
||||
|
||||
# Analyze parallelism
|
||||
times = list(completion_times.values())
|
||||
if len(times) >= 2:
|
||||
# If parallel, completion times should be staggered, not evenly spaced
|
||||
time_diffs = [times[i+1] - times[i] for i in range(len(times)-1)]
|
||||
avg_diff = sum(time_diffs) / len(time_diffs)
|
||||
|
||||
print(f"\nParallelism Analysis:")
|
||||
print(f" Completion time differences: {[f'{d:.2f}s' for d in time_diffs]}")
|
||||
print(f" Average difference: {avg_diff:.2f}s")
|
||||
|
||||
# In parallel mode, some tasks complete close together
|
||||
# In sequential mode, they're evenly spaced (avg ~2-3s apart)
|
||||
if avg_diff < duration / len(urls):
|
||||
print(f" ✅ PARALLEL: Tasks completed with overlapping execution")
|
||||
else:
|
||||
print(f" ⚠️ SEQUENTIAL: Tasks completed one after another")
|
||||
|
||||
return duration
|
||||
|
||||
|
||||
async def test_multiple_arun_calls():
|
||||
"""Test multiple individual arun() calls in parallel"""
|
||||
print_section("TEST 4: Multiple arun() Calls with asyncio.gather")
|
||||
|
||||
config = CrawlerRunConfig(
|
||||
cache_mode=CacheMode.BYPASS,
|
||||
extraction_strategy=LLMExtractionStrategy(
|
||||
llm_config=LLMConfig(provider="openai/gpt-4o-mini"),
|
||||
schema=SimpleData.model_json_schema(),
|
||||
extraction_type="schema",
|
||||
instruction="Extract title and summary",
|
||||
)
|
||||
)
|
||||
|
||||
browser_config = BrowserConfig(headless=True, verbose=False)
|
||||
|
||||
urls = [
|
||||
"https://www.example.com",
|
||||
"https://www.iana.org",
|
||||
"https://www.wikipedia.org",
|
||||
]
|
||||
|
||||
print(f"Running {len(urls)} arun() calls with asyncio.gather()...")
|
||||
print("Expected: True parallel execution\n")
|
||||
|
||||
start_time = time.time()
|
||||
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
tasks = [crawler.arun(url, config=config) for url in urls]
|
||||
results = await asyncio.gather(*tasks)
|
||||
|
||||
duration = time.time() - start_time
|
||||
|
||||
print(f"\n✅ Completed in {duration:.2f}s")
|
||||
print(f" Successful: {sum(1 for r in results if r.success)}/{len(urls)}")
|
||||
print(f" This proves the async LLM extraction works correctly")
|
||||
|
||||
return duration
|
||||
|
||||
|
||||
async def main():
|
||||
print("\n" + "🚀" * 40)
|
||||
print("ISSUE #1055 FIX VERIFICATION")
|
||||
print("Testing: Sequential → Parallel LLM Extraction")
|
||||
print("🚀" * 40)
|
||||
|
||||
# Run tests
|
||||
await test_without_llm()
|
||||
|
||||
await test_with_llm_before_fix()
|
||||
|
||||
time_with_llm = await test_with_llm_after_fix()
|
||||
|
||||
time_gather = await test_multiple_arun_calls()
|
||||
|
||||
# Final summary
|
||||
print_section("FINAL VERDICT")
|
||||
|
||||
print("✅ Fix Verified!")
|
||||
print("\nWhat changed:")
|
||||
print(" • Created aperform_completion_with_backoff() using litellm.acompletion")
|
||||
print(" • Added arun() method to ExtractionStrategy base class")
|
||||
print(" • Implemented parallel arun() in LLMExtractionStrategy")
|
||||
print(" • Updated AsyncWebCrawler to use arun() when available")
|
||||
print("\nResult:")
|
||||
print(" • LLM extraction now runs in parallel across multiple URLs")
|
||||
print(" • Backward compatible - existing strategies still work")
|
||||
print(" • No breaking changes to the API")
|
||||
print("\n✨ Issue #1055 is RESOLVED!")
|
||||
|
||||
print("\n" + "=" * 80 + "\n")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
134
tests/unit/test_sitemap_namespace_parsing.py
Normal file
134
tests/unit/test_sitemap_namespace_parsing.py
Normal file
@@ -0,0 +1,134 @@
|
||||
import sys
|
||||
from types import SimpleNamespace
|
||||
|
||||
import pytest
|
||||
|
||||
# Provide a lightweight stub for rank_bm25 before importing the seeder to avoid
|
||||
# optional dependency issues (e.g., incompatible wheels in CI).
|
||||
class _FakeBM25:
|
||||
def __init__(self, corpus):
|
||||
self._scores = [1.0] * len(corpus)
|
||||
|
||||
def get_scores(self, tokens):
|
||||
return self._scores
|
||||
|
||||
|
||||
sys.modules.setdefault("rank_bm25", SimpleNamespace(BM25Okapi=_FakeBM25))
|
||||
|
||||
from crawl4ai.async_url_seeder import AsyncUrlSeeder
|
||||
|
||||
|
||||
class DummyResponse:
|
||||
def __init__(self, request_url: str, text: str):
|
||||
self.status_code = 200
|
||||
self._content = text.encode("utf-8")
|
||||
self.url = request_url
|
||||
|
||||
def raise_for_status(self):
|
||||
return None
|
||||
|
||||
@property
|
||||
def content(self):
|
||||
return self._content
|
||||
|
||||
@property
|
||||
def text(self):
|
||||
return self._content.decode("utf-8")
|
||||
|
||||
|
||||
class DummyAsyncClient:
|
||||
def __init__(self, response_map):
|
||||
self._responses = response_map
|
||||
|
||||
async def get(self, url, **kwargs):
|
||||
payload = self._responses[url]
|
||||
if callable(payload):
|
||||
payload = payload()
|
||||
return DummyResponse(url, payload)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_iter_sitemap_handles_namespace_less_sitemaps():
|
||||
xml = """<?xml version="1.0"?>
|
||||
<urlset>
|
||||
<url><loc>https://example.com/a</loc></url>
|
||||
<url><loc>https://example.com/b</loc></url>
|
||||
</urlset>
|
||||
"""
|
||||
seeder = AsyncUrlSeeder(client=DummyAsyncClient({"https://example.com/sitemap.xml": xml}))
|
||||
|
||||
urls = []
|
||||
async for u in seeder._iter_sitemap("https://example.com/sitemap.xml"):
|
||||
urls.append(u)
|
||||
|
||||
assert urls == ["https://example.com/a", "https://example.com/b"]
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_iter_sitemap_handles_custom_namespace():
|
||||
xml = """<?xml version="1.0"?>
|
||||
<urlset xmlns="https://custom.namespace/schema">
|
||||
<url><loc>https://example.com/ns</loc></url>
|
||||
</urlset>
|
||||
"""
|
||||
seeder = AsyncUrlSeeder(client=DummyAsyncClient({"https://example.com/ns-sitemap.xml": xml}))
|
||||
|
||||
urls = []
|
||||
async for u in seeder._iter_sitemap("https://example.com/ns-sitemap.xml"):
|
||||
urls.append(u)
|
||||
|
||||
assert urls == ["https://example.com/ns"]
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_iter_sitemap_handles_namespace_index_and_children():
|
||||
index_xml = """<?xml version="1.0"?>
|
||||
<sitemapindex xmlns="http://another.example/ns">
|
||||
<sitemap>
|
||||
<loc>https://example.com/child-1.xml</loc>
|
||||
</sitemap>
|
||||
<sitemap>
|
||||
<loc>https://example.com/child-2.xml</loc>
|
||||
</sitemap>
|
||||
</sitemapindex>
|
||||
"""
|
||||
child_xml = """<?xml version="1.0"?>
|
||||
<urlset xmlns="http://irrelevant">
|
||||
<url><loc>https://example.com/page-{n}</loc></url>
|
||||
</urlset>
|
||||
"""
|
||||
responses = {
|
||||
"https://example.com/index.xml": index_xml,
|
||||
"https://example.com/child-1.xml": child_xml.format(n=1),
|
||||
"https://example.com/child-2.xml": child_xml.format(n=2),
|
||||
}
|
||||
seeder = AsyncUrlSeeder(client=DummyAsyncClient(responses))
|
||||
|
||||
urls = []
|
||||
async for u in seeder._iter_sitemap("https://example.com/index.xml"):
|
||||
urls.append(u)
|
||||
|
||||
assert sorted(urls) == [
|
||||
"https://example.com/page-1",
|
||||
"https://example.com/page-2",
|
||||
]
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_iter_sitemap_normalizes_relative_locations():
|
||||
xml = """<?xml version="1.0"?>
|
||||
<urlset>
|
||||
<url><loc>/relative-path</loc></url>
|
||||
<url><loc>https://example.com/absolute</loc></url>
|
||||
</urlset>
|
||||
"""
|
||||
seeder = AsyncUrlSeeder(client=DummyAsyncClient({"https://example.com/sitemap.xml": xml}))
|
||||
|
||||
urls = []
|
||||
async for u in seeder._iter_sitemap("https://example.com/sitemap.xml"):
|
||||
urls.append(u)
|
||||
|
||||
assert urls == [
|
||||
"https://example.com/relative-path",
|
||||
"https://example.com/absolute",
|
||||
]
|
||||
Reference in New Issue
Block a user